From f1bba4a87cfe48e3880f032de44a42df31f2ea8b Mon Sep 17 00:00:00 2001 From: guangli-dai Date: Fri, 13 Jun 2025 12:31:12 -0700 Subject: [PATCH] Reformat the codebase with the clang-format 18. --- include/jemalloc/internal/activity_callback.h | 9 +- include/jemalloc/internal/arena_externs.h | 136 +- include/jemalloc/internal/arena_inlines_b.h | 252 +- include/jemalloc/internal/arena_stats.h | 56 +- include/jemalloc/internal/arena_structs.h | 35 +- include/jemalloc/internal/arena_types.h | 26 +- include/jemalloc/internal/assert.h | 74 +- include/jemalloc/internal/atomic.h | 53 +- include/jemalloc/internal/atomic_c11.h | 54 +- include/jemalloc/internal/atomic_gcc_atomic.h | 165 +- include/jemalloc/internal/atomic_gcc_sync.h | 92 +- include/jemalloc/internal/atomic_msvc.h | 151 +- .../internal/background_thread_externs.h | 22 +- .../internal/background_thread_inlines.h | 8 +- .../internal/background_thread_structs.h | 26 +- include/jemalloc/internal/base.h | 44 +- include/jemalloc/internal/batcher.h | 14 +- include/jemalloc/internal/bin.h | 24 +- include/jemalloc/internal/bin_info.h | 12 +- include/jemalloc/internal/bin_stats.h | 30 +- include/jemalloc/internal/bin_types.h | 5 +- include/jemalloc/internal/bit_util.h | 52 +- include/jemalloc/internal/bitmap.h | 217 +- include/jemalloc/internal/buf_writer.h | 24 +- include/jemalloc/internal/cache_bin.h | 117 +- include/jemalloc/internal/ckh.h | 10 +- include/jemalloc/internal/counter.h | 2 +- include/jemalloc/internal/ctl.h | 135 +- include/jemalloc/internal/decay.h | 14 +- include/jemalloc/internal/ecache.h | 16 +- include/jemalloc/internal/edata.h | 281 +- include/jemalloc/internal/edata_cache.h | 16 +- include/jemalloc/internal/ehooks.h | 39 +- include/jemalloc/internal/emap.h | 99 +- include/jemalloc/internal/emitter.h | 106 +- include/jemalloc/internal/exp_grow.h | 4 +- include/jemalloc/internal/extent.h | 61 +- include/jemalloc/internal/extent_dss.h | 18 +- include/jemalloc/internal/extent_mmap.h | 4 +- 
include/jemalloc/internal/fb.h | 29 +- include/jemalloc/internal/fxp.h | 2 +- include/jemalloc/internal/hash.h | 316 +- include/jemalloc/internal/hook.h | 20 +- include/jemalloc/internal/hpa.h | 15 +- include/jemalloc/internal/hpa_hooks.h | 2 +- include/jemalloc/internal/hpa_utils.h | 77 +- include/jemalloc/internal/hpdata.h | 21 +- include/jemalloc/internal/inspect.h | 10 +- .../internal/jemalloc_internal_decls.h | 125 +- .../internal/jemalloc_internal_externs.h | 74 +- .../internal/jemalloc_internal_inlines_a.h | 8 +- .../internal/jemalloc_internal_inlines_b.h | 23 +- .../internal/jemalloc_internal_inlines_c.h | 357 +-- .../internal/jemalloc_internal_macros.h | 171 +- .../internal/jemalloc_internal_overrides.h | 9 +- .../internal/jemalloc_internal_types.h | 128 +- include/jemalloc/internal/large_externs.h | 18 +- include/jemalloc/internal/lockedint.h | 63 +- include/jemalloc/internal/log.h | 66 +- include/jemalloc/internal/malloc_io.h | 98 +- include/jemalloc/internal/mutex.h | 188 +- include/jemalloc/internal/mutex_prof.h | 112 +- include/jemalloc/internal/nstime.h | 39 +- include/jemalloc/internal/pa.h | 28 +- include/jemalloc/internal/pac.h | 20 +- include/jemalloc/internal/pages.h | 70 +- include/jemalloc/internal/pai.h | 13 +- include/jemalloc/internal/peak.h | 3 +- include/jemalloc/internal/peak_event.h | 2 +- include/jemalloc/internal/ph.h | 214 +- include/jemalloc/internal/prng.h | 10 +- include/jemalloc/internal/prof_data.h | 16 +- include/jemalloc/internal/prof_externs.h | 95 +- include/jemalloc/internal/prof_hook.h | 6 +- include/jemalloc/internal/prof_inlines.h | 19 +- include/jemalloc/internal/prof_log.h | 6 +- include/jemalloc/internal/prof_structs.h | 106 +- include/jemalloc/internal/prof_sys.h | 14 +- include/jemalloc/internal/prof_types.h | 48 +- include/jemalloc/internal/psset.h | 2 +- include/jemalloc/internal/ql.h | 171 +- include/jemalloc/internal/qr.h | 66 +- include/jemalloc/internal/quantum.h | 148 +- include/jemalloc/internal/rb.h | 2 +- 
include/jemalloc/internal/rtree.h | 263 +- include/jemalloc/internal/rtree_tsd.h | 19 +- include/jemalloc/internal/safety_check.h | 27 +- include/jemalloc/internal/san.h | 48 +- include/jemalloc/internal/san_bump.h | 9 +- include/jemalloc/internal/sc.h | 54 +- include/jemalloc/internal/sec.h | 14 +- include/jemalloc/internal/sec_opts.h | 22 +- include/jemalloc/internal/smoothstep.h | 410 +-- include/jemalloc/internal/spin.h | 9 +- include/jemalloc/internal/stats.h | 30 +- include/jemalloc/internal/sz.h | 72 +- include/jemalloc/internal/tcache_externs.h | 52 +- include/jemalloc/internal/tcache_inlines.h | 56 +- include/jemalloc/internal/tcache_structs.h | 30 +- include/jemalloc/internal/tcache_types.h | 20 +- include/jemalloc/internal/test_hooks.h | 24 +- include/jemalloc/internal/thread_event.h | 31 +- .../jemalloc/internal/thread_event_registry.h | 4 +- include/jemalloc/internal/ticker.h | 20 +- include/jemalloc/internal/tsd.h | 66 +- include/jemalloc/internal/tsd_generic.h | 47 +- include/jemalloc/internal/tsd_internals.h | 201 +- .../internal/tsd_malloc_thread_cleanup.h | 2 +- include/jemalloc/internal/tsd_tls.h | 4 +- include/jemalloc/internal/tsd_types.h | 4 +- include/jemalloc/internal/tsd_win.h | 49 +- include/jemalloc/internal/typed_list.h | 93 +- include/jemalloc/internal/util.h | 55 +- include/jemalloc/internal/witness.h | 86 +- include/msvc_compat/C99/stdint.h | 302 +- include/msvc_compat/strings.h | 39 +- msvc/test_threads/test_threads.cpp | 172 +- msvc/test_threads/test_threads_main.cpp | 7 +- src/arena.c | 504 ++- src/background_thread.c | 297 +- src/base.c | 209 +- src/batcher.c | 10 +- src/bin.c | 10 +- src/bin_info.c | 8 +- src/bitmap.c | 21 +- src/buf_writer.c | 15 +- src/cache_bin.c | 30 +- src/ckh.c | 101 +- src/counter.c | 2 +- src/ctl.c | 2262 +++++++------- src/decay.c | 71 +- src/ecache.c | 2 +- src/edata.c | 5 +- src/edata_cache.c | 15 +- src/ehooks.c | 70 +- src/emap.c | 105 +- src/eset.c | 161 +- src/extent.c | 416 +-- src/extent_dss.c | 
89 +- src/extent_mmap.c | 6 +- src/fxp.c | 14 +- src/hook.c | 87 +- src/hpa.c | 227 +- src/hpa_hooks.c | 33 +- src/hpdata.c | 62 +- src/inspect.c | 6 +- src/jemalloc.c | 1499 ++++----- src/jemalloc_cpp.cpp | 109 +- src/large.c | 84 +- src/log.c | 14 +- src/malloc_io.c | 451 +-- src/mutex.c | 52 +- src/nstime.c | 42 +- src/pa.c | 32 +- src/pa_extra.c | 20 +- src/pac.c | 186 +- src/pages.c | 227 +- src/pai.c | 6 +- src/peak_event.c | 12 +- src/prof.c | 140 +- src/prof_data.c | 297 +- src/prof_log.c | 130 +- src/prof_recent.c | 79 +- src/prof_stack_range.c | 209 +- src/prof_stats.c | 4 +- src/prof_sys.c | 223 +- src/prof_threshold.c | 16 +- src/psset.c | 67 +- src/rtree.c | 125 +- src/safety_check.c | 23 +- src/san.c | 31 +- src/san_bump.c | 35 +- src/sc.c | 10 +- src/sec.c | 76 +- src/stats.c | 1045 +++---- src/sz.c | 14 +- src/tcache.c | 456 +-- src/thread_event.c | 192 +- src/thread_event_registry.c | 29 +- src/ticker.c | 15 +- src/tsd.c | 123 +- src/util.c | 5 +- src/witness.c | 14 +- src/zone.c | 113 +- test/analyze/prof_bias.c | 8 +- test/analyze/rand.c | 64 +- test/analyze/sizes.c | 9 +- test/include/test/SFMT-alti.h | 186 +- test/include/test/SFMT-params.h | 40 +- test/include/test/SFMT-params11213.h | 88 +- test/include/test/SFMT-params1279.h | 88 +- test/include/test/SFMT-params132049.h | 88 +- test/include/test/SFMT-params19937.h | 88 +- test/include/test/SFMT-params216091.h | 88 +- test/include/test/SFMT-params2281.h | 88 +- test/include/test/SFMT-params4253.h | 88 +- test/include/test/SFMT-params44497.h | 88 +- test/include/test/SFMT-params607.h | 88 +- test/include/test/SFMT-params86243.h | 88 +- test/include/test/SFMT-sse2.h | 150 +- test/include/test/SFMT.h | 84 +- test/include/test/arena_util.h | 41 +- test/include/test/bench.h | 32 +- test/include/test/bgthd.h | 4 +- test/include/test/btalloc.h | 52 +- test/include/test/extent_hooks.h | 191 +- test/include/test/fork.h | 8 +- test/include/test/math.h | 194 +- test/include/test/mq.h | 148 +- 
test/include/test/mtx.h | 14 +- test/include/test/nbits.h | 208 +- test/include/test/san.h | 9 +- test/include/test/test.h | 956 +++--- test/include/test/timer.h | 8 +- test/integration/MALLOCX_ARENA.c | 24 +- test/integration/aligned_alloc.c | 47 +- test/integration/allocated.c | 36 +- test/integration/cpp/basic.cpp | 3 +- test/integration/cpp/infallible_new_false.cpp | 4 +- test/integration/cpp/infallible_new_true.cpp | 12 +- test/integration/extent.c | 108 +- test/integration/malloc.c | 3 +- test/integration/mallocx.c | 117 +- test/integration/overflow.c | 19 +- test/integration/posix_memalign.c | 54 +- test/integration/rallocx.c | 132 +- test/integration/sdallocx.c | 25 +- test/integration/slab_sizes.c | 24 +- test/integration/smallocx.c | 130 +- test/integration/thread_arena.c | 29 +- test/integration/thread_tcache_enabled.c | 57 +- test/integration/xallocx.c | 111 +- test/src/SFMT.c | 739 ++--- test/src/mtx.c | 6 +- test/src/sleep.c | 4 +- test/src/test.c | 49 +- test/src/thd.c | 5 +- test/src/timer.c | 22 +- test/stress/batch_alloc.c | 35 +- test/stress/cpp/microbench.cpp | 42 +- test/stress/fill_flush.c | 18 +- test/stress/hookbench.c | 27 +- test/stress/large_microbench.c | 6 +- test/stress/mallctl.c | 35 +- test/stress/microbench.c | 19 +- test/unit/SFMT.c | 2779 ++++++++--------- test/unit/a0.c | 3 +- test/unit/arena_decay.c | 129 +- test/unit/arena_reset.c | 115 +- test/unit/atomic.c | 11 +- test/unit/background_thread.c | 37 +- test/unit/background_thread_enable.c | 50 +- test/unit/base.c | 125 +- test/unit/batch_alloc.c | 54 +- test/unit/batcher.c | 75 +- test/unit/bin_batching.c | 44 +- test/unit/binshard.c | 40 +- test/unit/bit_util.c | 168 +- test/unit/bitmap.c | 130 +- test/unit/buf_writer.c | 72 +- test/unit/cache_bin.c | 138 +- test/unit/ckh.c | 87 +- test/unit/counter.c | 13 +- test/unit/decay.c | 76 +- test/unit/div.c | 11 +- test/unit/double_free.c | 22 +- test/unit/edata_cache.c | 20 +- test/unit/emitter.c | 535 ++-- 
test/unit/extent_quantize.c | 76 +- test/unit/fb.c | 220 +- test/unit/fork.c | 10 +- test/unit/fxp.c | 138 +- test/unit/hash.c | 101 +- test/unit/hook.c | 177 +- test/unit/hpa.c | 211 +- test/unit/hpa_background_thread.c | 52 +- test/unit/hpa_vectorized_madvise.c | 73 +- .../unit/hpa_vectorized_madvise_large_batch.c | 48 +- test/unit/hpdata.c | 95 +- test/unit/huge.c | 64 +- test/unit/inspect.c | 120 +- test/unit/junk.c | 107 +- test/unit/log.c | 58 +- test/unit/mallctl.c | 780 ++--- test/unit/malloc_conf_2.c | 26 +- test/unit/malloc_io.c | 171 +- test/unit/math.c | 512 ++- test/unit/mpsc_queue.c | 54 +- test/unit/mq.c | 31 +- test/unit/mtx.c | 22 +- test/unit/ncached_max.c | 116 +- test/unit/nstime.c | 106 +- test/unit/oversize_threshold.c | 21 +- test/unit/pa.c | 38 +- test/unit/pack.c | 64 +- test/unit/pages.c | 18 +- test/unit/peak.c | 10 +- test/unit/ph.c | 109 +- test/unit/prng.c | 80 +- test/unit/prof_accum.c | 31 +- test/unit/prof_active.c | 46 +- test/unit/prof_gdump.c | 21 +- test/unit/prof_hook.c | 125 +- test/unit/prof_idump.c | 20 +- test/unit/prof_log.c | 45 +- test/unit/prof_mdump.c | 23 +- test/unit/prof_recent.c | 331 +- test/unit/prof_reset.c | 77 +- test/unit/prof_small.c | 9 +- test/unit/prof_stats.c | 84 +- test/unit/prof_sys_thread_name.c | 21 +- test/unit/prof_tctx.c | 19 +- test/unit/prof_thread_name.c | 54 +- test/unit/prof_threshold.c | 33 +- test/unit/psset.c | 129 +- test/unit/ql.c | 101 +- test/unit/qr.c | 73 +- test/unit/rb.c | 285 +- test/unit/retained.c | 72 +- test/unit/rtree.c | 158 +- test/unit/safety_check.c | 32 +- test/unit/san.c | 35 +- test/unit/san_bump.c | 38 +- test/unit/sc.c | 11 +- test/unit/sec.c | 116 +- test/unit/seq.c | 17 +- test/unit/size_check.c | 6 +- test/unit/size_classes.c | 112 +- test/unit/slab.c | 24 +- test/unit/smoothstep.c | 27 +- test/unit/spin.c | 3 +- test/unit/stats.c | 254 +- test/unit/stats_print.c | 717 +++-- test/unit/sz.c | 31 +- test/unit/tcache_max.c | 24 +- test/unit/test_hooks.c | 4 +- 
test/unit/thread_event.c | 18 +- test/unit/ticker.c | 55 +- test/unit/tsd.c | 28 +- test/unit/uaf.c | 45 +- test/unit/witness.c | 47 +- test/unit/zero.c | 26 +- test/unit/zero_realloc_abort.c | 7 +- test/unit/zero_realloc_alloc.c | 17 +- test/unit/zero_realloc_free.c | 10 +- test/unit/zero_reallocs.c | 8 +- 346 files changed, 18286 insertions(+), 17770 deletions(-) diff --git a/include/jemalloc/internal/activity_callback.h b/include/jemalloc/internal/activity_callback.h index 0f4f3962..6745f1a2 100644 --- a/include/jemalloc/internal/activity_callback.h +++ b/include/jemalloc/internal/activity_callback.h @@ -13,13 +13,14 @@ * * The calls to this thunk get driven by the peak_event module. */ -#define ACTIVITY_CALLBACK_THUNK_INITIALIZER {NULL, NULL} -typedef void (*activity_callback_t)(void *uctx, uint64_t allocated, - uint64_t deallocated); +#define ACTIVITY_CALLBACK_THUNK_INITIALIZER \ + { NULL, NULL } +typedef void (*activity_callback_t)( + void *uctx, uint64_t allocated, uint64_t deallocated); typedef struct activity_callback_thunk_s activity_callback_thunk_t; struct activity_callback_thunk_s { activity_callback_t callback; - void *uctx; + void *uctx; }; #endif /* JEMALLOC_INTERNAL_ACTIVITY_CALLBACK_H */ diff --git a/include/jemalloc/internal/arena_externs.h b/include/jemalloc/internal/arena_externs.h index 91fed258..39d2099d 100644 --- a/include/jemalloc/internal/arena_externs.h +++ b/include/jemalloc/internal/arena_externs.h @@ -21,7 +21,7 @@ extern ssize_t opt_dirty_decay_ms; extern ssize_t opt_muzzy_decay_ms; extern percpu_arena_mode_t opt_percpu_arena; -extern const char *const percpu_arena_mode_names[]; +extern const char *const percpu_arena_mode_names[]; extern div_info_t arena_binind_div_info[SC_NBINS]; @@ -30,7 +30,7 @@ extern emap_t arena_emap_global; extern size_t opt_oversize_threshold; extern size_t oversize_threshold; -extern bool opt_huge_arena_pac_thp; +extern bool opt_huge_arena_pac_thp; extern pac_thp_t huge_arena_pac_thp; /* @@ -39,90 +39,90 @@ 
extern pac_thp_t huge_arena_pac_thp; */ extern uint32_t arena_bin_offsets[SC_NBINS]; -void arena_basic_stats_merge(tsdn_t *tsdn, arena_t *arena, - unsigned *nthreads, const char **dss, ssize_t *dirty_decay_ms, - ssize_t *muzzy_decay_ms, size_t *nactive, size_t *ndirty, size_t *nmuzzy); +void arena_basic_stats_merge(tsdn_t *tsdn, arena_t *arena, unsigned *nthreads, + const char **dss, ssize_t *dirty_decay_ms, ssize_t *muzzy_decay_ms, + size_t *nactive, size_t *ndirty, size_t *nmuzzy); void arena_stats_merge(tsdn_t *tsdn, arena_t *arena, unsigned *nthreads, const char **dss, ssize_t *dirty_decay_ms, ssize_t *muzzy_decay_ms, size_t *nactive, size_t *ndirty, size_t *nmuzzy, arena_stats_t *astats, - bin_stats_data_t *bstats, arena_stats_large_t *lstats, - pac_estats_t *estats, hpa_shard_stats_t *hpastats, sec_stats_t *secstats); + bin_stats_data_t *bstats, arena_stats_large_t *lstats, pac_estats_t *estats, + hpa_shard_stats_t *hpastats, sec_stats_t *secstats); void arena_handle_deferred_work(tsdn_t *tsdn, arena_t *arena); -edata_t *arena_extent_alloc_large(tsdn_t *tsdn, arena_t *arena, - size_t usize, size_t alignment, bool zero); -void arena_extent_dalloc_large_prep(tsdn_t *tsdn, arena_t *arena, - edata_t *edata); -void arena_extent_ralloc_large_shrink(tsdn_t *tsdn, arena_t *arena, - edata_t *edata, size_t oldusize); -void arena_extent_ralloc_large_expand(tsdn_t *tsdn, arena_t *arena, - edata_t *edata, size_t oldusize); -bool arena_decay_ms_set(tsdn_t *tsdn, arena_t *arena, extent_state_t state, - ssize_t decay_ms); +edata_t *arena_extent_alloc_large( + tsdn_t *tsdn, arena_t *arena, size_t usize, size_t alignment, bool zero); +void arena_extent_dalloc_large_prep( + tsdn_t *tsdn, arena_t *arena, edata_t *edata); +void arena_extent_ralloc_large_shrink( + tsdn_t *tsdn, arena_t *arena, edata_t *edata, size_t oldusize); +void arena_extent_ralloc_large_expand( + tsdn_t *tsdn, arena_t *arena, edata_t *edata, size_t oldusize); +bool arena_decay_ms_set( + tsdn_t *tsdn, arena_t 
*arena, extent_state_t state, ssize_t decay_ms); ssize_t arena_decay_ms_get(arena_t *arena, extent_state_t state); -void arena_decay(tsdn_t *tsdn, arena_t *arena, bool is_background_thread, - bool all); +void arena_decay( + tsdn_t *tsdn, arena_t *arena, bool is_background_thread, bool all); uint64_t arena_time_until_deferred(tsdn_t *tsdn, arena_t *arena); -void arena_do_deferred_work(tsdn_t *tsdn, arena_t *arena); -void arena_reset(tsd_t *tsd, arena_t *arena); -void arena_destroy(tsd_t *tsd, arena_t *arena); -void arena_cache_bin_fill_small(tsdn_t *tsdn, arena_t *arena, - cache_bin_t *cache_bin, szind_t binind, const cache_bin_sz_t nfill_min, - const cache_bin_sz_t nfill_max); +void arena_do_deferred_work(tsdn_t *tsdn, arena_t *arena); +void arena_reset(tsd_t *tsd, arena_t *arena); +void arena_destroy(tsd_t *tsd, arena_t *arena); +void arena_cache_bin_fill_small(tsdn_t *tsdn, arena_t *arena, + cache_bin_t *cache_bin, szind_t binind, const cache_bin_sz_t nfill_min, + const cache_bin_sz_t nfill_max); -void *arena_malloc_hard(tsdn_t *tsdn, arena_t *arena, size_t size, - szind_t ind, bool zero, bool slab); -void *arena_palloc(tsdn_t *tsdn, arena_t *arena, size_t usize, - size_t alignment, bool zero, bool slab, tcache_t *tcache); -void arena_prof_promote(tsdn_t *tsdn, void *ptr, size_t usize, - size_t bumped_usize); -void arena_dalloc_promoted(tsdn_t *tsdn, void *ptr, tcache_t *tcache, - bool slow_path); +void *arena_malloc_hard(tsdn_t *tsdn, arena_t *arena, size_t size, szind_t ind, + bool zero, bool slab); +void *arena_palloc(tsdn_t *tsdn, arena_t *arena, size_t usize, size_t alignment, + bool zero, bool slab, tcache_t *tcache); +void arena_prof_promote( + tsdn_t *tsdn, void *ptr, size_t usize, size_t bumped_usize); +void arena_dalloc_promoted( + tsdn_t *tsdn, void *ptr, tcache_t *tcache, bool slow_path); void arena_slab_dalloc(tsdn_t *tsdn, arena_t *arena, edata_t *slab); -void arena_dalloc_bin_locked_handle_newly_empty(tsdn_t *tsdn, arena_t *arena, - edata_t *slab, 
bin_t *bin); -void arena_dalloc_bin_locked_handle_newly_nonempty(tsdn_t *tsdn, arena_t *arena, - edata_t *slab, bin_t *bin); -void arena_dalloc_small(tsdn_t *tsdn, void *ptr); -bool arena_ralloc_no_move(tsdn_t *tsdn, void *ptr, size_t oldsize, size_t size, - size_t extra, bool zero, size_t *newsize); +void arena_dalloc_bin_locked_handle_newly_empty( + tsdn_t *tsdn, arena_t *arena, edata_t *slab, bin_t *bin); +void arena_dalloc_bin_locked_handle_newly_nonempty( + tsdn_t *tsdn, arena_t *arena, edata_t *slab, bin_t *bin); +void arena_dalloc_small(tsdn_t *tsdn, void *ptr); +bool arena_ralloc_no_move(tsdn_t *tsdn, void *ptr, size_t oldsize, size_t size, + size_t extra, bool zero, size_t *newsize); void *arena_ralloc(tsdn_t *tsdn, arena_t *arena, void *ptr, size_t oldsize, size_t size, size_t alignment, bool zero, bool slab, tcache_t *tcache, hook_ralloc_args_t *hook_args); -dss_prec_t arena_dss_prec_get(arena_t *arena); -ehooks_t *arena_get_ehooks(arena_t *arena); -extent_hooks_t *arena_set_extent_hooks(tsd_t *tsd, arena_t *arena, - extent_hooks_t *extent_hooks); -bool arena_dss_prec_set(arena_t *arena, dss_prec_t dss_prec); -void arena_name_get(arena_t *arena, char *name); -void arena_name_set(arena_t *arena, const char *name); +dss_prec_t arena_dss_prec_get(arena_t *arena); +ehooks_t *arena_get_ehooks(arena_t *arena); +extent_hooks_t *arena_set_extent_hooks( + tsd_t *tsd, arena_t *arena, extent_hooks_t *extent_hooks); +bool arena_dss_prec_set(arena_t *arena, dss_prec_t dss_prec); +void arena_name_get(arena_t *arena, char *name); +void arena_name_set(arena_t *arena, const char *name); ssize_t arena_dirty_decay_ms_default_get(void); -bool arena_dirty_decay_ms_default_set(ssize_t decay_ms); +bool arena_dirty_decay_ms_default_set(ssize_t decay_ms); ssize_t arena_muzzy_decay_ms_default_get(void); -bool arena_muzzy_decay_ms_default_set(ssize_t decay_ms); -bool arena_retain_grow_limit_get_set(tsd_t *tsd, arena_t *arena, - size_t *old_limit, size_t *new_limit); +bool 
arena_muzzy_decay_ms_default_set(ssize_t decay_ms); +bool arena_retain_grow_limit_get_set( + tsd_t *tsd, arena_t *arena, size_t *old_limit, size_t *new_limit); unsigned arena_nthreads_get(arena_t *arena, bool internal); -void arena_nthreads_inc(arena_t *arena, bool internal); -void arena_nthreads_dec(arena_t *arena, bool internal); +void arena_nthreads_inc(arena_t *arena, bool internal); +void arena_nthreads_dec(arena_t *arena, bool internal); arena_t *arena_new(tsdn_t *tsdn, unsigned ind, const arena_config_t *config); -bool arena_init_huge(tsdn_t *tsdn, arena_t *a0); +bool arena_init_huge(tsdn_t *tsdn, arena_t *a0); arena_t *arena_choose_huge(tsd_t *tsd); -bin_t *arena_bin_choose(tsdn_t *tsdn, arena_t *arena, szind_t binind, - unsigned *binshard); +bin_t *arena_bin_choose( + tsdn_t *tsdn, arena_t *arena, szind_t binind, unsigned *binshard); size_t arena_fill_small_fresh(tsdn_t *tsdn, arena_t *arena, szind_t binind, void **ptrs, size_t nfill, bool zero); -bool arena_boot(sc_data_t *sc_data, base_t *base, bool hpa); -void arena_prefork0(tsdn_t *tsdn, arena_t *arena); -void arena_prefork1(tsdn_t *tsdn, arena_t *arena); -void arena_prefork2(tsdn_t *tsdn, arena_t *arena); -void arena_prefork3(tsdn_t *tsdn, arena_t *arena); -void arena_prefork4(tsdn_t *tsdn, arena_t *arena); -void arena_prefork5(tsdn_t *tsdn, arena_t *arena); -void arena_prefork6(tsdn_t *tsdn, arena_t *arena); -void arena_prefork7(tsdn_t *tsdn, arena_t *arena); -void arena_prefork8(tsdn_t *tsdn, arena_t *arena); -void arena_postfork_parent(tsdn_t *tsdn, arena_t *arena); -void arena_postfork_child(tsdn_t *tsdn, arena_t *arena); +bool arena_boot(sc_data_t *sc_data, base_t *base, bool hpa); +void arena_prefork0(tsdn_t *tsdn, arena_t *arena); +void arena_prefork1(tsdn_t *tsdn, arena_t *arena); +void arena_prefork2(tsdn_t *tsdn, arena_t *arena); +void arena_prefork3(tsdn_t *tsdn, arena_t *arena); +void arena_prefork4(tsdn_t *tsdn, arena_t *arena); +void arena_prefork5(tsdn_t *tsdn, arena_t *arena); +void 
arena_prefork6(tsdn_t *tsdn, arena_t *arena); +void arena_prefork7(tsdn_t *tsdn, arena_t *arena); +void arena_prefork8(tsdn_t *tsdn, arena_t *arena); +void arena_postfork_parent(tsdn_t *tsdn, arena_t *arena); +void arena_postfork_child(tsdn_t *tsdn, arena_t *arena); #endif /* JEMALLOC_INTERNAL_ARENA_EXTERNS_H */ diff --git a/include/jemalloc/internal/arena_inlines_b.h b/include/jemalloc/internal/arena_inlines_b.h index 61008b59..549dfb8a 100644 --- a/include/jemalloc/internal/arena_inlines_b.h +++ b/include/jemalloc/internal/arena_inlines_b.h @@ -21,8 +21,8 @@ static inline arena_t * arena_get_from_edata(edata_t *edata) { - return (arena_t *)atomic_load_p(&arenas[edata_arena_ind_get(edata)], - ATOMIC_RELAXED); + return (arena_t *)atomic_load_p( + &arenas[edata_arena_ind_get(edata)], ATOMIC_RELAXED); } JEMALLOC_ALWAYS_INLINE arena_t * @@ -61,15 +61,17 @@ large_dalloc_safety_checks(edata_t *edata, const void *ptr, size_t input_size) { * The cost is low enough (as edata will be accessed anyway) to be * enabled all the time. */ - if (unlikely(edata == NULL || - edata_state_get(edata) != extent_state_active)) { - safety_check_fail("Invalid deallocation detected: " + if (unlikely(edata == NULL + || edata_state_get(edata) != extent_state_active)) { + safety_check_fail( + "Invalid deallocation detected: " "pages being freed (%p) not currently active, " - "possibly caused by double free bugs.", ptr); + "possibly caused by double free bugs.", + ptr); return true; } - if (unlikely(input_size != edata_usize_get(edata) || - input_size > SC_LARGE_MAXCLASS)) { + if (unlikely(input_size != edata_usize_get(edata) + || input_size > SC_LARGE_MAXCLASS)) { safety_check_fail_sized_dealloc(/* current_dealloc */ true, ptr, /* true_size */ edata_usize_get(edata), input_size); return true; @@ -86,25 +88,26 @@ arena_prof_info_get(tsd_t *tsd, const void *ptr, emap_alloc_ctx_t *alloc_ctx, assert(prof_info != NULL); edata_t *edata = NULL; - bool is_slab; + bool is_slab; /* Static check. 
*/ if (alloc_ctx == NULL) { - edata = emap_edata_lookup(tsd_tsdn(tsd), &arena_emap_global, - ptr); + edata = emap_edata_lookup( + tsd_tsdn(tsd), &arena_emap_global, ptr); is_slab = edata_slab_get(edata); } else if (unlikely(!(is_slab = alloc_ctx->slab))) { - edata = emap_edata_lookup(tsd_tsdn(tsd), &arena_emap_global, - ptr); + edata = emap_edata_lookup( + tsd_tsdn(tsd), &arena_emap_global, ptr); } if (unlikely(!is_slab)) { /* edata must have been initialized at this point. */ assert(edata != NULL); - size_t usize = (alloc_ctx == NULL)? edata_usize_get(edata): - emap_alloc_ctx_usize_get(alloc_ctx); - if (reset_recent && - large_dalloc_safety_checks(edata, ptr, usize)) { + size_t usize = (alloc_ctx == NULL) + ? edata_usize_get(edata) + : emap_alloc_ctx_usize_get(alloc_ctx); + if (reset_recent + && large_dalloc_safety_checks(edata, ptr, usize)) { prof_info->alloc_tctx = PROF_TCTX_SENTINEL; return; } @@ -119,22 +122,22 @@ arena_prof_info_get(tsd_t *tsd, const void *ptr, emap_alloc_ctx_t *alloc_ctx, } JEMALLOC_ALWAYS_INLINE void -arena_prof_tctx_reset(tsd_t *tsd, const void *ptr, - emap_alloc_ctx_t *alloc_ctx) { +arena_prof_tctx_reset( + tsd_t *tsd, const void *ptr, emap_alloc_ctx_t *alloc_ctx) { cassert(config_prof); assert(ptr != NULL); /* Static check. 
*/ if (alloc_ctx == NULL) { - edata_t *edata = emap_edata_lookup(tsd_tsdn(tsd), - &arena_emap_global, ptr); + edata_t *edata = emap_edata_lookup( + tsd_tsdn(tsd), &arena_emap_global, ptr); if (unlikely(!edata_slab_get(edata))) { large_prof_tctx_reset(edata); } } else { if (unlikely(!alloc_ctx->slab)) { - edata_t *edata = emap_edata_lookup(tsd_tsdn(tsd), - &arena_emap_global, ptr); + edata_t *edata = emap_edata_lookup( + tsd_tsdn(tsd), &arena_emap_global, ptr); large_prof_tctx_reset(edata); } } @@ -145,16 +148,16 @@ arena_prof_tctx_reset_sampled(tsd_t *tsd, const void *ptr) { cassert(config_prof); assert(ptr != NULL); - edata_t *edata = emap_edata_lookup(tsd_tsdn(tsd), &arena_emap_global, - ptr); + edata_t *edata = emap_edata_lookup( + tsd_tsdn(tsd), &arena_emap_global, ptr); assert(!edata_slab_get(edata)); large_prof_tctx_reset(edata); } JEMALLOC_ALWAYS_INLINE void -arena_prof_info_set(tsd_t *tsd, edata_t *edata, prof_tctx_t *tctx, - size_t size) { +arena_prof_info_set( + tsd_t *tsd, edata_t *edata, prof_tctx_t *tctx, size_t size) { cassert(config_prof); assert(!edata_slab_get(edata)); @@ -177,9 +180,9 @@ arena_decay_ticks(tsdn_t *tsdn, arena_t *arena, unsigned nticks) { * use a single ticker for all of them. 
*/ ticker_geom_t *decay_ticker = tsd_arena_decay_tickerp_get(tsd); - uint64_t *prng_state = tsd_prng_statep_get(tsd); + uint64_t *prng_state = tsd_prng_statep_get(tsd); if (unlikely(ticker_geom_ticks(decay_ticker, prng_state, nticks, - tsd_reentrancy_level_get(tsd) > 0))) { + tsd_reentrancy_level_get(tsd) > 0))) { arena_decay(tsdn, arena, false, false); } } @@ -197,14 +200,13 @@ arena_malloc(tsdn_t *tsdn, arena_t *arena, size_t size, szind_t ind, bool zero, if (likely(tcache != NULL)) { if (likely(slab)) { assert(sz_can_use_slab(size)); - return tcache_alloc_small(tsdn_tsd(tsdn), arena, - tcache, size, ind, zero, slow_path); - } else if (likely( - ind < tcache_nbins_get(tcache->tcache_slow) && - !tcache_bin_disabled(ind, &tcache->bins[ind], - tcache->tcache_slow))) { - return tcache_alloc_large(tsdn_tsd(tsdn), arena, - tcache, size, ind, zero, slow_path); + return tcache_alloc_small(tsdn_tsd(tsdn), arena, tcache, + size, ind, zero, slow_path); + } else if (likely(ind < tcache_nbins_get(tcache->tcache_slow) + && !tcache_bin_disabled(ind, &tcache->bins[ind], + tcache->tcache_slow))) { + return tcache_alloc_large(tsdn_tsd(tsdn), arena, tcache, + size, ind, zero, slow_path); } /* (size > tcache_max) case falls through. 
*/ } @@ -241,8 +243,8 @@ arena_vsalloc(tsdn_t *tsdn, const void *ptr) { */ emap_full_alloc_ctx_t full_alloc_ctx; - bool missing = emap_full_alloc_ctx_try_lookup(tsdn, &arena_emap_global, - ptr, &full_alloc_ctx); + bool missing = emap_full_alloc_ctx_try_lookup( + tsdn, &arena_emap_global, ptr, &full_alloc_ctx); if (missing) { return 0; } @@ -261,8 +263,8 @@ arena_vsalloc(tsdn_t *tsdn, const void *ptr) { } static inline void -arena_dalloc_large_no_tcache(tsdn_t *tsdn, void *ptr, szind_t szind, - size_t usize) { +arena_dalloc_large_no_tcache( + tsdn_t *tsdn, void *ptr, szind_t szind, size_t usize) { /* * szind is still needed in this function mainly becuase * szind < SC_NBINS determines not only if this is a small alloc, @@ -272,8 +274,8 @@ arena_dalloc_large_no_tcache(tsdn_t *tsdn, void *ptr, szind_t szind, if (config_prof && unlikely(szind < SC_NBINS)) { arena_dalloc_promoted(tsdn, ptr, NULL, true); } else { - edata_t *edata = emap_edata_lookup(tsdn, &arena_emap_global, - ptr); + edata_t *edata = emap_edata_lookup( + tsdn, &arena_emap_global, ptr); if (large_dalloc_safety_checks(edata, ptr, usize)) { /* See the comment in isfree. 
*/ return; @@ -290,13 +292,13 @@ arena_dalloc_no_tcache(tsdn_t *tsdn, void *ptr) { emap_alloc_ctx_lookup(tsdn, &arena_emap_global, ptr, &alloc_ctx); if (config_debug) { - edata_t *edata = emap_edata_lookup(tsdn, &arena_emap_global, - ptr); + edata_t *edata = emap_edata_lookup( + tsdn, &arena_emap_global, ptr); assert(alloc_ctx.szind == edata_szind_get(edata)); assert(alloc_ctx.szind < SC_NSIZES); assert(alloc_ctx.slab == edata_slab_get(edata)); - assert(emap_alloc_ctx_usize_get(&alloc_ctx) == - edata_usize_get(edata)); + assert(emap_alloc_ctx_usize_get(&alloc_ctx) + == edata_usize_get(edata)); } if (likely(alloc_ctx.slab)) { @@ -311,19 +313,19 @@ arena_dalloc_no_tcache(tsdn_t *tsdn, void *ptr) { JEMALLOC_ALWAYS_INLINE void arena_dalloc_large(tsdn_t *tsdn, void *ptr, tcache_t *tcache, szind_t szind, size_t usize, bool slow_path) { - assert (!tsdn_null(tsdn) && tcache != NULL); + assert(!tsdn_null(tsdn) && tcache != NULL); bool is_sample_promoted = config_prof && szind < SC_NBINS; if (unlikely(is_sample_promoted)) { arena_dalloc_promoted(tsdn, ptr, tcache, slow_path); } else { - if (szind < tcache_nbins_get(tcache->tcache_slow) && - !tcache_bin_disabled(szind, &tcache->bins[szind], - tcache->tcache_slow)) { - tcache_dalloc_large(tsdn_tsd(tsdn), tcache, ptr, szind, - slow_path); + if (szind < tcache_nbins_get(tcache->tcache_slow) + && !tcache_bin_disabled( + szind, &tcache->bins[szind], tcache->tcache_slow)) { + tcache_dalloc_large( + tsdn_tsd(tsdn), tcache, ptr, szind, slow_path); } else { - edata_t *edata = emap_edata_lookup(tsdn, - &arena_emap_global, ptr); + edata_t *edata = emap_edata_lookup( + tsdn, &arena_emap_global, ptr); if (large_dalloc_safety_checks(edata, ptr, usize)) { /* See the comment in isfree. */ return; @@ -335,16 +337,17 @@ arena_dalloc_large(tsdn_t *tsdn, void *ptr, tcache_t *tcache, szind_t szind, /* Find the region index of a pointer. 
*/ JEMALLOC_ALWAYS_INLINE size_t -arena_slab_regind_impl(div_info_t* div_info, szind_t binind, - edata_t *slab, const void *ptr) { +arena_slab_regind_impl( + div_info_t *div_info, szind_t binind, edata_t *slab, const void *ptr) { size_t diff, regind; /* Freeing a pointer outside the slab can cause assertion failure. */ assert((uintptr_t)ptr >= (uintptr_t)edata_addr_get(slab)); assert((uintptr_t)ptr < (uintptr_t)edata_past_get(slab)); /* Freeing an interior pointer can cause assertion failure. */ - assert(((uintptr_t)ptr - (uintptr_t)edata_addr_get(slab)) % - (uintptr_t)bin_infos[binind].reg_size == 0); + assert(((uintptr_t)ptr - (uintptr_t)edata_addr_get(slab)) + % (uintptr_t)bin_infos[binind].reg_size + == 0); diff = (size_t)((uintptr_t)ptr - (uintptr_t)edata_addr_get(slab)); @@ -360,22 +363,23 @@ arena_tcache_dalloc_small_safety_check(tsdn_t *tsdn, void *ptr) { if (!config_debug) { return false; } - edata_t *edata = emap_edata_lookup(tsdn, &arena_emap_global, ptr); - szind_t binind = edata_szind_get(edata); + edata_t *edata = emap_edata_lookup(tsdn, &arena_emap_global, ptr); + szind_t binind = edata_szind_get(edata); div_info_t div_info = arena_binind_div_info[binind]; /* * Calls the internal function arena_slab_regind_impl because the * safety check does not require a lock. 
*/ size_t regind = arena_slab_regind_impl(&div_info, binind, edata, ptr); - slab_data_t *slab_data = edata_slab_data_get(edata); + slab_data_t *slab_data = edata_slab_data_get(edata); const bin_info_t *bin_info = &bin_infos[binind]; assert(edata_nfree_get(edata) < bin_info->nregs); - if (unlikely(!bitmap_get(slab_data->bitmap, &bin_info->bitmap_info, - regind))) { + if (unlikely(!bitmap_get( + slab_data->bitmap, &bin_info->bitmap_info, regind))) { safety_check_fail( "Invalid deallocation detected: the pointer being freed (%p) not " - "currently active, possibly caused by double free bugs.\n", ptr); + "currently active, possibly caused by double free bugs.\n", + ptr); return true; } return false; @@ -397,18 +401,18 @@ arena_dalloc(tsdn_t *tsdn, void *ptr, tcache_t *tcache, alloc_ctx = *caller_alloc_ctx; } else { util_assume(tsdn != NULL); - emap_alloc_ctx_lookup(tsdn, &arena_emap_global, ptr, - &alloc_ctx); + emap_alloc_ctx_lookup( + tsdn, &arena_emap_global, ptr, &alloc_ctx); } if (config_debug) { - edata_t *edata = emap_edata_lookup(tsdn, &arena_emap_global, - ptr); + edata_t *edata = emap_edata_lookup( + tsdn, &arena_emap_global, ptr); assert(alloc_ctx.szind == edata_szind_get(edata)); assert(alloc_ctx.szind < SC_NSIZES); assert(alloc_ctx.slab == edata_slab_get(edata)); - assert(emap_alloc_ctx_usize_get(&alloc_ctx) == - edata_usize_get(edata)); + assert(emap_alloc_ctx_usize_get(&alloc_ctx) + == edata_usize_get(edata)); } if (likely(alloc_ctx.slab)) { @@ -416,8 +420,8 @@ arena_dalloc(tsdn_t *tsdn, void *ptr, tcache_t *tcache, if (arena_tcache_dalloc_small_safety_check(tsdn, ptr)) { return; } - tcache_dalloc_small(tsdn_tsd(tsdn), tcache, ptr, - alloc_ctx.szind, slow_path); + tcache_dalloc_small( + tsdn_tsd(tsdn), tcache, ptr, alloc_ctx.szind, slow_path); } else { arena_dalloc_large(tsdn, ptr, tcache, alloc_ctx.szind, emap_alloc_ctx_usize_get(&alloc_ctx), slow_path); @@ -436,21 +440,21 @@ arena_sdalloc_no_tcache(tsdn_t *tsdn, void *ptr, size_t size) { * object, so 
base szind and slab on the given size. */ szind_t szind = sz_size2index(size); - emap_alloc_ctx_init(&alloc_ctx, szind, (szind < SC_NBINS), - size); + emap_alloc_ctx_init( + &alloc_ctx, szind, (szind < SC_NBINS), size); } if ((config_prof && opt_prof) || config_debug) { - emap_alloc_ctx_lookup(tsdn, &arena_emap_global, ptr, - &alloc_ctx); + emap_alloc_ctx_lookup( + tsdn, &arena_emap_global, ptr, &alloc_ctx); assert(alloc_ctx.szind == sz_size2index(size)); assert((config_prof && opt_prof) || alloc_ctx.slab == (alloc_ctx.szind < SC_NBINS)); if (config_debug) { - edata_t *edata = emap_edata_lookup(tsdn, - &arena_emap_global, ptr); + edata_t *edata = emap_edata_lookup( + tsdn, &arena_emap_global, ptr); assert(alloc_ctx.szind == edata_szind_get(edata)); assert(alloc_ctx.slab == edata_slab_get(edata)); } @@ -481,8 +485,8 @@ arena_sdalloc(tsdn_t *tsdn, void *ptr, size_t size, tcache_t *tcache, if (config_prof && opt_prof) { if (caller_alloc_ctx == NULL) { /* Uncommon case and should be a static check. 
*/ - emap_alloc_ctx_lookup(tsdn, &arena_emap_global, ptr, - &alloc_ctx); + emap_alloc_ctx_lookup( + tsdn, &arena_emap_global, ptr, &alloc_ctx); assert(alloc_ctx.szind == sz_size2index(size)); assert(emap_alloc_ctx_usize_get(&alloc_ctx) == size); } else { @@ -498,14 +502,14 @@ arena_sdalloc(tsdn_t *tsdn, void *ptr, size_t size, tcache_t *tcache, } if (config_debug) { - edata_t *edata = emap_edata_lookup(tsdn, &arena_emap_global, - ptr); + edata_t *edata = emap_edata_lookup( + tsdn, &arena_emap_global, ptr); assert(alloc_ctx.szind == edata_szind_get(edata)); assert(alloc_ctx.slab == edata_slab_get(edata)); - emap_alloc_ctx_init(&alloc_ctx, alloc_ctx.szind, alloc_ctx.slab, - sz_s2u(size)); - assert(emap_alloc_ctx_usize_get(&alloc_ctx) == - edata_usize_get(edata)); + emap_alloc_ctx_init( + &alloc_ctx, alloc_ctx.szind, alloc_ctx.slab, sz_s2u(size)); + assert(emap_alloc_ctx_usize_get(&alloc_ctx) + == edata_usize_get(edata)); } if (likely(alloc_ctx.slab)) { @@ -513,8 +517,8 @@ arena_sdalloc(tsdn_t *tsdn, void *ptr, size_t size, tcache_t *tcache, if (arena_tcache_dalloc_small_safety_check(tsdn, ptr)) { return; } - tcache_dalloc_small(tsdn_tsd(tsdn), tcache, ptr, - alloc_ctx.szind, slow_path); + tcache_dalloc_small( + tsdn_tsd(tsdn), tcache, ptr, alloc_ctx.szind, slow_path); } else { arena_dalloc_large(tsdn, ptr, tcache, alloc_ctx.szind, sz_s2u(size), slow_path); @@ -522,13 +526,13 @@ arena_sdalloc(tsdn_t *tsdn, void *ptr, size_t size, tcache_t *tcache, } static inline void -arena_cache_oblivious_randomize(tsdn_t *tsdn, arena_t *arena, edata_t *edata, - size_t alignment) { +arena_cache_oblivious_randomize( + tsdn_t *tsdn, arena_t *arena, edata_t *edata, size_t alignment) { assert(edata_base_get(edata) == edata_addr_get(edata)); if (alignment < PAGE) { - unsigned lg_range = LG_PAGE - - lg_floor(CACHELINE_CEILING(alignment)); + unsigned lg_range = LG_PAGE + - lg_floor(CACHELINE_CEILING(alignment)); size_t r; if (!tsdn_null(tsdn)) { tsd_t *tsd = tsdn_tsd(tsdn); @@ -538,12 
+542,12 @@ arena_cache_oblivious_randomize(tsdn_t *tsdn, arena_t *arena, edata_t *edata, uint64_t stack_value = (uint64_t)(uintptr_t)&r; r = (size_t)prng_lg_range_u64(&stack_value, lg_range); } - uintptr_t random_offset = ((uintptr_t)r) << (LG_PAGE - - lg_range); - edata->e_addr = (void *)((byte_t *)edata->e_addr + - random_offset); - assert(ALIGNMENT_ADDR2BASE(edata->e_addr, alignment) == - edata->e_addr); + uintptr_t random_offset = ((uintptr_t)r) + << (LG_PAGE - lg_range); + edata->e_addr = (void *)((byte_t *)edata->e_addr + + random_offset); + assert(ALIGNMENT_ADDR2BASE(edata->e_addr, alignment) + == edata->e_addr); } } @@ -556,20 +560,21 @@ arena_cache_oblivious_randomize(tsdn_t *tsdn, arena_t *arena, edata_t *edata, typedef struct arena_dalloc_bin_locked_info_s arena_dalloc_bin_locked_info_t; struct arena_dalloc_bin_locked_info_s { div_info_t div_info; - uint32_t nregs; - uint64_t ndalloc; + uint32_t nregs; + uint64_t ndalloc; }; JEMALLOC_ALWAYS_INLINE size_t arena_slab_regind(arena_dalloc_bin_locked_info_t *info, szind_t binind, edata_t *slab, const void *ptr) { - size_t regind = arena_slab_regind_impl(&info->div_info, binind, slab, ptr); + size_t regind = arena_slab_regind_impl( + &info->div_info, binind, slab, ptr); return regind; } JEMALLOC_ALWAYS_INLINE void -arena_dalloc_bin_locked_begin(arena_dalloc_bin_locked_info_t *info, - szind_t binind) { +arena_dalloc_bin_locked_begin( + arena_dalloc_bin_locked_info_t *info, szind_t binind) { info->div_info = arena_binind_div_info[binind]; info->nregs = bin_infos[binind].nregs; info->ndalloc = 0; @@ -589,8 +594,8 @@ arena_dalloc_bin_locked_step(tsdn_t *tsdn, arena_t *arena, bin_t *bin, void *ptr, edata_t **dalloc_slabs, unsigned ndalloc_slabs, unsigned *dalloc_slabs_count, edata_list_active_t *dalloc_slabs_extra) { const bin_info_t *bin_info = &bin_infos[binind]; - size_t regind = arena_slab_regind(info, binind, slab, ptr); - slab_data_t *slab_data = edata_slab_data_get(slab); + size_t regind = 
arena_slab_regind(info, binind, slab, ptr); + slab_data_t *slab_data = edata_slab_data_get(slab); assert(edata_nfree_get(slab) < bin_info->nregs); /* Freeing an unallocated pointer can cause assertion failure. */ @@ -605,8 +610,8 @@ arena_dalloc_bin_locked_step(tsdn_t *tsdn, arena_t *arena, bin_t *bin, unsigned nfree = edata_nfree_get(slab); if (nfree == bin_info->nregs) { - arena_dalloc_bin_locked_handle_newly_empty(tsdn, arena, slab, - bin); + arena_dalloc_bin_locked_handle_newly_empty( + tsdn, arena, slab, bin); if (*dalloc_slabs_count < ndalloc_slabs) { dalloc_slabs[*dalloc_slabs_count] = slab; @@ -615,8 +620,8 @@ arena_dalloc_bin_locked_step(tsdn_t *tsdn, arena_t *arena, bin_t *bin, edata_list_active_append(dalloc_slabs_extra, slab); } } else if (nfree == 1 && slab != bin->slabcur) { - arena_dalloc_bin_locked_handle_newly_nonempty(tsdn, arena, slab, - bin); + arena_dalloc_bin_locked_handle_newly_nonempty( + tsdn, arena, slab, bin); } } @@ -637,21 +642,20 @@ arena_bin_flush_batch_impl(tsdn_t *tsdn, arena_t *arena, bin_t *bin, edata_list_active_t *dalloc_slabs_extra) { assert(binind < bin_info_nbatched_sizes); bin_with_batch_t *batched_bin = (bin_with_batch_t *)bin; - size_t nelems_to_pop = batcher_pop_begin(tsdn, - &batched_bin->remote_frees); + size_t nelems_to_pop = batcher_pop_begin( + tsdn, &batched_bin->remote_frees); bin_batching_test_mid_pop(nelems_to_pop); if (nelems_to_pop == BATCHER_NO_IDX) { - malloc_mutex_assert_not_owner(tsdn, - &batched_bin->remote_frees.mtx); + malloc_mutex_assert_not_owner( + tsdn, &batched_bin->remote_frees.mtx); return; } else { - malloc_mutex_assert_owner(tsdn, - &batched_bin->remote_frees.mtx); + malloc_mutex_assert_owner(tsdn, &batched_bin->remote_frees.mtx); } - size_t npushes = batcher_pop_get_pushes(tsdn, - &batched_bin->remote_frees); + size_t npushes = batcher_pop_get_pushes( + tsdn, &batched_bin->remote_frees); bin_remote_free_data_t remote_free_data[BIN_REMOTE_FREE_ELEMS_MAX]; for (size_t i = 0; i < nelems_to_pop; 
i++) { remote_free_data[i] = batched_bin->remote_free_data[i]; @@ -682,8 +686,8 @@ struct arena_bin_flush_batch_state_s { * backup array for any "extra" slabs, as well as a a list to allow a * dynamic number of ones exceeding that array. */ - edata_t *dalloc_slabs[8]; - unsigned dalloc_slab_count; + edata_t *dalloc_slabs[8]; + unsigned dalloc_slab_count; edata_list_active_t dalloc_slabs_extra; }; @@ -712,8 +716,8 @@ arena_bin_flush_batch_after_lock(tsdn_t *tsdn, arena_t *arena, bin_t *bin, preallocated_slabs); arena_bin_flush_batch_impl(tsdn, arena, bin, &state->info, binind, - state->dalloc_slabs, ndalloc_slabs, - &state->dalloc_slab_count, &state->dalloc_slabs_extra); + state->dalloc_slabs, ndalloc_slabs, &state->dalloc_slab_count, + &state->dalloc_slabs_extra); } JEMALLOC_ALWAYS_INLINE void @@ -769,8 +773,8 @@ arena_get_bin(arena_t *arena, szind_t binind, unsigned binshard) { ret = shard0 + binshard; } assert(binind >= SC_NBINS - 1 - || (uintptr_t)ret < (uintptr_t)arena - + arena_bin_offsets[binind + 1]); + || (uintptr_t)ret + < (uintptr_t)arena + arena_bin_offsets[binind + 1]); return ret; } diff --git a/include/jemalloc/internal/arena_stats.h b/include/jemalloc/internal/arena_stats.h index 7f075114..01012f68 100644 --- a/include/jemalloc/internal/arena_stats.h +++ b/include/jemalloc/internal/arena_stats.h @@ -17,31 +17,31 @@ struct arena_stats_large_s { * Total number of large allocation/deallocation requests served directly * by the arena. */ - locked_u64_t nmalloc; - locked_u64_t ndalloc; + locked_u64_t nmalloc; + locked_u64_t ndalloc; /* * Total large active bytes (allocated - deallocated) served directly * by the arena. */ - locked_u64_t active_bytes; + locked_u64_t active_bytes; /* * Number of allocation requests that correspond to this size class. * This includes requests served by tcache, though tcache only * periodically merges into this counter. */ - locked_u64_t nrequests; /* Partially derived. */ + locked_u64_t nrequests; /* Partially derived. 
*/ /* * Number of tcache fills / flushes for large (similarly, periodically * merged). Note that there is no large tcache batch-fill currently * (i.e. only fill 1 at a time); however flush may be batched. */ - locked_u64_t nfills; /* Partially derived. */ - locked_u64_t nflushes; /* Partially derived. */ + locked_u64_t nfills; /* Partially derived. */ + locked_u64_t nflushes; /* Partially derived. */ /* Current number of allocations of this size class. */ - size_t curlextents; /* Derived. */ + size_t curlextents; /* Derived. */ }; /* @@ -57,40 +57,40 @@ struct arena_stats_s { * resident includes the base stats -- that's why it lives here and not * in pa_shard_stats_t. */ - size_t base; /* Derived. */ - size_t metadata_edata; /* Derived. */ - size_t metadata_rtree; /* Derived. */ - size_t resident; /* Derived. */ - size_t metadata_thp; /* Derived. */ - size_t mapped; /* Derived. */ + size_t base; /* Derived. */ + size_t metadata_edata; /* Derived. */ + size_t metadata_rtree; /* Derived. */ + size_t resident; /* Derived. */ + size_t metadata_thp; /* Derived. */ + size_t mapped; /* Derived. */ - atomic_zu_t internal; + atomic_zu_t internal; - size_t allocated_large; /* Derived. */ - uint64_t nmalloc_large; /* Derived. */ - uint64_t ndalloc_large; /* Derived. */ - uint64_t nfills_large; /* Derived. */ - uint64_t nflushes_large; /* Derived. */ - uint64_t nrequests_large; /* Derived. */ + size_t allocated_large; /* Derived. */ + uint64_t nmalloc_large; /* Derived. */ + uint64_t ndalloc_large; /* Derived. */ + uint64_t nfills_large; /* Derived. */ + uint64_t nflushes_large; /* Derived. */ + uint64_t nrequests_large; /* Derived. */ /* * The stats logically owned by the pa_shard in the same arena. This * lives here only because it's convenient for the purposes of the ctl * module -- it only knows about the single arena_stats. */ - pa_shard_stats_t pa_shard_stats; + pa_shard_stats_t pa_shard_stats; /* Number of bytes cached in tcache associated with this arena. 
*/ - size_t tcache_bytes; /* Derived. */ - size_t tcache_stashed_bytes; /* Derived. */ + size_t tcache_bytes; /* Derived. */ + size_t tcache_stashed_bytes; /* Derived. */ mutex_prof_data_t mutex_prof_data[mutex_prof_num_arena_mutexes]; /* One element for each large size class. */ - arena_stats_large_t lstats[SC_NSIZES - SC_NBINS]; + arena_stats_large_t lstats[SC_NSIZES - SC_NBINS]; /* Arena uptime. */ - nstime_t uptime; + nstime_t uptime; }; static inline bool @@ -101,7 +101,7 @@ arena_stats_init(tsdn_t *tsdn, arena_stats_t *arena_stats) { } } if (LOCKEDINT_MTX_INIT(arena_stats->mtx, "arena_stats", - WITNESS_RANK_ARENA_STATS, malloc_mutex_rank_exclusive)) { + WITNESS_RANK_ARENA_STATS, malloc_mutex_rank_exclusive)) { return true; } /* Memory is zeroed, so there is no need to clear stats. */ @@ -115,8 +115,8 @@ arena_stats_large_flush_nrequests_add(tsdn_t *tsdn, arena_stats_t *arena_stats, arena_stats_large_t *lstats = &arena_stats->lstats[szind - SC_NBINS]; locked_inc_u64(tsdn, LOCKEDINT_MTX(arena_stats->mtx), &lstats->nrequests, nrequests); - locked_inc_u64(tsdn, LOCKEDINT_MTX(arena_stats->mtx), - &lstats->nflushes, 1); + locked_inc_u64( + tsdn, LOCKEDINT_MTX(arena_stats->mtx), &lstats->nflushes, 1); LOCKEDINT_MTX_UNLOCK(tsdn, arena_stats->mtx); } diff --git a/include/jemalloc/internal/arena_structs.h b/include/jemalloc/internal/arena_structs.h index 56e12f95..4778ca1b 100644 --- a/include/jemalloc/internal/arena_structs.h +++ b/include/jemalloc/internal/arena_structs.h @@ -32,20 +32,20 @@ struct arena_s { * * Synchronization: atomic. */ - atomic_u_t nthreads[2]; + atomic_u_t nthreads[2]; /* Next bin shard for binding new threads. Synchronization: atomic. */ - atomic_u_t binshard_next; + atomic_u_t binshard_next; /* * When percpu_arena is enabled, to amortize the cost of reading / * updating the current CPU id, track the most recent thread accessing * this arena, and only read CPU if there is a mismatch. 
*/ - tsdn_t *last_thd; + tsdn_t *last_thd; /* Synchronization: internal. */ - arena_stats_t stats; + arena_stats_t stats; /* * Lists of tcaches and cache_bin_array_descriptors for extant threads @@ -54,28 +54,28 @@ struct arena_s { * * Synchronization: tcache_ql_mtx. */ - ql_head(tcache_slow_t) tcache_ql; - ql_head(cache_bin_array_descriptor_t) cache_bin_array_descriptor_ql; - malloc_mutex_t tcache_ql_mtx; + ql_head(tcache_slow_t) tcache_ql; + ql_head(cache_bin_array_descriptor_t) cache_bin_array_descriptor_ql; + malloc_mutex_t tcache_ql_mtx; /* * Represents a dss_prec_t, but atomically. * * Synchronization: atomic. */ - atomic_u_t dss_prec; + atomic_u_t dss_prec; /* * Extant large allocations. * * Synchronization: large_mtx. */ - edata_list_active_t large; + edata_list_active_t large; /* Synchronizes all large allocation/update/deallocation. */ - malloc_mutex_t large_mtx; + malloc_mutex_t large_mtx; /* The page-level allocator shard this arena uses. */ - pa_shard_t pa_shard; + pa_shard_t pa_shard; /* * A cached copy of base->ind. This can get accessed on hot paths; @@ -88,12 +88,12 @@ struct arena_s { * * Synchronization: internal. */ - base_t *base; + base_t *base; /* Used to determine uptime. Read-only after initialization. */ - nstime_t create_time; + nstime_t create_time; /* The name of the arena. */ - char name[ARENA_NAME_LEN]; + char name[ARENA_NAME_LEN]; /* * The arena is allocated alongside its bins; really this is a @@ -101,10 +101,11 @@ struct arena_s { * Enforcing cacheline-alignment to minimize the number of cachelines * touched on the hot paths. */ - JEMALLOC_WARN_ON_USAGE("Do not use this field directly. " - "Use `arena_get_bin` instead.") + JEMALLOC_WARN_ON_USAGE( + "Do not use this field directly. 
" + "Use `arena_get_bin` instead.") JEMALLOC_ALIGNED(CACHELINE) - bin_with_batch_t all_bins[0]; + bin_with_batch_t all_bins[0]; }; #endif /* JEMALLOC_INTERNAL_ARENA_STRUCTS_H */ diff --git a/include/jemalloc/internal/arena_types.h b/include/jemalloc/internal/arena_types.h index a1fc8926..7ed2b968 100644 --- a/include/jemalloc/internal/arena_types.h +++ b/include/jemalloc/internal/arena_types.h @@ -5,38 +5,38 @@ #include "jemalloc/internal/sc.h" /* Default decay times in milliseconds. */ -#define DIRTY_DECAY_MS_DEFAULT ZD(10 * 1000) -#define MUZZY_DECAY_MS_DEFAULT (0) +#define DIRTY_DECAY_MS_DEFAULT ZD(10 * 1000) +#define MUZZY_DECAY_MS_DEFAULT (0) /* Number of event ticks between time checks. */ -#define ARENA_DECAY_NTICKS_PER_UPDATE 1000 +#define ARENA_DECAY_NTICKS_PER_UPDATE 1000 /* Maximum length of the arena name. */ #define ARENA_NAME_LEN 32 typedef struct arena_decay_s arena_decay_t; -typedef struct arena_s arena_t; +typedef struct arena_s arena_t; typedef enum { - percpu_arena_mode_names_base = 0, /* Used for options processing. */ + percpu_arena_mode_names_base = 0, /* Used for options processing. */ /* * *_uninit are used only during bootstrapping, and must correspond * to initialized variant plus percpu_arena_mode_enabled_base. */ - percpu_arena_uninit = 0, - per_phycpu_arena_uninit = 1, + percpu_arena_uninit = 0, + per_phycpu_arena_uninit = 1, /* All non-disabled modes must come after percpu_arena_disabled. */ - percpu_arena_disabled = 2, + percpu_arena_disabled = 2, - percpu_arena_mode_names_limit = 3, /* Used for options processing. */ + percpu_arena_mode_names_limit = 3, /* Used for options processing. */ percpu_arena_mode_enabled_base = 3, - percpu_arena = 3, - per_phycpu_arena = 4 /* Hyper threads share arena. */ + percpu_arena = 3, + per_phycpu_arena = 4 /* Hyper threads share arena. 
*/ } percpu_arena_mode_t; -#define PERCPU_ARENA_ENABLED(m) ((m) >= percpu_arena_mode_enabled_base) -#define PERCPU_ARENA_DEFAULT percpu_arena_disabled +#define PERCPU_ARENA_ENABLED(m) ((m) >= percpu_arena_mode_enabled_base) +#define PERCPU_ARENA_DEFAULT percpu_arena_disabled /* * When allocation_size >= oversize_threshold, use the dedicated huge arena diff --git a/include/jemalloc/internal/assert.h b/include/jemalloc/internal/assert.h index 38eb2a2c..1b5da72f 100644 --- a/include/jemalloc/internal/assert.h +++ b/include/jemalloc/internal/assert.h @@ -7,51 +7,57 @@ * assertion failure. */ #ifndef assert -#define assert(e) do { \ - if (unlikely(config_debug && !(e))) { \ - malloc_printf( \ - ": %s:%d: Failed assertion: \"%s\"\n", \ - __FILE__, __LINE__, #e); \ - abort(); \ - } \ -} while (0) +# define assert(e) \ + do { \ + if (unlikely(config_debug && !(e))) { \ + malloc_printf( \ + ": %s:%d: Failed assertion: \"%s\"\n", \ + __FILE__, __LINE__, #e); \ + abort(); \ + } \ + } while (0) #endif #ifndef not_reached -#define not_reached() do { \ - if (config_debug) { \ - malloc_printf( \ - ": %s:%d: Unreachable code reached\n", \ - __FILE__, __LINE__); \ - abort(); \ - } \ - unreachable(); \ -} while (0) +# define not_reached() \ + do { \ + if (config_debug) { \ + malloc_printf( \ + ": %s:%d: Unreachable code reached\n", \ + __FILE__, __LINE__); \ + abort(); \ + } \ + unreachable(); \ + } while (0) #endif #ifndef not_implemented -#define not_implemented() do { \ - if (config_debug) { \ - malloc_printf(": %s:%d: Not implemented\n", \ - __FILE__, __LINE__); \ - abort(); \ - } \ -} while (0) +# define not_implemented() \ + do { \ + if (config_debug) { \ + malloc_printf( \ + ": %s:%d: Not implemented\n", \ + __FILE__, __LINE__); \ + abort(); \ + } \ + } while (0) #endif #ifndef assert_not_implemented -#define assert_not_implemented(e) do { \ - if (unlikely(config_debug && !(e))) { \ - not_implemented(); \ - } \ -} while (0) +# define assert_not_implemented(e) \ + do { \ + if 
(unlikely(config_debug && !(e))) { \ + not_implemented(); \ + } \ + } while (0) #endif /* Use to assert a particular configuration, e.g., cassert(config_debug). */ #ifndef cassert -#define cassert(c) do { \ - if (unlikely(!(c))) { \ - not_reached(); \ - } \ -} while (0) +# define cassert(c) \ + do { \ + if (unlikely(!(c))) { \ + not_reached(); \ + } \ + } while (0) #endif diff --git a/include/jemalloc/internal/atomic.h b/include/jemalloc/internal/atomic.h index 6dd2a7c6..ddd9341e 100644 --- a/include/jemalloc/internal/atomic.h +++ b/include/jemalloc/internal/atomic.h @@ -5,21 +5,21 @@ #define JEMALLOC_U8_ATOMICS #if defined(JEMALLOC_GCC_ATOMIC_ATOMICS) -# include "jemalloc/internal/atomic_gcc_atomic.h" -# if !defined(JEMALLOC_GCC_U8_ATOMIC_ATOMICS) -# undef JEMALLOC_U8_ATOMICS -# endif +# include "jemalloc/internal/atomic_gcc_atomic.h" +# if !defined(JEMALLOC_GCC_U8_ATOMIC_ATOMICS) +# undef JEMALLOC_U8_ATOMICS +# endif #elif defined(JEMALLOC_GCC_SYNC_ATOMICS) -# include "jemalloc/internal/atomic_gcc_sync.h" -# if !defined(JEMALLOC_GCC_U8_SYNC_ATOMICS) -# undef JEMALLOC_U8_ATOMICS -# endif +# include "jemalloc/internal/atomic_gcc_sync.h" +# if !defined(JEMALLOC_GCC_U8_SYNC_ATOMICS) +# undef JEMALLOC_U8_ATOMICS +# endif #elif defined(_MSC_VER) -# include "jemalloc/internal/atomic_msvc.h" +# include "jemalloc/internal/atomic_msvc.h" #elif defined(JEMALLOC_C11_ATOMICS) -# include "jemalloc/internal/atomic_c11.h" +# include "jemalloc/internal/atomic_c11.h" #else -# error "Don't have atomics implemented on this platform." +# error "Don't have atomics implemented on this platform." #endif #define ATOMIC_INLINE JEMALLOC_ALWAYS_INLINE @@ -56,22 +56,19 @@ /* * Another convenience -- simple atomic helper functions. 
*/ -#define JEMALLOC_GENERATE_EXPANDED_INT_ATOMICS(type, short_type, \ - lg_size) \ - JEMALLOC_GENERATE_INT_ATOMICS(type, short_type, lg_size) \ - ATOMIC_INLINE void \ - atomic_load_add_store_##short_type(atomic_##short_type##_t *a, \ - type inc) { \ - type oldval = atomic_load_##short_type(a, ATOMIC_RELAXED); \ - type newval = oldval + inc; \ - atomic_store_##short_type(a, newval, ATOMIC_RELAXED); \ - } \ - ATOMIC_INLINE void \ - atomic_load_sub_store_##short_type(atomic_##short_type##_t *a, \ - type inc) { \ - type oldval = atomic_load_##short_type(a, ATOMIC_RELAXED); \ - type newval = oldval - inc; \ - atomic_store_##short_type(a, newval, ATOMIC_RELAXED); \ +#define JEMALLOC_GENERATE_EXPANDED_INT_ATOMICS(type, short_type, lg_size) \ + JEMALLOC_GENERATE_INT_ATOMICS(type, short_type, lg_size) \ + ATOMIC_INLINE void atomic_load_add_store_##short_type( \ + atomic_##short_type##_t *a, type inc) { \ + type oldval = atomic_load_##short_type(a, ATOMIC_RELAXED); \ + type newval = oldval + inc; \ + atomic_store_##short_type(a, newval, ATOMIC_RELAXED); \ + } \ + ATOMIC_INLINE void atomic_load_sub_store_##short_type( \ + atomic_##short_type##_t *a, type inc) { \ + type oldval = atomic_load_##short_type(a, ATOMIC_RELAXED); \ + type newval = oldval - inc; \ + atomic_store_##short_type(a, newval, ATOMIC_RELAXED); \ } /* @@ -79,7 +76,7 @@ * fact. */ #if (LG_SIZEOF_PTR == 3 || LG_SIZEOF_INT == 3) -# define JEMALLOC_ATOMIC_U64 +# define JEMALLOC_ATOMIC_U64 #endif JEMALLOC_GENERATE_ATOMICS(void *, p, LG_SIZEOF_PTR) diff --git a/include/jemalloc/internal/atomic_c11.h b/include/jemalloc/internal/atomic_c11.h index a37e9661..1e86e2a0 100644 --- a/include/jemalloc/internal/atomic_c11.h +++ b/include/jemalloc/internal/atomic_c11.h @@ -66,35 +66,29 @@ atomic_compare_exchange_strong_##short_type(atomic_##short_type##_t *a, \ * Integral types have some special operations available that non-integral ones * lack. 
*/ -#define JEMALLOC_GENERATE_INT_ATOMICS(type, short_type, \ - /* unused */ lg_size) \ -JEMALLOC_GENERATE_ATOMICS(type, short_type, /* unused */ lg_size) \ - \ -ATOMIC_INLINE type \ -atomic_fetch_add_##short_type(atomic_##short_type##_t *a, \ - type val, atomic_memory_order_t mo) { \ - return atomic_fetch_add_explicit(a, val, mo); \ -} \ - \ -ATOMIC_INLINE type \ -atomic_fetch_sub_##short_type(atomic_##short_type##_t *a, \ - type val, atomic_memory_order_t mo) { \ - return atomic_fetch_sub_explicit(a, val, mo); \ -} \ -ATOMIC_INLINE type \ -atomic_fetch_and_##short_type(atomic_##short_type##_t *a, \ - type val, atomic_memory_order_t mo) { \ - return atomic_fetch_and_explicit(a, val, mo); \ -} \ -ATOMIC_INLINE type \ -atomic_fetch_or_##short_type(atomic_##short_type##_t *a, \ - type val, atomic_memory_order_t mo) { \ - return atomic_fetch_or_explicit(a, val, mo); \ -} \ -ATOMIC_INLINE type \ -atomic_fetch_xor_##short_type(atomic_##short_type##_t *a, \ - type val, atomic_memory_order_t mo) { \ - return atomic_fetch_xor_explicit(a, val, mo); \ -} +#define JEMALLOC_GENERATE_INT_ATOMICS(type, short_type, /* unused */ lg_size) \ + JEMALLOC_GENERATE_ATOMICS(type, short_type, /* unused */ lg_size) \ + \ + ATOMIC_INLINE type atomic_fetch_add_##short_type( \ + atomic_##short_type##_t *a, type val, atomic_memory_order_t mo) { \ + return atomic_fetch_add_explicit(a, val, mo); \ + } \ + \ + ATOMIC_INLINE type atomic_fetch_sub_##short_type( \ + atomic_##short_type##_t *a, type val, atomic_memory_order_t mo) { \ + return atomic_fetch_sub_explicit(a, val, mo); \ + } \ + ATOMIC_INLINE type atomic_fetch_and_##short_type( \ + atomic_##short_type##_t *a, type val, atomic_memory_order_t mo) { \ + return atomic_fetch_and_explicit(a, val, mo); \ + } \ + ATOMIC_INLINE type atomic_fetch_or_##short_type( \ + atomic_##short_type##_t *a, type val, atomic_memory_order_t mo) { \ + return atomic_fetch_or_explicit(a, val, mo); \ + } \ + ATOMIC_INLINE type atomic_fetch_xor_##short_type( \ + 
atomic_##short_type##_t *a, type val, atomic_memory_order_t mo) { \ + return atomic_fetch_xor_explicit(a, val, mo); \ + } #endif /* JEMALLOC_INTERNAL_ATOMIC_C11_H */ diff --git a/include/jemalloc/internal/atomic_gcc_atomic.h b/include/jemalloc/internal/atomic_gcc_atomic.h index 0819fde1..a828a6b0 100644 --- a/include/jemalloc/internal/atomic_gcc_atomic.h +++ b/include/jemalloc/internal/atomic_gcc_atomic.h @@ -6,7 +6,8 @@ #define ATOMIC_INLINE JEMALLOC_ALWAYS_INLINE -#define ATOMIC_INIT(...) {__VA_ARGS__} +#define ATOMIC_INIT(...) \ + { __VA_ARGS__ } typedef enum { atomic_memory_order_relaxed, @@ -39,95 +40,81 @@ atomic_fence(atomic_memory_order_t mo) { __atomic_thread_fence(atomic_enum_to_builtin(mo)); } -#define JEMALLOC_GENERATE_ATOMICS(type, short_type, \ - /* unused */ lg_size) \ -typedef struct { \ - type repr; \ -} atomic_##short_type##_t; \ - \ -ATOMIC_INLINE type \ -atomic_load_##short_type(const atomic_##short_type##_t *a, \ - atomic_memory_order_t mo) { \ - type result; \ - __atomic_load(&a->repr, &result, atomic_enum_to_builtin(mo)); \ - return result; \ -} \ - \ -ATOMIC_INLINE void \ -atomic_store_##short_type(atomic_##short_type##_t *a, type val, \ - atomic_memory_order_t mo) { \ - __atomic_store(&a->repr, &val, atomic_enum_to_builtin(mo)); \ -} \ - \ -ATOMIC_INLINE type \ -atomic_exchange_##short_type(atomic_##short_type##_t *a, type val, \ - atomic_memory_order_t mo) { \ - type result; \ - __atomic_exchange(&a->repr, &val, &result, \ - atomic_enum_to_builtin(mo)); \ - return result; \ -} \ - \ -ATOMIC_INLINE bool \ -atomic_compare_exchange_weak_##short_type(atomic_##short_type##_t *a, \ - UNUSED type *expected, type desired, \ - atomic_memory_order_t success_mo, \ - atomic_memory_order_t failure_mo) { \ - return __atomic_compare_exchange(&a->repr, expected, &desired, \ - true, atomic_enum_to_builtin(success_mo), \ - atomic_enum_to_builtin(failure_mo)); \ -} \ - \ -ATOMIC_INLINE bool \ 
-atomic_compare_exchange_strong_##short_type(atomic_##short_type##_t *a, \ - UNUSED type *expected, type desired, \ - atomic_memory_order_t success_mo, \ - atomic_memory_order_t failure_mo) { \ - return __atomic_compare_exchange(&a->repr, expected, &desired, \ - false, \ - atomic_enum_to_builtin(success_mo), \ - atomic_enum_to_builtin(failure_mo)); \ -} +#define JEMALLOC_GENERATE_ATOMICS(type, short_type, /* unused */ lg_size) \ + typedef struct { \ + type repr; \ + } atomic_##short_type##_t; \ + \ + ATOMIC_INLINE type atomic_load_##short_type( \ + const atomic_##short_type##_t *a, atomic_memory_order_t mo) { \ + type result; \ + __atomic_load(&a->repr, &result, atomic_enum_to_builtin(mo)); \ + return result; \ + } \ + \ + ATOMIC_INLINE void atomic_store_##short_type( \ + atomic_##short_type##_t *a, type val, atomic_memory_order_t mo) { \ + __atomic_store(&a->repr, &val, atomic_enum_to_builtin(mo)); \ + } \ + \ + ATOMIC_INLINE type atomic_exchange_##short_type( \ + atomic_##short_type##_t *a, type val, atomic_memory_order_t mo) { \ + type result; \ + __atomic_exchange( \ + &a->repr, &val, &result, atomic_enum_to_builtin(mo)); \ + return result; \ + } \ + \ + ATOMIC_INLINE bool atomic_compare_exchange_weak_##short_type( \ + atomic_##short_type##_t *a, UNUSED type *expected, type desired, \ + atomic_memory_order_t success_mo, \ + atomic_memory_order_t failure_mo) { \ + return __atomic_compare_exchange(&a->repr, expected, &desired, \ + true, atomic_enum_to_builtin(success_mo), \ + atomic_enum_to_builtin(failure_mo)); \ + } \ + \ + ATOMIC_INLINE bool atomic_compare_exchange_strong_##short_type( \ + atomic_##short_type##_t *a, UNUSED type *expected, type desired, \ + atomic_memory_order_t success_mo, \ + atomic_memory_order_t failure_mo) { \ + return __atomic_compare_exchange(&a->repr, expected, &desired, \ + false, atomic_enum_to_builtin(success_mo), \ + atomic_enum_to_builtin(failure_mo)); \ + } - -#define JEMALLOC_GENERATE_INT_ATOMICS(type, short_type, \ - /* unused 
*/ lg_size) \ -JEMALLOC_GENERATE_ATOMICS(type, short_type, /* unused */ lg_size) \ - \ -ATOMIC_INLINE type \ -atomic_fetch_add_##short_type(atomic_##short_type##_t *a, type val, \ - atomic_memory_order_t mo) { \ - return __atomic_fetch_add(&a->repr, val, \ - atomic_enum_to_builtin(mo)); \ -} \ - \ -ATOMIC_INLINE type \ -atomic_fetch_sub_##short_type(atomic_##short_type##_t *a, type val, \ - atomic_memory_order_t mo) { \ - return __atomic_fetch_sub(&a->repr, val, \ - atomic_enum_to_builtin(mo)); \ -} \ - \ -ATOMIC_INLINE type \ -atomic_fetch_and_##short_type(atomic_##short_type##_t *a, type val, \ - atomic_memory_order_t mo) { \ - return __atomic_fetch_and(&a->repr, val, \ - atomic_enum_to_builtin(mo)); \ -} \ - \ -ATOMIC_INLINE type \ -atomic_fetch_or_##short_type(atomic_##short_type##_t *a, type val, \ - atomic_memory_order_t mo) { \ - return __atomic_fetch_or(&a->repr, val, \ - atomic_enum_to_builtin(mo)); \ -} \ - \ -ATOMIC_INLINE type \ -atomic_fetch_xor_##short_type(atomic_##short_type##_t *a, type val, \ - atomic_memory_order_t mo) { \ - return __atomic_fetch_xor(&a->repr, val, \ - atomic_enum_to_builtin(mo)); \ -} +#define JEMALLOC_GENERATE_INT_ATOMICS(type, short_type, /* unused */ lg_size) \ + JEMALLOC_GENERATE_ATOMICS(type, short_type, /* unused */ lg_size) \ + \ + ATOMIC_INLINE type atomic_fetch_add_##short_type( \ + atomic_##short_type##_t *a, type val, atomic_memory_order_t mo) { \ + return __atomic_fetch_add( \ + &a->repr, val, atomic_enum_to_builtin(mo)); \ + } \ + \ + ATOMIC_INLINE type atomic_fetch_sub_##short_type( \ + atomic_##short_type##_t *a, type val, atomic_memory_order_t mo) { \ + return __atomic_fetch_sub( \ + &a->repr, val, atomic_enum_to_builtin(mo)); \ + } \ + \ + ATOMIC_INLINE type atomic_fetch_and_##short_type( \ + atomic_##short_type##_t *a, type val, atomic_memory_order_t mo) { \ + return __atomic_fetch_and( \ + &a->repr, val, atomic_enum_to_builtin(mo)); \ + } \ + \ + ATOMIC_INLINE type atomic_fetch_or_##short_type( \ + 
atomic_##short_type##_t *a, type val, atomic_memory_order_t mo) { \ + return __atomic_fetch_or( \ + &a->repr, val, atomic_enum_to_builtin(mo)); \ + } \ + \ + ATOMIC_INLINE type atomic_fetch_xor_##short_type( \ + atomic_##short_type##_t *a, type val, atomic_memory_order_t mo) { \ + return __atomic_fetch_xor( \ + &a->repr, val, atomic_enum_to_builtin(mo)); \ + } #undef ATOMIC_INLINE diff --git a/include/jemalloc/internal/atomic_gcc_sync.h b/include/jemalloc/internal/atomic_gcc_sync.h index 801d6197..9e2ff9c8 100644 --- a/include/jemalloc/internal/atomic_gcc_sync.h +++ b/include/jemalloc/internal/atomic_gcc_sync.h @@ -5,7 +5,8 @@ #define ATOMIC_INLINE JEMALLOC_ALWAYS_INLINE -#define ATOMIC_INIT(...) {__VA_ARGS__} +#define ATOMIC_INIT(...) \ + { __VA_ARGS__ } typedef enum { atomic_memory_order_relaxed, @@ -29,13 +30,13 @@ atomic_fence(atomic_memory_order_t mo) { return; } asm volatile("" ::: "memory"); -# if defined(__i386__) || defined(__x86_64__) +#if defined(__i386__) || defined(__x86_64__) /* This is implicit on x86. 
*/ -# elif defined(__ppc64__) +#elif defined(__ppc64__) asm volatile("lwsync"); -# elif defined(__ppc__) +#elif defined(__ppc__) asm volatile("sync"); -# elif defined(__sparc__) && defined(__arch64__) +#elif defined(__sparc__) && defined(__arch64__) if (mo == atomic_memory_order_acquire) { asm volatile("membar #LoadLoad | #LoadStore"); } else if (mo == atomic_memory_order_release) { @@ -43,9 +44,9 @@ atomic_fence(atomic_memory_order_t mo) { } else { asm volatile("membar #LoadLoad | #LoadStore | #StoreStore"); } -# else +#else __sync_synchronize(); -# endif +#endif asm volatile("" ::: "memory"); } @@ -68,23 +69,22 @@ atomic_fence(atomic_memory_order_t mo) { ATOMIC_INLINE void atomic_pre_sc_load_fence() { -# if defined(__i386__) || defined(__x86_64__) || \ - (defined(__sparc__) && defined(__arch64__)) +#if defined(__i386__) || defined(__x86_64__) \ + || (defined(__sparc__) && defined(__arch64__)) atomic_fence(atomic_memory_order_relaxed); -# else +#else atomic_fence(atomic_memory_order_seq_cst); -# endif +#endif } ATOMIC_INLINE void atomic_post_sc_store_fence() { -# if defined(__i386__) || defined(__x86_64__) || \ - (defined(__sparc__) && defined(__arch64__)) +#if defined(__i386__) || defined(__x86_64__) \ + || (defined(__sparc__) && defined(__arch64__)) atomic_fence(atomic_memory_order_seq_cst); -# else +#else atomic_fence(atomic_memory_order_relaxed); -# endif - +#endif } /* clang-format off */ @@ -164,39 +164,33 @@ atomic_compare_exchange_strong_##short_type(atomic_##short_type##_t *a, \ } /* clang-format on */ -#define JEMALLOC_GENERATE_INT_ATOMICS(type, short_type, \ - /* unused */ lg_size) \ -JEMALLOC_GENERATE_ATOMICS(type, short_type, /* unused */ lg_size) \ - \ -ATOMIC_INLINE type \ -atomic_fetch_add_##short_type(atomic_##short_type##_t *a, type val, \ - atomic_memory_order_t mo) { \ - return __sync_fetch_and_add(&a->repr, val); \ -} \ - \ -ATOMIC_INLINE type \ -atomic_fetch_sub_##short_type(atomic_##short_type##_t *a, type val, \ - atomic_memory_order_t mo) 
{ \ - return __sync_fetch_and_sub(&a->repr, val); \ -} \ - \ -ATOMIC_INLINE type \ -atomic_fetch_and_##short_type(atomic_##short_type##_t *a, type val, \ - atomic_memory_order_t mo) { \ - return __sync_fetch_and_and(&a->repr, val); \ -} \ - \ -ATOMIC_INLINE type \ -atomic_fetch_or_##short_type(atomic_##short_type##_t *a, type val, \ - atomic_memory_order_t mo) { \ - return __sync_fetch_and_or(&a->repr, val); \ -} \ - \ -ATOMIC_INLINE type \ -atomic_fetch_xor_##short_type(atomic_##short_type##_t *a, type val, \ - atomic_memory_order_t mo) { \ - return __sync_fetch_and_xor(&a->repr, val); \ -} +#define JEMALLOC_GENERATE_INT_ATOMICS(type, short_type, /* unused */ lg_size) \ + JEMALLOC_GENERATE_ATOMICS(type, short_type, /* unused */ lg_size) \ + \ + ATOMIC_INLINE type atomic_fetch_add_##short_type( \ + atomic_##short_type##_t *a, type val, atomic_memory_order_t mo) { \ + return __sync_fetch_and_add(&a->repr, val); \ + } \ + \ + ATOMIC_INLINE type atomic_fetch_sub_##short_type( \ + atomic_##short_type##_t *a, type val, atomic_memory_order_t mo) { \ + return __sync_fetch_and_sub(&a->repr, val); \ + } \ + \ + ATOMIC_INLINE type atomic_fetch_and_##short_type( \ + atomic_##short_type##_t *a, type val, atomic_memory_order_t mo) { \ + return __sync_fetch_and_and(&a->repr, val); \ + } \ + \ + ATOMIC_INLINE type atomic_fetch_or_##short_type( \ + atomic_##short_type##_t *a, type val, atomic_memory_order_t mo) { \ + return __sync_fetch_and_or(&a->repr, val); \ + } \ + \ + ATOMIC_INLINE type atomic_fetch_xor_##short_type( \ + atomic_##short_type##_t *a, type val, atomic_memory_order_t mo) { \ + return __sync_fetch_and_xor(&a->repr, val); \ + } #undef ATOMIC_INLINE diff --git a/include/jemalloc/internal/atomic_msvc.h b/include/jemalloc/internal/atomic_msvc.h index 5313aed9..7accca63 100644 --- a/include/jemalloc/internal/atomic_msvc.h +++ b/include/jemalloc/internal/atomic_msvc.h @@ -5,7 +5,8 @@ #define ATOMIC_INLINE JEMALLOC_ALWAYS_INLINE -#define ATOMIC_INIT(...) 
{__VA_ARGS__} +#define ATOMIC_INIT(...) \ + { __VA_ARGS__ } typedef enum { atomic_memory_order_relaxed, @@ -15,108 +16,104 @@ typedef enum { atomic_memory_order_seq_cst } atomic_memory_order_t; -typedef char atomic_repr_0_t; -typedef short atomic_repr_1_t; -typedef long atomic_repr_2_t; +typedef char atomic_repr_0_t; +typedef short atomic_repr_1_t; +typedef long atomic_repr_2_t; typedef __int64 atomic_repr_3_t; ATOMIC_INLINE void atomic_fence(atomic_memory_order_t mo) { _ReadWriteBarrier(); -# if defined(_M_ARM) || defined(_M_ARM64) +#if defined(_M_ARM) || defined(_M_ARM64) /* ARM needs a barrier for everything but relaxed. */ if (mo != atomic_memory_order_relaxed) { MemoryBarrier(); } -# elif defined(_M_IX86) || defined (_M_X64) +#elif defined(_M_IX86) || defined(_M_X64) /* x86 needs a barrier only for seq_cst. */ if (mo == atomic_memory_order_seq_cst) { MemoryBarrier(); } -# else -# error "Don't know how to create atomics for this platform for MSVC." -# endif +#else +# error "Don't know how to create atomics for this platform for MSVC." 
+#endif _ReadWriteBarrier(); } -#define ATOMIC_INTERLOCKED_REPR(lg_size) atomic_repr_ ## lg_size ## _t +#define ATOMIC_INTERLOCKED_REPR(lg_size) atomic_repr_##lg_size##_t #define ATOMIC_CONCAT(a, b) ATOMIC_RAW_CONCAT(a, b) -#define ATOMIC_RAW_CONCAT(a, b) a ## b +#define ATOMIC_RAW_CONCAT(a, b) a##b -#define ATOMIC_INTERLOCKED_NAME(base_name, lg_size) ATOMIC_CONCAT( \ - base_name, ATOMIC_INTERLOCKED_SUFFIX(lg_size)) +#define ATOMIC_INTERLOCKED_NAME(base_name, lg_size) \ + ATOMIC_CONCAT(base_name, ATOMIC_INTERLOCKED_SUFFIX(lg_size)) -#define ATOMIC_INTERLOCKED_SUFFIX(lg_size) \ - ATOMIC_CONCAT(ATOMIC_INTERLOCKED_SUFFIX_, lg_size) +#define ATOMIC_INTERLOCKED_SUFFIX(lg_size) \ + ATOMIC_CONCAT(ATOMIC_INTERLOCKED_SUFFIX_, lg_size) #define ATOMIC_INTERLOCKED_SUFFIX_0 8 #define ATOMIC_INTERLOCKED_SUFFIX_1 16 #define ATOMIC_INTERLOCKED_SUFFIX_2 #define ATOMIC_INTERLOCKED_SUFFIX_3 64 -#define JEMALLOC_GENERATE_ATOMICS(type, short_type, lg_size) \ -typedef struct { \ - ATOMIC_INTERLOCKED_REPR(lg_size) repr; \ -} atomic_##short_type##_t; \ - \ -ATOMIC_INLINE type \ -atomic_load_##short_type(const atomic_##short_type##_t *a, \ - atomic_memory_order_t mo) { \ - ATOMIC_INTERLOCKED_REPR(lg_size) ret = a->repr; \ - if (mo != atomic_memory_order_relaxed) { \ - atomic_fence(atomic_memory_order_acquire); \ - } \ - return (type) ret; \ -} \ - \ -ATOMIC_INLINE void \ -atomic_store_##short_type(atomic_##short_type##_t *a, \ - type val, atomic_memory_order_t mo) { \ - if (mo != atomic_memory_order_relaxed) { \ - atomic_fence(atomic_memory_order_release); \ - } \ - a->repr = (ATOMIC_INTERLOCKED_REPR(lg_size)) val; \ - if (mo == atomic_memory_order_seq_cst) { \ - atomic_fence(atomic_memory_order_seq_cst); \ - } \ -} \ - \ -ATOMIC_INLINE type \ -atomic_exchange_##short_type(atomic_##short_type##_t *a, type val, \ - atomic_memory_order_t mo) { \ - return (type)ATOMIC_INTERLOCKED_NAME(_InterlockedExchange, \ - lg_size)(&a->repr, (ATOMIC_INTERLOCKED_REPR(lg_size))val); \ -} \ - \ 
-ATOMIC_INLINE bool \ -atomic_compare_exchange_weak_##short_type(atomic_##short_type##_t *a, \ - type *expected, type desired, atomic_memory_order_t success_mo, \ - atomic_memory_order_t failure_mo) { \ - ATOMIC_INTERLOCKED_REPR(lg_size) e = \ - (ATOMIC_INTERLOCKED_REPR(lg_size))*expected; \ - ATOMIC_INTERLOCKED_REPR(lg_size) d = \ - (ATOMIC_INTERLOCKED_REPR(lg_size))desired; \ - ATOMIC_INTERLOCKED_REPR(lg_size) old = \ - ATOMIC_INTERLOCKED_NAME(_InterlockedCompareExchange, \ - lg_size)(&a->repr, d, e); \ - if (old == e) { \ - return true; \ - } else { \ - *expected = (type)old; \ - return false; \ - } \ -} \ - \ -ATOMIC_INLINE bool \ -atomic_compare_exchange_strong_##short_type(atomic_##short_type##_t *a, \ - type *expected, type desired, atomic_memory_order_t success_mo, \ - atomic_memory_order_t failure_mo) { \ - /* We implement the weak version with strong semantics. */ \ - return atomic_compare_exchange_weak_##short_type(a, expected, \ - desired, success_mo, failure_mo); \ -} - +#define JEMALLOC_GENERATE_ATOMICS(type, short_type, lg_size) \ + typedef struct { \ + ATOMIC_INTERLOCKED_REPR(lg_size) repr; \ + } atomic_##short_type##_t; \ + \ + ATOMIC_INLINE type atomic_load_##short_type( \ + const atomic_##short_type##_t *a, atomic_memory_order_t mo) { \ + ATOMIC_INTERLOCKED_REPR(lg_size) ret = a->repr; \ + if (mo != atomic_memory_order_relaxed) { \ + atomic_fence(atomic_memory_order_acquire); \ + } \ + return (type)ret; \ + } \ + \ + ATOMIC_INLINE void atomic_store_##short_type( \ + atomic_##short_type##_t *a, type val, atomic_memory_order_t mo) { \ + if (mo != atomic_memory_order_relaxed) { \ + atomic_fence(atomic_memory_order_release); \ + } \ + a->repr = (ATOMIC_INTERLOCKED_REPR(lg_size))val; \ + if (mo == atomic_memory_order_seq_cst) { \ + atomic_fence(atomic_memory_order_seq_cst); \ + } \ + } \ + \ + ATOMIC_INLINE type atomic_exchange_##short_type( \ + atomic_##short_type##_t *a, type val, atomic_memory_order_t mo) { \ + return 
(type)ATOMIC_INTERLOCKED_NAME(_InterlockedExchange, \ + lg_size)(&a->repr, (ATOMIC_INTERLOCKED_REPR(lg_size))val); \ + } \ + \ + ATOMIC_INLINE bool atomic_compare_exchange_weak_##short_type( \ + atomic_##short_type##_t *a, type *expected, type desired, \ + atomic_memory_order_t success_mo, \ + atomic_memory_order_t failure_mo) { \ + ATOMIC_INTERLOCKED_REPR(lg_size) \ + e = (ATOMIC_INTERLOCKED_REPR(lg_size)) * expected; \ + ATOMIC_INTERLOCKED_REPR(lg_size) \ + d = (ATOMIC_INTERLOCKED_REPR(lg_size))desired; \ + ATOMIC_INTERLOCKED_REPR(lg_size) \ + old = ATOMIC_INTERLOCKED_NAME( \ + _InterlockedCompareExchange, lg_size)(&a->repr, d, e); \ + if (old == e) { \ + return true; \ + } else { \ + *expected = (type)old; \ + return false; \ + } \ + } \ + \ + ATOMIC_INLINE bool atomic_compare_exchange_strong_##short_type( \ + atomic_##short_type##_t *a, type *expected, type desired, \ + atomic_memory_order_t success_mo, \ + atomic_memory_order_t failure_mo) { \ + /* We implement the weak version with strong semantics. 
*/ \ + return atomic_compare_exchange_weak_##short_type( \ + a, expected, desired, success_mo, failure_mo); \ + } /* clang-format off */ #define JEMALLOC_GENERATE_INT_ATOMICS(type, short_type, lg_size) \ diff --git a/include/jemalloc/internal/background_thread_externs.h b/include/jemalloc/internal/background_thread_externs.h index 0d34ee55..efc0aaa4 100644 --- a/include/jemalloc/internal/background_thread_externs.h +++ b/include/jemalloc/internal/background_thread_externs.h @@ -6,26 +6,26 @@ #include "jemalloc/internal/base.h" #include "jemalloc/internal/mutex.h" -extern bool opt_background_thread; -extern size_t opt_max_background_threads; -extern malloc_mutex_t background_thread_lock; -extern atomic_b_t background_thread_enabled_state; -extern size_t n_background_threads; -extern size_t max_background_threads; +extern bool opt_background_thread; +extern size_t opt_max_background_threads; +extern malloc_mutex_t background_thread_lock; +extern atomic_b_t background_thread_enabled_state; +extern size_t n_background_threads; +extern size_t max_background_threads; extern background_thread_info_t *background_thread_info; bool background_thread_create(tsd_t *tsd, unsigned arena_ind); bool background_threads_enable(tsd_t *tsd); bool background_threads_disable(tsd_t *tsd); -bool background_thread_is_started(background_thread_info_t* info); -void background_thread_wakeup_early(background_thread_info_t *info, - nstime_t *remaining_sleep); +bool background_thread_is_started(background_thread_info_t *info); +void background_thread_wakeup_early( + background_thread_info_t *info, nstime_t *remaining_sleep); void background_thread_prefork0(tsdn_t *tsdn); void background_thread_prefork1(tsdn_t *tsdn); void background_thread_postfork_parent(tsdn_t *tsdn); void background_thread_postfork_child(tsdn_t *tsdn); -bool background_thread_stats_read(tsdn_t *tsdn, - background_thread_stats_t *stats); +bool background_thread_stats_read( + tsdn_t *tsdn, background_thread_stats_t *stats); 
void background_thread_ctl_init(tsdn_t *tsdn); #ifdef JEMALLOC_PTHREAD_CREATE_WRAPPER diff --git a/include/jemalloc/internal/background_thread_inlines.h b/include/jemalloc/internal/background_thread_inlines.h index fd3884f1..e822a3f7 100644 --- a/include/jemalloc/internal/background_thread_inlines.h +++ b/include/jemalloc/internal/background_thread_inlines.h @@ -36,14 +36,14 @@ background_thread_info_get(size_t ind) { JEMALLOC_ALWAYS_INLINE uint64_t background_thread_wakeup_time_get(background_thread_info_t *info) { uint64_t next_wakeup = nstime_ns(&info->next_wakeup); - assert(atomic_load_b(&info->indefinite_sleep, ATOMIC_ACQUIRE) == - (next_wakeup == BACKGROUND_THREAD_INDEFINITE_SLEEP)); + assert(atomic_load_b(&info->indefinite_sleep, ATOMIC_ACQUIRE) + == (next_wakeup == BACKGROUND_THREAD_INDEFINITE_SLEEP)); return next_wakeup; } JEMALLOC_ALWAYS_INLINE void -background_thread_wakeup_time_set(tsdn_t *tsdn, background_thread_info_t *info, - uint64_t wakeup_time) { +background_thread_wakeup_time_set( + tsdn_t *tsdn, background_thread_info_t *info, uint64_t wakeup_time) { malloc_mutex_assert_owner(tsdn, &info->mtx); atomic_store_b(&info->indefinite_sleep, wakeup_time == BACKGROUND_THREAD_INDEFINITE_SLEEP, ATOMIC_RELEASE); diff --git a/include/jemalloc/internal/background_thread_structs.h b/include/jemalloc/internal/background_thread_structs.h index 67b68797..d56673da 100644 --- a/include/jemalloc/internal/background_thread_structs.h +++ b/include/jemalloc/internal/background_thread_structs.h @@ -7,7 +7,7 @@ /* This file really combines "structs" and "types", but only transitionally. */ #if defined(JEMALLOC_BACKGROUND_THREAD) || defined(JEMALLOC_LAZY_LOCK) -# define JEMALLOC_PTHREAD_CREATE_WRAPPER +# define JEMALLOC_PTHREAD_CREATE_WRAPPER #endif #define BACKGROUND_THREAD_INDEFINITE_SLEEP UINT64_MAX @@ -35,33 +35,33 @@ typedef enum { struct background_thread_info_s { #ifdef JEMALLOC_BACKGROUND_THREAD /* Background thread is pthread specific. 
*/ - pthread_t thread; - pthread_cond_t cond; + pthread_t thread; + pthread_cond_t cond; #endif - malloc_mutex_t mtx; - background_thread_state_t state; + malloc_mutex_t mtx; + background_thread_state_t state; /* When true, it means no wakeup scheduled. */ - atomic_b_t indefinite_sleep; + atomic_b_t indefinite_sleep; /* Next scheduled wakeup time (absolute time in ns). */ - nstime_t next_wakeup; + nstime_t next_wakeup; /* * Since the last background thread run, newly added number of pages * that need to be purged by the next wakeup. This is adjusted on * epoch advance, and is used to determine whether we should signal the * background thread to wake up earlier. */ - size_t npages_to_purge_new; + size_t npages_to_purge_new; /* Stats: total number of runs since started. */ - uint64_t tot_n_runs; + uint64_t tot_n_runs; /* Stats: total sleep time since started. */ - nstime_t tot_sleep_time; + nstime_t tot_sleep_time; }; typedef struct background_thread_info_s background_thread_info_t; struct background_thread_stats_s { - size_t num_threads; - uint64_t num_runs; - nstime_t run_interval; + size_t num_threads; + uint64_t num_runs; + nstime_t run_interval; mutex_prof_data_t max_counter_per_bg_thd; }; typedef struct background_thread_stats_s background_thread_stats_t; diff --git a/include/jemalloc/internal/base.h b/include/jemalloc/internal/base.h index c8004b25..f71a874c 100644 --- a/include/jemalloc/internal/base.h +++ b/include/jemalloc/internal/base.h @@ -13,7 +13,7 @@ #define BASE_BLOCK_MIN_ALIGN ((size_t)2 << 20) enum metadata_thp_mode_e { - metadata_thp_disabled = 0, + metadata_thp_disabled = 0, /* * Lazily enable hugepage for metadata. To avoid high RSS caused by THP * + low usage arena (i.e. THP becomes a significant percentage), the @@ -22,15 +22,15 @@ enum metadata_thp_mode_e { * arena), "auto" behaves the same as "always", i.e. madvise hugepage * right away. 
*/ - metadata_thp_auto = 1, - metadata_thp_always = 2, + metadata_thp_auto = 1, + metadata_thp_always = 2, metadata_thp_mode_limit = 3 }; typedef enum metadata_thp_mode_e metadata_thp_mode_t; #define METADATA_THP_DEFAULT metadata_thp_disabled extern metadata_thp_mode_t opt_metadata_thp; -extern const char *const metadata_thp_mode_names[]; +extern const char *const metadata_thp_mode_names[]; /* Embedded at the beginning of every block of base-managed virtual memory. */ typedef struct base_block_s base_block_t; @@ -102,24 +102,24 @@ metadata_thp_enabled(void) { } base_t *b0get(void); -base_t *base_new(tsdn_t *tsdn, unsigned ind, - const extent_hooks_t *extent_hooks, bool metadata_use_hooks); -void base_delete(tsdn_t *tsdn, base_t *base); -ehooks_t *base_ehooks_get(base_t *base); -ehooks_t *base_ehooks_get_for_metadata(base_t *base); -extent_hooks_t *base_extent_hooks_set(base_t *base, - extent_hooks_t *extent_hooks); -void *base_alloc(tsdn_t *tsdn, base_t *base, size_t size, size_t alignment); +base_t *base_new(tsdn_t *tsdn, unsigned ind, const extent_hooks_t *extent_hooks, + bool metadata_use_hooks); +void base_delete(tsdn_t *tsdn, base_t *base); +ehooks_t *base_ehooks_get(base_t *base); +ehooks_t *base_ehooks_get_for_metadata(base_t *base); +extent_hooks_t *base_extent_hooks_set( + base_t *base, extent_hooks_t *extent_hooks); +void *base_alloc(tsdn_t *tsdn, base_t *base, size_t size, size_t alignment); edata_t *base_alloc_edata(tsdn_t *tsdn, base_t *base); -void *base_alloc_rtree(tsdn_t *tsdn, base_t *base, size_t size); -void *b0_alloc_tcache_stack(tsdn_t *tsdn, size_t size); -void b0_dalloc_tcache_stack(tsdn_t *tsdn, void *tcache_stack); -void base_stats_get(tsdn_t *tsdn, base_t *base, size_t *allocated, - size_t *edata_allocated, size_t *rtree_allocated, size_t *resident, - size_t *mapped, size_t *n_thp); -void base_prefork(tsdn_t *tsdn, base_t *base); -void base_postfork_parent(tsdn_t *tsdn, base_t *base); -void base_postfork_child(tsdn_t *tsdn, base_t *base); 
-bool base_boot(tsdn_t *tsdn); +void *base_alloc_rtree(tsdn_t *tsdn, base_t *base, size_t size); +void *b0_alloc_tcache_stack(tsdn_t *tsdn, size_t size); +void b0_dalloc_tcache_stack(tsdn_t *tsdn, void *tcache_stack); +void base_stats_get(tsdn_t *tsdn, base_t *base, size_t *allocated, + size_t *edata_allocated, size_t *rtree_allocated, size_t *resident, + size_t *mapped, size_t *n_thp); +void base_prefork(tsdn_t *tsdn, base_t *base); +void base_postfork_parent(tsdn_t *tsdn, base_t *base); +void base_postfork_child(tsdn_t *tsdn, base_t *base); +bool base_boot(tsdn_t *tsdn); #endif /* JEMALLOC_INTERNAL_BASE_H */ diff --git a/include/jemalloc/internal/batcher.h b/include/jemalloc/internal/batcher.h index 40c8b35f..3ceb8256 100644 --- a/include/jemalloc/internal/batcher.h +++ b/include/jemalloc/internal/batcher.h @@ -5,7 +5,7 @@ #include "jemalloc/internal/atomic.h" #include "jemalloc/internal/mutex.h" -#define BATCHER_NO_IDX ((size_t)-1) +#define BATCHER_NO_IDX ((size_t) - 1) typedef struct batcher_s batcher_t; struct batcher_s { @@ -14,9 +14,9 @@ struct batcher_s { * togehter, along with the front of the mutex. The end of the mutex is * only touched if there's contention. */ - atomic_zu_t nelems; - size_t nelems_max; - size_t npushes; + atomic_zu_t nelems; + size_t nelems_max; + size_t npushes; malloc_mutex_t mtx; }; @@ -27,8 +27,8 @@ void batcher_init(batcher_t *batcher, size_t nelems_max); * BATCHER_NO_IDX if no index is free. If the former, the caller must call * batcher_push_end once done. 
*/ -size_t batcher_push_begin(tsdn_t *tsdn, batcher_t *batcher, - size_t elems_to_push); +size_t batcher_push_begin( + tsdn_t *tsdn, batcher_t *batcher, size_t elems_to_push); void batcher_push_end(tsdn_t *tsdn, batcher_t *batcher); /* @@ -37,7 +37,7 @@ void batcher_push_end(tsdn_t *tsdn, batcher_t *batcher); */ size_t batcher_pop_begin(tsdn_t *tsdn, batcher_t *batcher); size_t batcher_pop_get_pushes(tsdn_t *tsdn, batcher_t *batcher); -void batcher_pop_end(tsdn_t *tsdn, batcher_t *batcher); +void batcher_pop_end(tsdn_t *tsdn, batcher_t *batcher); void batcher_prefork(tsdn_t *tsdn, batcher_t *batcher); void batcher_postfork_parent(tsdn_t *tsdn, batcher_t *batcher); diff --git a/include/jemalloc/internal/bin.h b/include/jemalloc/internal/bin.h index c49afea6..e91583d7 100644 --- a/include/jemalloc/internal/bin.h +++ b/include/jemalloc/internal/bin.h @@ -14,8 +14,8 @@ #ifdef JEMALLOC_JET extern void (*bin_batching_test_after_push_hook)(size_t idx); extern void (*bin_batching_test_mid_pop_hook)(size_t elems_to_pop); -extern void (*bin_batching_test_after_unlock_hook)(unsigned slab_dalloc_count, - bool list_empty); +extern void (*bin_batching_test_after_unlock_hook)( + unsigned slab_dalloc_count, bool list_empty); #endif #ifdef JEMALLOC_JET @@ -50,8 +50,8 @@ bin_batching_test_after_unlock(unsigned slab_dalloc_count, bool list_empty) { (void)list_empty; #ifdef JEMALLOC_JET if (bin_batching_test_after_unlock_hook != NULL) { - bin_batching_test_after_unlock_hook(slab_dalloc_count, - list_empty); + bin_batching_test_after_unlock_hook( + slab_dalloc_count, list_empty); } #endif } @@ -63,13 +63,13 @@ bin_batching_test_after_unlock(unsigned slab_dalloc_count, bool list_empty) { typedef struct bin_s bin_t; struct bin_s { /* All operations on bin_t fields require lock ownership. */ - malloc_mutex_t lock; + malloc_mutex_t lock; /* * Bin statistics. These get touched every time the lock is acquired, * so put them close by in the hopes of getting some cache locality. 
*/ - bin_stats_t stats; + bin_stats_t stats; /* * Current slab being used to service allocations of this bin's size @@ -77,29 +77,29 @@ struct bin_s { * slabcur is reassigned, the previous slab must be deallocated or * inserted into slabs_{nonfull,full}. */ - edata_t *slabcur; + edata_t *slabcur; /* * Heap of non-full slabs. This heap is used to assure that new * allocations come from the non-full slab that is oldest/lowest in * memory. */ - edata_heap_t slabs_nonfull; + edata_heap_t slabs_nonfull; /* List used to track full slabs. */ - edata_list_active_t slabs_full; + edata_list_active_t slabs_full; }; typedef struct bin_remote_free_data_s bin_remote_free_data_t; struct bin_remote_free_data_s { - void *ptr; + void *ptr; edata_t *slab; }; typedef struct bin_with_batch_s bin_with_batch_t; struct bin_with_batch_s { - bin_t bin; - batcher_t remote_frees; + bin_t bin; + batcher_t remote_frees; bin_remote_free_data_t remote_free_data[BIN_REMOTE_FREE_ELEMS_MAX]; }; diff --git a/include/jemalloc/internal/bin_info.h b/include/jemalloc/internal/bin_info.h index 88d58c91..0022c3f7 100644 --- a/include/jemalloc/internal/bin_info.h +++ b/include/jemalloc/internal/bin_info.h @@ -26,22 +26,22 @@ typedef struct bin_info_s bin_info_t; struct bin_info_s { /* Size of regions in a slab for this bin's size class. */ - size_t reg_size; + size_t reg_size; /* Total size of a slab for this bin's size class. */ - size_t slab_size; + size_t slab_size; /* Total number of regions in a slab for this bin's size class. */ - uint32_t nregs; + uint32_t nregs; /* Number of sharded bins in each arena for this size class. */ - uint32_t n_shards; + uint32_t n_shards; /* * Metadata used to manipulate bitmaps for slabs associated with this * bin. */ - bitmap_info_t bitmap_info; + bitmap_info_t bitmap_info; }; /* The maximum size a size class can be and still get batching behavior. 
*/ @@ -51,7 +51,7 @@ extern size_t opt_bin_info_remote_free_max_batch; // The max number of pending elems (across all batches) extern size_t opt_bin_info_remote_free_max; -extern szind_t bin_info_nbatched_sizes; +extern szind_t bin_info_nbatched_sizes; extern unsigned bin_info_nbatched_bins; extern unsigned bin_info_nunbatched_bins; diff --git a/include/jemalloc/internal/bin_stats.h b/include/jemalloc/internal/bin_stats.h index 334c166d..e1095f38 100644 --- a/include/jemalloc/internal/bin_stats.h +++ b/include/jemalloc/internal/bin_stats.h @@ -12,52 +12,52 @@ struct bin_stats_s { * many times, resulting many increments to nrequests, but only one * each to nmalloc and ndalloc. */ - uint64_t nmalloc; - uint64_t ndalloc; + uint64_t nmalloc; + uint64_t ndalloc; /* * Number of allocation requests that correspond to the size of this * bin. This includes requests served by tcache, though tcache only * periodically merges into this counter. */ - uint64_t nrequests; + uint64_t nrequests; /* * Current number of regions of this size class, including regions * currently cached by tcache. */ - size_t curregs; + size_t curregs; /* Number of tcache fills from this bin. */ - uint64_t nfills; + uint64_t nfills; /* Number of tcache flushes to this bin. */ - uint64_t nflushes; + uint64_t nflushes; /* Total number of slabs created for this bin's size class. */ - uint64_t nslabs; + uint64_t nslabs; /* * Total number of slabs reused by extracting them from the slabs heap * for this bin's size class. */ - uint64_t reslabs; + uint64_t reslabs; /* Current number of slabs in this bin. */ - size_t curslabs; + size_t curslabs; /* Current size of nonfull slabs heap in this bin. 
*/ - size_t nonfull_slabs; + size_t nonfull_slabs; - uint64_t batch_pops; - uint64_t batch_failed_pushes; - uint64_t batch_pushes; - uint64_t batch_pushed_elems; + uint64_t batch_pops; + uint64_t batch_failed_pushes; + uint64_t batch_pushes; + uint64_t batch_pushed_elems; }; typedef struct bin_stats_data_s bin_stats_data_t; struct bin_stats_data_s { - bin_stats_t stats_data; + bin_stats_t stats_data; mutex_prof_data_t mutex_data; }; #endif /* JEMALLOC_INTERNAL_BIN_STATS_H */ diff --git a/include/jemalloc/internal/bin_types.h b/include/jemalloc/internal/bin_types.h index 5ec22dfd..b6bad37e 100644 --- a/include/jemalloc/internal/bin_types.h +++ b/include/jemalloc/internal/bin_types.h @@ -8,7 +8,10 @@ #define N_BIN_SHARDS_DEFAULT 1 /* Used in TSD static initializer only. Real init in arena_bind(). */ -#define TSD_BINSHARDS_ZERO_INITIALIZER {{UINT8_MAX}} +#define TSD_BINSHARDS_ZERO_INITIALIZER \ + { \ + { UINT8_MAX } \ + } typedef struct tsd_binshards_s tsd_binshards_t; struct tsd_binshards_s { diff --git a/include/jemalloc/internal/bit_util.h b/include/jemalloc/internal/bit_util.h index 840dbde2..88c7942e 100644 --- a/include/jemalloc/internal/bit_util.h +++ b/include/jemalloc/internal/bit_util.h @@ -5,9 +5,9 @@ #include "jemalloc/internal/assert.h" /* Sanity check. */ -#if !defined(JEMALLOC_INTERNAL_FFSLL) || !defined(JEMALLOC_INTERNAL_FFSL) \ +#if !defined(JEMALLOC_INTERNAL_FFSLL) || !defined(JEMALLOC_INTERNAL_FFSL) \ || !defined(JEMALLOC_INTERNAL_FFS) -# error JEMALLOC_INTERNAL_FFS{,L,LL} should have been defined by configure +# error JEMALLOC_INTERNAL_FFS{,L,LL} should have been defined by configure #endif /* @@ -110,15 +110,17 @@ fls_u(unsigned x) { } #elif defined(_MSC_VER) -#if LG_SIZEOF_PTR == 3 -#define DO_BSR64(bit, x) _BitScanReverse64(&bit, x) -#else +# if LG_SIZEOF_PTR == 3 +# define DO_BSR64(bit, x) _BitScanReverse64(&bit, x) +# else /* * This never actually runs; we're just dodging a compiler error for the * never-taken branch where sizeof(void *) == 8. 
*/ -#define DO_BSR64(bit, x) bit = 0; unreachable() -#endif +# define DO_BSR64(bit, x) \ + bit = 0; \ + unreachable() +# endif /* clang-format off */ #define DO_FLS(x) do { \ @@ -164,8 +166,8 @@ fls_u(unsigned x) { DO_FLS(x); } -#undef DO_FLS -#undef DO_BSR64 +# undef DO_FLS +# undef DO_BSR64 #else static inline unsigned @@ -185,7 +187,7 @@ fls_u(unsigned x) { #endif #if LG_SIZEOF_LONG_LONG > 3 -# error "Haven't implemented popcount for 16-byte ints." +# error "Haven't implemented popcount for 16-byte ints." #endif /* clang-format off */ @@ -284,7 +286,7 @@ popcount_llu(unsigned long long bitmap) { */ static inline size_t -cfs_lu(unsigned long* bitmap) { +cfs_lu(unsigned long *bitmap) { util_assume(*bitmap != 0); size_t bit = ffs_lu(*bitmap); *bitmap ^= ZU(1) << bit; @@ -300,7 +302,7 @@ ffs_zu(size_t x) { #elif LG_SIZEOF_PTR == LG_SIZEOF_LONG_LONG return ffs_llu(x); #else -#error No implementation for size_t ffs() +# error No implementation for size_t ffs() #endif } @@ -313,11 +315,10 @@ fls_zu(size_t x) { #elif LG_SIZEOF_PTR == LG_SIZEOF_LONG_LONG return fls_llu(x); #else -#error No implementation for size_t fls() +# error No implementation for size_t fls() #endif } - static inline unsigned ffs_u64(uint64_t x) { #if LG_SIZEOF_LONG == 3 @@ -325,7 +326,7 @@ ffs_u64(uint64_t x) { #elif LG_SIZEOF_LONG_LONG == 3 return ffs_llu(x); #else -#error No implementation for 64-bit ffs() +# error No implementation for 64-bit ffs() #endif } @@ -336,7 +337,7 @@ fls_u64(uint64_t x) { #elif LG_SIZEOF_LONG_LONG == 3 return fls_llu(x); #else -#error No implementation for 64-bit fls() +# error No implementation for 64-bit fls() #endif } @@ -345,7 +346,7 @@ ffs_u32(uint32_t x) { #if LG_SIZEOF_INT == 2 return ffs_u(x); #else -#error No implementation for 32-bit ffs() +# error No implementation for 32-bit ffs() #endif } @@ -354,7 +355,7 @@ fls_u32(uint32_t x) { #if LG_SIZEOF_INT == 2 return fls_u(x); #else -#error No implementation for 32-bit fls() +# error No implementation for 32-bit 
fls() #endif } @@ -375,7 +376,7 @@ pow2_ceil_u64(uint64_t x) { static inline uint32_t pow2_ceil_u32(uint32_t x) { if (unlikely(x <= 1)) { - return x; + return x; } size_t msb_on_index = fls_u32(x - 1); /* As above. */ @@ -413,13 +414,16 @@ lg_ceil(size_t x) { #define LG_FLOOR_2(x) (x < (1ULL << 1) ? LG_FLOOR_1(x) : 1 + LG_FLOOR_1(x >> 1)) #define LG_FLOOR_4(x) (x < (1ULL << 2) ? LG_FLOOR_2(x) : 2 + LG_FLOOR_2(x >> 2)) #define LG_FLOOR_8(x) (x < (1ULL << 4) ? LG_FLOOR_4(x) : 4 + LG_FLOOR_4(x >> 4)) -#define LG_FLOOR_16(x) (x < (1ULL << 8) ? LG_FLOOR_8(x) : 8 + LG_FLOOR_8(x >> 8)) -#define LG_FLOOR_32(x) (x < (1ULL << 16) ? LG_FLOOR_16(x) : 16 + LG_FLOOR_16(x >> 16)) -#define LG_FLOOR_64(x) (x < (1ULL << 32) ? LG_FLOOR_32(x) : 32 + LG_FLOOR_32(x >> 32)) +#define LG_FLOOR_16(x) \ + (x < (1ULL << 8) ? LG_FLOOR_8(x) : 8 + LG_FLOOR_8(x >> 8)) +#define LG_FLOOR_32(x) \ + (x < (1ULL << 16) ? LG_FLOOR_16(x) : 16 + LG_FLOOR_16(x >> 16)) +#define LG_FLOOR_64(x) \ + (x < (1ULL << 32) ? LG_FLOOR_32(x) : 32 + LG_FLOOR_32(x >> 32)) #if LG_SIZEOF_PTR == 2 -# define LG_FLOOR(x) LG_FLOOR_32((x)) +# define LG_FLOOR(x) LG_FLOOR_32((x)) #else -# define LG_FLOOR(x) LG_FLOOR_64((x)) +# define LG_FLOOR(x) LG_FLOOR_64((x)) #endif #define LG_CEIL(x) (LG_FLOOR(x) + (((x) & ((x) - 1)) == 0 ? 0 : 1)) diff --git a/include/jemalloc/internal/bitmap.h b/include/jemalloc/internal/bitmap.h index 8cd5f5a3..e0f596fb 100644 --- a/include/jemalloc/internal/bitmap.h +++ b/include/jemalloc/internal/bitmap.h @@ -6,22 +6,22 @@ #include "jemalloc/internal/sc.h" typedef unsigned long bitmap_t; -#define LG_SIZEOF_BITMAP LG_SIZEOF_LONG +#define LG_SIZEOF_BITMAP LG_SIZEOF_LONG /* Maximum bitmap bit count is 2^LG_BITMAP_MAXBITS. */ #if SC_LG_SLAB_MAXREGS > LG_CEIL(SC_NSIZES) /* Maximum bitmap bit count is determined by maximum regions per slab. 
*/ -# define LG_BITMAP_MAXBITS SC_LG_SLAB_MAXREGS +# define LG_BITMAP_MAXBITS SC_LG_SLAB_MAXREGS #else /* Maximum bitmap bit count is determined by number of extent size classes. */ -# define LG_BITMAP_MAXBITS LG_CEIL(SC_NSIZES) +# define LG_BITMAP_MAXBITS LG_CEIL(SC_NSIZES) #endif -#define BITMAP_MAXBITS (ZU(1) << LG_BITMAP_MAXBITS) +#define BITMAP_MAXBITS (ZU(1) << LG_BITMAP_MAXBITS) /* Number of bits per group. */ -#define LG_BITMAP_GROUP_NBITS (LG_SIZEOF_BITMAP + 3) -#define BITMAP_GROUP_NBITS (1U << LG_BITMAP_GROUP_NBITS) -#define BITMAP_GROUP_NBITS_MASK (BITMAP_GROUP_NBITS-1) +#define LG_BITMAP_GROUP_NBITS (LG_SIZEOF_BITMAP + 3) +#define BITMAP_GROUP_NBITS (1U << LG_BITMAP_GROUP_NBITS) +#define BITMAP_GROUP_NBITS_MASK (BITMAP_GROUP_NBITS - 1) /* * Do some analysis on how big the bitmap is before we use a tree. For a brute @@ -29,67 +29,64 @@ typedef unsigned long bitmap_t; * use a tree instead. */ #if LG_BITMAP_MAXBITS - LG_BITMAP_GROUP_NBITS > 3 -# define BITMAP_USE_TREE +# define BITMAP_USE_TREE #endif /* Number of groups required to store a given number of bits. */ -#define BITMAP_BITS2GROUPS(nbits) \ - (((nbits) + BITMAP_GROUP_NBITS_MASK) >> LG_BITMAP_GROUP_NBITS) +#define BITMAP_BITS2GROUPS(nbits) \ + (((nbits) + BITMAP_GROUP_NBITS_MASK) >> LG_BITMAP_GROUP_NBITS) /* * Number of groups required at a particular level for a given number of bits. 
*/ -#define BITMAP_GROUPS_L0(nbits) \ - BITMAP_BITS2GROUPS(nbits) -#define BITMAP_GROUPS_L1(nbits) \ - BITMAP_BITS2GROUPS(BITMAP_BITS2GROUPS(nbits)) -#define BITMAP_GROUPS_L2(nbits) \ - BITMAP_BITS2GROUPS(BITMAP_BITS2GROUPS(BITMAP_BITS2GROUPS((nbits)))) -#define BITMAP_GROUPS_L3(nbits) \ - BITMAP_BITS2GROUPS(BITMAP_BITS2GROUPS(BITMAP_BITS2GROUPS( \ - BITMAP_BITS2GROUPS((nbits))))) -#define BITMAP_GROUPS_L4(nbits) \ - BITMAP_BITS2GROUPS(BITMAP_BITS2GROUPS(BITMAP_BITS2GROUPS( \ - BITMAP_BITS2GROUPS(BITMAP_BITS2GROUPS((nbits)))))) +#define BITMAP_GROUPS_L0(nbits) BITMAP_BITS2GROUPS(nbits) +#define BITMAP_GROUPS_L1(nbits) BITMAP_BITS2GROUPS(BITMAP_BITS2GROUPS(nbits)) +#define BITMAP_GROUPS_L2(nbits) \ + BITMAP_BITS2GROUPS(BITMAP_BITS2GROUPS(BITMAP_BITS2GROUPS((nbits)))) +#define BITMAP_GROUPS_L3(nbits) \ + BITMAP_BITS2GROUPS(BITMAP_BITS2GROUPS( \ + BITMAP_BITS2GROUPS(BITMAP_BITS2GROUPS((nbits))))) +#define BITMAP_GROUPS_L4(nbits) \ + BITMAP_BITS2GROUPS(BITMAP_BITS2GROUPS(BITMAP_BITS2GROUPS( \ + BITMAP_BITS2GROUPS(BITMAP_BITS2GROUPS((nbits)))))) /* * Assuming the number of levels, number of groups required for a given number * of bits. 
*/ -#define BITMAP_GROUPS_1_LEVEL(nbits) \ - BITMAP_GROUPS_L0(nbits) -#define BITMAP_GROUPS_2_LEVEL(nbits) \ - (BITMAP_GROUPS_1_LEVEL(nbits) + BITMAP_GROUPS_L1(nbits)) -#define BITMAP_GROUPS_3_LEVEL(nbits) \ - (BITMAP_GROUPS_2_LEVEL(nbits) + BITMAP_GROUPS_L2(nbits)) -#define BITMAP_GROUPS_4_LEVEL(nbits) \ - (BITMAP_GROUPS_3_LEVEL(nbits) + BITMAP_GROUPS_L3(nbits)) -#define BITMAP_GROUPS_5_LEVEL(nbits) \ - (BITMAP_GROUPS_4_LEVEL(nbits) + BITMAP_GROUPS_L4(nbits)) +#define BITMAP_GROUPS_1_LEVEL(nbits) BITMAP_GROUPS_L0(nbits) +#define BITMAP_GROUPS_2_LEVEL(nbits) \ + (BITMAP_GROUPS_1_LEVEL(nbits) + BITMAP_GROUPS_L1(nbits)) +#define BITMAP_GROUPS_3_LEVEL(nbits) \ + (BITMAP_GROUPS_2_LEVEL(nbits) + BITMAP_GROUPS_L2(nbits)) +#define BITMAP_GROUPS_4_LEVEL(nbits) \ + (BITMAP_GROUPS_3_LEVEL(nbits) + BITMAP_GROUPS_L3(nbits)) +#define BITMAP_GROUPS_5_LEVEL(nbits) \ + (BITMAP_GROUPS_4_LEVEL(nbits) + BITMAP_GROUPS_L4(nbits)) /* * Maximum number of groups required to support LG_BITMAP_MAXBITS. */ #ifdef BITMAP_USE_TREE -#if LG_BITMAP_MAXBITS <= LG_BITMAP_GROUP_NBITS -# define BITMAP_GROUPS(nbits) BITMAP_GROUPS_1_LEVEL(nbits) -# define BITMAP_GROUPS_MAX BITMAP_GROUPS_1_LEVEL(BITMAP_MAXBITS) -#elif LG_BITMAP_MAXBITS <= LG_BITMAP_GROUP_NBITS * 2 -# define BITMAP_GROUPS(nbits) BITMAP_GROUPS_2_LEVEL(nbits) -# define BITMAP_GROUPS_MAX BITMAP_GROUPS_2_LEVEL(BITMAP_MAXBITS) -#elif LG_BITMAP_MAXBITS <= LG_BITMAP_GROUP_NBITS * 3 -# define BITMAP_GROUPS(nbits) BITMAP_GROUPS_3_LEVEL(nbits) -# define BITMAP_GROUPS_MAX BITMAP_GROUPS_3_LEVEL(BITMAP_MAXBITS) -#elif LG_BITMAP_MAXBITS <= LG_BITMAP_GROUP_NBITS * 4 -# define BITMAP_GROUPS(nbits) BITMAP_GROUPS_4_LEVEL(nbits) -# define BITMAP_GROUPS_MAX BITMAP_GROUPS_4_LEVEL(BITMAP_MAXBITS) -#elif LG_BITMAP_MAXBITS <= LG_BITMAP_GROUP_NBITS * 5 -# define BITMAP_GROUPS(nbits) BITMAP_GROUPS_5_LEVEL(nbits) -# define BITMAP_GROUPS_MAX BITMAP_GROUPS_5_LEVEL(BITMAP_MAXBITS) -#else -# error "Unsupported bitmap size" -#endif +# if LG_BITMAP_MAXBITS <= 
LG_BITMAP_GROUP_NBITS +# define BITMAP_GROUPS(nbits) BITMAP_GROUPS_1_LEVEL(nbits) +# define BITMAP_GROUPS_MAX BITMAP_GROUPS_1_LEVEL(BITMAP_MAXBITS) +# elif LG_BITMAP_MAXBITS <= LG_BITMAP_GROUP_NBITS * 2 +# define BITMAP_GROUPS(nbits) BITMAP_GROUPS_2_LEVEL(nbits) +# define BITMAP_GROUPS_MAX BITMAP_GROUPS_2_LEVEL(BITMAP_MAXBITS) +# elif LG_BITMAP_MAXBITS <= LG_BITMAP_GROUP_NBITS * 3 +# define BITMAP_GROUPS(nbits) BITMAP_GROUPS_3_LEVEL(nbits) +# define BITMAP_GROUPS_MAX BITMAP_GROUPS_3_LEVEL(BITMAP_MAXBITS) +# elif LG_BITMAP_MAXBITS <= LG_BITMAP_GROUP_NBITS * 4 +# define BITMAP_GROUPS(nbits) BITMAP_GROUPS_4_LEVEL(nbits) +# define BITMAP_GROUPS_MAX BITMAP_GROUPS_4_LEVEL(BITMAP_MAXBITS) +# elif LG_BITMAP_MAXBITS <= LG_BITMAP_GROUP_NBITS * 5 +# define BITMAP_GROUPS(nbits) BITMAP_GROUPS_5_LEVEL(nbits) +# define BITMAP_GROUPS_MAX BITMAP_GROUPS_5_LEVEL(BITMAP_MAXBITS) +# else +# error "Unsupported bitmap size" +# endif /* * Maximum number of levels possible. This could be statically computed based @@ -105,42 +102,53 @@ typedef unsigned long bitmap_t; * unused trailing entries in bitmap_info_t structures; the bitmaps themselves * are not impacted. */ -#define BITMAP_MAX_LEVELS 5 +# define BITMAP_MAX_LEVELS 5 -#define BITMAP_INFO_INITIALIZER(nbits) { \ - /* nbits. */ \ - nbits, \ - /* nlevels. */ \ - (BITMAP_GROUPS_L0(nbits) > BITMAP_GROUPS_L1(nbits)) + \ - (BITMAP_GROUPS_L1(nbits) > BITMAP_GROUPS_L2(nbits)) + \ - (BITMAP_GROUPS_L2(nbits) > BITMAP_GROUPS_L3(nbits)) + \ - (BITMAP_GROUPS_L3(nbits) > BITMAP_GROUPS_L4(nbits)) + 1, \ - /* levels. 
*/ \ - { \ - {0}, \ - {BITMAP_GROUPS_L0(nbits)}, \ - {BITMAP_GROUPS_L1(nbits) + BITMAP_GROUPS_L0(nbits)}, \ - {BITMAP_GROUPS_L2(nbits) + BITMAP_GROUPS_L1(nbits) + \ - BITMAP_GROUPS_L0(nbits)}, \ - {BITMAP_GROUPS_L3(nbits) + BITMAP_GROUPS_L2(nbits) + \ - BITMAP_GROUPS_L1(nbits) + BITMAP_GROUPS_L0(nbits)}, \ - {BITMAP_GROUPS_L4(nbits) + BITMAP_GROUPS_L3(nbits) + \ - BITMAP_GROUPS_L2(nbits) + BITMAP_GROUPS_L1(nbits) \ - + BITMAP_GROUPS_L0(nbits)} \ - } \ -} +# define BITMAP_INFO_INITIALIZER(nbits) \ + { \ + /* nbits. */ \ + nbits, /* nlevels. */ \ + (BITMAP_GROUPS_L0(nbits) \ + > BITMAP_GROUPS_L1(nbits)) \ + + (BITMAP_GROUPS_L1(nbits) \ + > BITMAP_GROUPS_L2(nbits)) \ + + (BITMAP_GROUPS_L2(nbits) \ + > BITMAP_GROUPS_L3(nbits)) \ + + (BITMAP_GROUPS_L3(nbits) \ + > BITMAP_GROUPS_L4(nbits)) \ + + 1, /* levels. */ \ + { \ + {0}, {BITMAP_GROUPS_L0(nbits)}, \ + {BITMAP_GROUPS_L1(nbits) \ + + BITMAP_GROUPS_L0(nbits)}, \ + {BITMAP_GROUPS_L2(nbits) \ + + BITMAP_GROUPS_L1(nbits) \ + + BITMAP_GROUPS_L0(nbits)}, \ + {BITMAP_GROUPS_L3(nbits) \ + + BITMAP_GROUPS_L2(nbits) \ + + BITMAP_GROUPS_L1(nbits) \ + + BITMAP_GROUPS_L0(nbits)}, \ + { \ + BITMAP_GROUPS_L4(nbits) \ + + BITMAP_GROUPS_L3(nbits) \ + + BITMAP_GROUPS_L2(nbits) \ + + BITMAP_GROUPS_L1(nbits) \ + + BITMAP_GROUPS_L0(nbits) \ + } \ + } \ + } #else /* BITMAP_USE_TREE */ -#define BITMAP_GROUPS(nbits) BITMAP_BITS2GROUPS(nbits) -#define BITMAP_GROUPS_MAX BITMAP_BITS2GROUPS(BITMAP_MAXBITS) +# define BITMAP_GROUPS(nbits) BITMAP_BITS2GROUPS(nbits) +# define BITMAP_GROUPS_MAX BITMAP_BITS2GROUPS(BITMAP_MAXBITS) -#define BITMAP_INFO_INITIALIZER(nbits) { \ - /* nbits. */ \ - nbits, \ - /* ngroups. */ \ - BITMAP_BITS2GROUPS(nbits) \ -} +# define BITMAP_INFO_INITIALIZER(nbits) \ + { \ + /* nbits. */ \ + nbits, /* ngroups. */ \ + BITMAP_BITS2GROUPS(nbits) \ + } #endif /* BITMAP_USE_TREE */ @@ -161,21 +169,21 @@ typedef struct bitmap_info_s { * Only the first (nlevels+1) elements are used, and levels are ordered * bottom to top (e.g. 
the bottom level is stored in levels[0]). */ - bitmap_level_t levels[BITMAP_MAX_LEVELS+1]; -#else /* BITMAP_USE_TREE */ + bitmap_level_t levels[BITMAP_MAX_LEVELS + 1]; +#else /* BITMAP_USE_TREE */ /* Number of groups necessary for nbits. */ size_t ngroups; #endif /* BITMAP_USE_TREE */ } bitmap_info_t; -void bitmap_info_init(bitmap_info_t *binfo, size_t nbits); -void bitmap_init(bitmap_t *bitmap, const bitmap_info_t *binfo, bool fill); +void bitmap_info_init(bitmap_info_t *binfo, size_t nbits); +void bitmap_init(bitmap_t *bitmap, const bitmap_info_t *binfo, bool fill); size_t bitmap_size(const bitmap_info_t *binfo); static inline bool bitmap_full(bitmap_t *bitmap, const bitmap_info_t *binfo) { #ifdef BITMAP_USE_TREE - size_t rgoff = binfo->levels[binfo->nlevels].group_offset - 1; + size_t rgoff = binfo->levels[binfo->nlevels].group_offset - 1; bitmap_t rg = bitmap[rgoff]; /* The bitmap is full iff the root group is 0. */ return (rg == 0); @@ -193,7 +201,7 @@ bitmap_full(bitmap_t *bitmap, const bitmap_info_t *binfo) { static inline bool bitmap_get(bitmap_t *bitmap, const bitmap_info_t *binfo, size_t bit) { - size_t goff; + size_t goff; bitmap_t g; assert(bit < binfo->nbits); @@ -204,9 +212,9 @@ bitmap_get(bitmap_t *bitmap, const bitmap_info_t *binfo, size_t bit) { static inline void bitmap_set(bitmap_t *bitmap, const bitmap_info_t *binfo, size_t bit) { - size_t goff; + size_t goff; bitmap_t *gp; - bitmap_t g; + bitmap_t g; assert(bit < binfo->nbits); assert(!bitmap_get(bitmap, binfo, bit)); @@ -245,12 +253,13 @@ bitmap_ffu(const bitmap_t *bitmap, const bitmap_info_t *binfo, size_t min_bit) { #ifdef BITMAP_USE_TREE size_t bit = 0; for (unsigned level = binfo->nlevels; level--;) { - size_t lg_bits_per_group = (LG_BITMAP_GROUP_NBITS * (level + - 1)); - bitmap_t group = bitmap[binfo->levels[level].group_offset + (bit - >> lg_bits_per_group)]; - unsigned group_nmask = (unsigned)(((min_bit > bit) ? 
(min_bit - - bit) : 0) >> (lg_bits_per_group - LG_BITMAP_GROUP_NBITS)); + size_t lg_bits_per_group = (LG_BITMAP_GROUP_NBITS + * (level + 1)); + bitmap_t group = bitmap[binfo->levels[level].group_offset + + (bit >> lg_bits_per_group)]; + unsigned group_nmask = + (unsigned)(((min_bit > bit) ? (min_bit - bit) : 0) + >> (lg_bits_per_group - LG_BITMAP_GROUP_NBITS)); assert(group_nmask <= BITMAP_GROUP_NBITS); bitmap_t group_mask = ~((1LU << group_nmask) - 1); bitmap_t group_masked = group & group_mask; @@ -273,16 +282,16 @@ bitmap_ffu(const bitmap_t *bitmap, const bitmap_info_t *binfo, size_t min_bit) { } return bitmap_ffu(bitmap, binfo, sib_base); } - bit += ((size_t)ffs_lu(group_masked)) << - (lg_bits_per_group - LG_BITMAP_GROUP_NBITS); + bit += ((size_t)ffs_lu(group_masked)) + << (lg_bits_per_group - LG_BITMAP_GROUP_NBITS); } assert(bit >= min_bit); assert(bit < binfo->nbits); return bit; #else - size_t i = min_bit >> LG_BITMAP_GROUP_NBITS; - bitmap_t g = bitmap[i] & ~((1LU << (min_bit & BITMAP_GROUP_NBITS_MASK)) - - 1); + size_t i = min_bit >> LG_BITMAP_GROUP_NBITS; + bitmap_t g = bitmap[i] + & ~((1LU << (min_bit & BITMAP_GROUP_NBITS_MASK)) - 1); size_t bit; while (1) { if (g != 0) { @@ -302,7 +311,7 @@ bitmap_ffu(const bitmap_t *bitmap, const bitmap_info_t *binfo, size_t min_bit) { /* sfu: set first unset. 
*/ static inline size_t bitmap_sfu(bitmap_t *bitmap, const bitmap_info_t *binfo) { - size_t bit; + size_t bit; bitmap_t g; unsigned i; @@ -332,9 +341,9 @@ bitmap_sfu(bitmap_t *bitmap, const bitmap_info_t *binfo) { static inline void bitmap_unset(bitmap_t *bitmap, const bitmap_info_t *binfo, size_t bit) { - size_t goff; - bitmap_t *gp; - bitmap_t g; + size_t goff; + bitmap_t *gp; + bitmap_t g; UNUSED bool propagate; assert(bit < binfo->nbits); diff --git a/include/jemalloc/internal/buf_writer.h b/include/jemalloc/internal/buf_writer.h index fa0ac99c..5ee9af4e 100644 --- a/include/jemalloc/internal/buf_writer.h +++ b/include/jemalloc/internal/buf_writer.h @@ -16,21 +16,21 @@ typedef struct { write_cb_t *write_cb; - void *cbopaque; - char *buf; - size_t buf_size; - size_t buf_end; - bool internal_buf; + void *cbopaque; + char *buf; + size_t buf_size; + size_t buf_end; + bool internal_buf; } buf_writer_t; -bool buf_writer_init(tsdn_t *tsdn, buf_writer_t *buf_writer, - write_cb_t *write_cb, void *cbopaque, char *buf, size_t buf_len); -void buf_writer_flush(buf_writer_t *buf_writer); +bool buf_writer_init(tsdn_t *tsdn, buf_writer_t *buf_writer, + write_cb_t *write_cb, void *cbopaque, char *buf, size_t buf_len); +void buf_writer_flush(buf_writer_t *buf_writer); write_cb_t buf_writer_cb; -void buf_writer_terminate(tsdn_t *tsdn, buf_writer_t *buf_writer); +void buf_writer_terminate(tsdn_t *tsdn, buf_writer_t *buf_writer); -typedef ssize_t (read_cb_t)(void *read_cbopaque, void *buf, size_t limit); -void buf_writer_pipe(buf_writer_t *buf_writer, read_cb_t *read_cb, - void *read_cbopaque); +typedef ssize_t(read_cb_t)(void *read_cbopaque, void *buf, size_t limit); +void buf_writer_pipe( + buf_writer_t *buf_writer, read_cb_t *read_cb, void *read_cbopaque); #endif /* JEMALLOC_INTERNAL_BUF_WRITER_H */ diff --git a/include/jemalloc/internal/cache_bin.h b/include/jemalloc/internal/cache_bin.h index 7ab48dc9..08ee0d6a 100644 --- a/include/jemalloc/internal/cache_bin.h +++ 
b/include/jemalloc/internal/cache_bin.h @@ -45,8 +45,8 @@ extern const uintptr_t disabled_bin; * 1 << (sizeof(cache_bin_sz_t) * 8) * bytes spread across pointer sized objects to get the maximum. */ -#define CACHE_BIN_NCACHED_MAX (((size_t)1 << sizeof(cache_bin_sz_t) * 8) \ - / sizeof(void *) - 1) +#define CACHE_BIN_NCACHED_MAX \ + (((size_t)1 << sizeof(cache_bin_sz_t) * 8) / sizeof(void *) - 1) /* * This lives inside the cache_bin (for locality reasons), and is initialized @@ -152,8 +152,8 @@ struct cache_bin_array_descriptor_s { }; static inline void -cache_bin_array_descriptor_init(cache_bin_array_descriptor_t *descriptor, - cache_bin_t *bins) { +cache_bin_array_descriptor_init( + cache_bin_array_descriptor_t *descriptor, cache_bin_t *bins) { ql_elm_new(descriptor, link); descriptor->bins = bins; } @@ -222,7 +222,8 @@ cache_bin_ncached_max_get(cache_bin_t *bin) { * with later. */ static inline void -cache_bin_assert_earlier(cache_bin_t *bin, cache_bin_sz_t earlier, cache_bin_sz_t later) { +cache_bin_assert_earlier( + cache_bin_t *bin, cache_bin_sz_t earlier, cache_bin_sz_t later) { if (earlier > later) { assert(bin->low_bits_full > bin->low_bits_empty); } @@ -285,8 +286,8 @@ static inline void ** cache_bin_empty_position_get(cache_bin_t *bin) { cache_bin_sz_t diff = cache_bin_diff(bin, (cache_bin_sz_t)(uintptr_t)bin->stack_head, bin->low_bits_empty); - byte_t *empty_bits = (byte_t *)bin->stack_head + diff; - void **ret = (void **)empty_bits; + byte_t *empty_bits = (byte_t *)bin->stack_head + diff; + void **ret = (void **)empty_bits; assert(ret >= bin->stack_head); @@ -305,8 +306,8 @@ cache_bin_empty_position_get(cache_bin_t *bin) { */ static inline cache_bin_sz_t cache_bin_low_bits_low_bound_get(cache_bin_t *bin) { - return (cache_bin_sz_t)bin->low_bits_empty - - cache_bin_ncached_max_get(bin) * sizeof(void *); + return (cache_bin_sz_t)bin->low_bits_empty + - cache_bin_ncached_max_get(bin) * sizeof(void *); } /* @@ -317,7 +318,7 @@ 
cache_bin_low_bits_low_bound_get(cache_bin_t *bin) { static inline void ** cache_bin_low_bound_get(cache_bin_t *bin) { cache_bin_sz_t ncached_max = cache_bin_ncached_max_get(bin); - void **ret = cache_bin_empty_position_get(bin) - ncached_max; + void **ret = cache_bin_empty_position_get(bin) - ncached_max; assert(ret <= bin->stack_head); return ret; @@ -340,8 +341,8 @@ cache_bin_assert_empty(cache_bin_t *bin) { */ static inline cache_bin_sz_t cache_bin_low_water_get_internal(cache_bin_t *bin) { - return cache_bin_diff(bin, bin->low_bits_low_water, - bin->low_bits_empty) / sizeof(void *); + return cache_bin_diff(bin, bin->low_bits_low_water, bin->low_bits_empty) + / sizeof(void *); } /* Returns the numeric value of low water in [0, ncached]. */ @@ -351,7 +352,8 @@ cache_bin_low_water_get(cache_bin_t *bin) { assert(low_water <= cache_bin_ncached_max_get(bin)); assert(low_water <= cache_bin_ncached_get_local(bin)); - cache_bin_assert_earlier(bin, (cache_bin_sz_t)(uintptr_t)bin->stack_head, + cache_bin_assert_earlier(bin, + (cache_bin_sz_t)(uintptr_t)bin->stack_head, bin->low_bits_low_water); return low_water; @@ -390,9 +392,9 @@ cache_bin_alloc_impl(cache_bin_t *bin, bool *success, bool adjust_low_water) { * This may read from the empty position; however the loaded value won't * be used. It's safe because the stack has one more slot reserved. 
*/ - void *ret = *bin->stack_head; + void *ret = *bin->stack_head; cache_bin_sz_t low_bits = (cache_bin_sz_t)(uintptr_t)bin->stack_head; - void **new_head = bin->stack_head + 1; + void **new_head = bin->stack_head + 1; /* * Note that the low water mark is at most empty; if we pass this check, @@ -455,7 +457,8 @@ cache_bin_alloc_batch(cache_bin_t *bin, size_t num, void **out) { JEMALLOC_ALWAYS_INLINE bool cache_bin_full(cache_bin_t *bin) { - return ((cache_bin_sz_t)(uintptr_t)bin->stack_head == bin->low_bits_full); + return ( + (cache_bin_sz_t)(uintptr_t)bin->stack_head == bin->low_bits_full); } /* @@ -469,9 +472,9 @@ cache_bin_dalloc_safety_checks(cache_bin_t *bin, void *ptr) { } cache_bin_sz_t ncached = cache_bin_ncached_get_internal(bin); - unsigned max_scan = opt_debug_double_free_max_scan < ncached - ? opt_debug_double_free_max_scan - : ncached; + unsigned max_scan = opt_debug_double_free_max_scan < ncached + ? opt_debug_double_free_max_scan + : ncached; void **cur = bin->stack_head; void **limit = cur + max_scan; @@ -516,9 +519,11 @@ cache_bin_stash(cache_bin_t *bin, void *ptr) { } /* Stash at the full position, in the [full, head) range. */ - cache_bin_sz_t low_bits_head = (cache_bin_sz_t)(uintptr_t)bin->stack_head; + cache_bin_sz_t low_bits_head = (cache_bin_sz_t)(uintptr_t) + bin->stack_head; /* Wraparound handled as well. 
*/ - cache_bin_sz_t diff = cache_bin_diff(bin, bin->low_bits_full, low_bits_head); + cache_bin_sz_t diff = cache_bin_diff( + bin, bin->low_bits_full, low_bits_head); *(void **)((byte_t *)bin->stack_head - diff) = ptr; assert(!cache_bin_full(bin)); @@ -532,18 +537,21 @@ cache_bin_stash(cache_bin_t *bin, void *ptr) { JEMALLOC_ALWAYS_INLINE cache_bin_sz_t cache_bin_nstashed_get_internal(cache_bin_t *bin) { cache_bin_sz_t ncached_max = cache_bin_ncached_max_get(bin); - cache_bin_sz_t low_bits_low_bound = cache_bin_low_bits_low_bound_get(bin); + cache_bin_sz_t low_bits_low_bound = cache_bin_low_bits_low_bound_get( + bin); - cache_bin_sz_t n = cache_bin_diff(bin, low_bits_low_bound, - bin->low_bits_full) / sizeof(void *); + cache_bin_sz_t n = cache_bin_diff( + bin, low_bits_low_bound, bin->low_bits_full) + / sizeof(void *); assert(n <= ncached_max); if (config_debug && n != 0) { /* Below are for assertions only. */ void **low_bound = cache_bin_low_bound_get(bin); - assert((cache_bin_sz_t)(uintptr_t)low_bound == low_bits_low_bound); + assert( + (cache_bin_sz_t)(uintptr_t)low_bound == low_bits_low_bound); void *stashed = *(low_bound + n - 1); - bool aligned = cache_bin_nonfast_aligned(stashed); + bool aligned = cache_bin_nonfast_aligned(stashed); #ifdef JEMALLOC_JET /* Allow arbitrary pointers to be stashed in tests. */ aligned = true; @@ -582,16 +590,17 @@ cache_bin_nstashed_get_local(cache_bin_t *bin) { * they help access values that will not be concurrently modified. */ static inline void -cache_bin_nitems_get_remote(cache_bin_t *bin, cache_bin_sz_t *ncached, - cache_bin_sz_t *nstashed) { +cache_bin_nitems_get_remote( + cache_bin_t *bin, cache_bin_sz_t *ncached, cache_bin_sz_t *nstashed) { /* Racy version of cache_bin_ncached_get_internal. 
*/ - cache_bin_sz_t diff = bin->low_bits_empty - - (cache_bin_sz_t)(uintptr_t)bin->stack_head; + cache_bin_sz_t diff = bin->low_bits_empty + - (cache_bin_sz_t)(uintptr_t)bin->stack_head; cache_bin_sz_t n = diff / sizeof(void *); *ncached = n; /* Racy version of cache_bin_nstashed_get_internal. */ - cache_bin_sz_t low_bits_low_bound = cache_bin_low_bits_low_bound_get(bin); + cache_bin_sz_t low_bits_low_bound = cache_bin_low_bits_low_bound_get( + bin); n = (bin->low_bits_full - low_bits_low_bound) / sizeof(void *); *nstashed = n; /* @@ -616,7 +625,8 @@ struct cache_bin_fill_ctl_s { * This is to avoid stack overflow when we do batch edata look up, which * reserves a nflush * sizeof(emap_batch_lookup_result_t) stack variable. */ -#define CACHE_BIN_NFLUSH_BATCH_MAX ((VARIABLE_ARRAY_SIZE_MAX >> LG_SIZEOF_PTR) - 1) +#define CACHE_BIN_NFLUSH_BATCH_MAX \ + ((VARIABLE_ARRAY_SIZE_MAX >> LG_SIZEOF_PTR) - 1) /* * Filling and flushing are done in batch, on arrays of void *s. For filling, @@ -638,7 +648,7 @@ struct cache_bin_fill_ctl_s { typedef struct cache_bin_ptr_array_s cache_bin_ptr_array_t; struct cache_bin_ptr_array_s { cache_bin_sz_t n; - void **ptr; + void **ptr; }; /* @@ -650,17 +660,17 @@ struct cache_bin_ptr_array_s { * representations is easy (since they'll require an alloca in the calling * frame). */ -#define CACHE_BIN_PTR_ARRAY_DECLARE(name, nval) \ - cache_bin_ptr_array_t name; \ - name.n = (nval) +#define CACHE_BIN_PTR_ARRAY_DECLARE(name, nval) \ + cache_bin_ptr_array_t name; \ + name.n = (nval) /* * Start a fill. The bin must be empty, and This must be followed by a * finish_fill call before doing any alloc/dalloc operations on the bin. 
*/ static inline void -cache_bin_init_ptr_array_for_fill(cache_bin_t *bin, cache_bin_ptr_array_t *arr, - cache_bin_sz_t nfill) { +cache_bin_init_ptr_array_for_fill( + cache_bin_t *bin, cache_bin_ptr_array_t *arr, cache_bin_sz_t nfill) { cache_bin_assert_empty(bin); arr->ptr = cache_bin_empty_position_get(bin) - nfill; } @@ -671,8 +681,8 @@ cache_bin_init_ptr_array_for_fill(cache_bin_t *bin, cache_bin_ptr_array_t *arr, * case of OOM. */ static inline void -cache_bin_finish_fill(cache_bin_t *bin, cache_bin_ptr_array_t *arr, - cache_bin_sz_t nfilled) { +cache_bin_finish_fill( + cache_bin_t *bin, cache_bin_ptr_array_t *arr, cache_bin_sz_t nfilled) { cache_bin_assert_empty(bin); void **empty_position = cache_bin_empty_position_get(bin); if (nfilled < arr->n) { @@ -687,19 +697,18 @@ cache_bin_finish_fill(cache_bin_t *bin, cache_bin_ptr_array_t *arr, * everything we give them. */ static inline void -cache_bin_init_ptr_array_for_flush(cache_bin_t *bin, - cache_bin_ptr_array_t *arr, cache_bin_sz_t nflush) { +cache_bin_init_ptr_array_for_flush( + cache_bin_t *bin, cache_bin_ptr_array_t *arr, cache_bin_sz_t nflush) { arr->ptr = cache_bin_empty_position_get(bin) - nflush; - assert(cache_bin_ncached_get_local(bin) == 0 - || *arr->ptr != NULL); + assert(cache_bin_ncached_get_local(bin) == 0 || *arr->ptr != NULL); } static inline void -cache_bin_finish_flush(cache_bin_t *bin, cache_bin_ptr_array_t *arr, - cache_bin_sz_t nflushed) { +cache_bin_finish_flush( + cache_bin_t *bin, cache_bin_ptr_array_t *arr, cache_bin_sz_t nflushed) { unsigned rem = cache_bin_ncached_get_local(bin) - nflushed; - memmove(bin->stack_head + nflushed, bin->stack_head, - rem * sizeof(void *)); + memmove( + bin->stack_head + nflushed, bin->stack_head, rem * sizeof(void *)); bin->stack_head += nflushed; cache_bin_low_water_adjust(bin); } @@ -728,14 +737,14 @@ cache_bin_finish_flush_stashed(cache_bin_t *bin) { * Initialize a cache_bin_info to represent up to the given number of items in * the cache_bins it is 
associated with. */ -void cache_bin_info_init(cache_bin_info_t *bin_info, - cache_bin_sz_t ncached_max); +void cache_bin_info_init( + cache_bin_info_t *bin_info, cache_bin_sz_t ncached_max); /* * Given an array of initialized cache_bin_info_ts, determine how big an * allocation is required to initialize a full set of cache_bin_ts. */ -void cache_bin_info_compute_alloc(const cache_bin_info_t *infos, - szind_t ninfos, size_t *size, size_t *alignment); +void cache_bin_info_compute_alloc(const cache_bin_info_t *infos, szind_t ninfos, + size_t *size, size_t *alignment); /* * Actually initialize some cache bins. Callers should allocate the backing @@ -747,8 +756,8 @@ void cache_bin_info_compute_alloc(const cache_bin_info_t *infos, void cache_bin_preincrement(const cache_bin_info_t *infos, szind_t ninfos, void *alloc, size_t *cur_offset); void cache_bin_postincrement(void *alloc, size_t *cur_offset); -void cache_bin_init(cache_bin_t *bin, const cache_bin_info_t *info, - void *alloc, size_t *cur_offset); +void cache_bin_init(cache_bin_t *bin, const cache_bin_info_t *info, void *alloc, + size_t *cur_offset); void cache_bin_init_disabled(cache_bin_t *bin, cache_bin_sz_t ncached_max); bool cache_bin_stack_use_thp(void); diff --git a/include/jemalloc/internal/ckh.h b/include/jemalloc/internal/ckh.h index 8e9d7fed..01b27e8f 100644 --- a/include/jemalloc/internal/ckh.h +++ b/include/jemalloc/internal/ckh.h @@ -22,8 +22,8 @@ #define LG_CKH_BUCKET_CELLS (LG_CACHELINE - LG_SIZEOF_PTR - 1) /* Typedefs to allow easy function pointer passing. */ -typedef void ckh_hash_t (const void *, size_t[2]); -typedef bool ckh_keycomp_t (const void *, const void *); +typedef void ckh_hash_t(const void *, size_t[2]); +typedef bool ckh_keycomp_t(const void *, const void *); /* Hash table cell. */ typedef struct { @@ -56,7 +56,7 @@ typedef struct { unsigned lg_curbuckets; /* Hash and comparison functions. 
*/ - ckh_hash_t *hash; + ckh_hash_t *hash; ckh_keycomp_t *keycomp; /* Hash table with 2^lg_curbuckets buckets. */ @@ -89,8 +89,8 @@ bool ckh_iter(ckh_t *ckh, size_t *tabind, void **key, void **data); * the key and value, and doesn't do any lifetime management. */ bool ckh_insert(tsd_t *tsd, ckh_t *ckh, const void *key, const void *data); -bool ckh_remove(tsd_t *tsd, ckh_t *ckh, const void *searchkey, void **key, - void **data); +bool ckh_remove( + tsd_t *tsd, ckh_t *ckh, const void *searchkey, void **key, void **data); bool ckh_search(ckh_t *ckh, const void *searchkey, void **key, void **data); /* Some useful hash and comparison functions for strings and pointers. */ diff --git a/include/jemalloc/internal/counter.h b/include/jemalloc/internal/counter.h index 74e30701..0f38d40c 100644 --- a/include/jemalloc/internal/counter.h +++ b/include/jemalloc/internal/counter.h @@ -8,7 +8,7 @@ typedef struct counter_accum_s { LOCKEDINT_MTX_DECLARE(mtx) locked_u64_t accumbytes; - uint64_t interval; + uint64_t interval; } counter_accum_t; JEMALLOC_ALWAYS_INLINE bool diff --git a/include/jemalloc/internal/ctl.h b/include/jemalloc/internal/ctl.h index 1f124bfc..b290411b 100644 --- a/include/jemalloc/internal/ctl.h +++ b/include/jemalloc/internal/ctl.h @@ -13,7 +13,7 @@ #include "jemalloc/internal/stats.h" /* Maximum ctl tree depth. */ -#define CTL_MAX_DEPTH 7 +#define CTL_MAX_DEPTH 7 #define CTL_MULTI_SETTING_MAX_LEN 1000 typedef struct ctl_node_s { @@ -21,37 +21,37 @@ typedef struct ctl_node_s { } ctl_node_t; typedef struct ctl_named_node_s { - ctl_node_t node; + ctl_node_t node; const char *name; /* If (nchildren == 0), this is a terminal node. 
*/ - size_t nchildren; + size_t nchildren; const ctl_node_t *children; - int (*ctl)(tsd_t *, const size_t *, size_t, void *, size_t *, void *, - size_t); + int (*ctl)( + tsd_t *, const size_t *, size_t, void *, size_t *, void *, size_t); } ctl_named_node_t; typedef struct ctl_indexed_node_s { struct ctl_node_s node; - const ctl_named_node_t *(*index)(tsdn_t *, const size_t *, size_t, - size_t); + const ctl_named_node_t *(*index)( + tsdn_t *, const size_t *, size_t, size_t); } ctl_indexed_node_t; typedef struct ctl_arena_stats_s { arena_stats_t astats; /* Aggregate stats for small size classes, based on bin stats. */ - size_t allocated_small; + size_t allocated_small; uint64_t nmalloc_small; uint64_t ndalloc_small; uint64_t nrequests_small; uint64_t nfills_small; uint64_t nflushes_small; - bin_stats_data_t bstats[SC_NBINS]; + bin_stats_data_t bstats[SC_NBINS]; arena_stats_large_t lstats[SC_NSIZES - SC_NBINS]; - pac_estats_t estats[SC_NPSIZES]; - hpa_shard_stats_t hpastats; - sec_stats_t secstats; + pac_estats_t estats[SC_NPSIZES]; + hpa_shard_stats_t hpastats; + sec_stats_t secstats; } ctl_arena_stats_t; typedef struct ctl_stats_s { @@ -72,17 +72,17 @@ typedef struct ctl_stats_s { typedef struct ctl_arena_s ctl_arena_t; struct ctl_arena_s { unsigned arena_ind; - bool initialized; + bool initialized; ql_elm(ctl_arena_t) destroyed_link; /* Basic stats, supported even if !config_stats. */ - unsigned nthreads; + unsigned nthreads; const char *dss; - ssize_t dirty_decay_ms; - ssize_t muzzy_decay_ms; - size_t pactive; - size_t pdirty; - size_t pmuzzy; + ssize_t dirty_decay_ms; + ssize_t muzzy_decay_ms; + size_t pactive; + size_t pdirty; + size_t pmuzzy; /* NULL if !config_stats. 
*/ ctl_arena_stats_t *astats; @@ -107,60 +107,67 @@ int ctl_byname(tsd_t *tsd, const char *name, void *oldp, size_t *oldlenp, int ctl_nametomib(tsd_t *tsd, const char *name, size_t *mibp, size_t *miblenp); int ctl_bymib(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp, void *newp, size_t newlen); -int ctl_mibnametomib(tsd_t *tsd, size_t *mib, size_t miblen, const char *name, - size_t *miblenp); -int ctl_bymibname(tsd_t *tsd, size_t *mib, size_t miblen, const char *name, - size_t *miblenp, void *oldp, size_t *oldlenp, void *newp, size_t newlen); +int ctl_mibnametomib( + tsd_t *tsd, size_t *mib, size_t miblen, const char *name, size_t *miblenp); +int ctl_bymibname(tsd_t *tsd, size_t *mib, size_t miblen, const char *name, + size_t *miblenp, void *oldp, size_t *oldlenp, void *newp, size_t newlen); bool ctl_boot(void); void ctl_prefork(tsdn_t *tsdn); void ctl_postfork_parent(tsdn_t *tsdn); void ctl_postfork_child(tsdn_t *tsdn); void ctl_mtx_assert_held(tsdn_t *tsdn); -#define xmallctl(name, oldp, oldlenp, newp, newlen) do { \ - if (je_mallctl(name, oldp, oldlenp, newp, newlen) \ - != 0) { \ - malloc_printf( \ - ": Failure in xmallctl(\"%s\", ...)\n", \ - name); \ - abort(); \ - } \ -} while (0) +#define xmallctl(name, oldp, oldlenp, newp, newlen) \ + do { \ + if (je_mallctl(name, oldp, oldlenp, newp, newlen) != 0) { \ + malloc_printf( \ + ": Failure in xmallctl(\"%s\", ...)\n", \ + name); \ + abort(); \ + } \ + } while (0) -#define xmallctlnametomib(name, mibp, miblenp) do { \ - if (je_mallctlnametomib(name, mibp, miblenp) != 0) { \ - malloc_printf(": Failure in " \ - "xmallctlnametomib(\"%s\", ...)\n", name); \ - abort(); \ - } \ -} while (0) +#define xmallctlnametomib(name, mibp, miblenp) \ + do { \ + if (je_mallctlnametomib(name, mibp, miblenp) != 0) { \ + malloc_printf( \ + ": Failure in " \ + "xmallctlnametomib(\"%s\", ...)\n", \ + name); \ + abort(); \ + } \ + } while (0) -#define xmallctlbymib(mib, miblen, oldp, oldlenp, newp, newlen) do 
{ \ - if (je_mallctlbymib(mib, miblen, oldp, oldlenp, newp, \ - newlen) != 0) { \ - malloc_write( \ - ": Failure in xmallctlbymib()\n"); \ - abort(); \ - } \ -} while (0) +#define xmallctlbymib(mib, miblen, oldp, oldlenp, newp, newlen) \ + do { \ + if (je_mallctlbymib(mib, miblen, oldp, oldlenp, newp, newlen) \ + != 0) { \ + malloc_write( \ + ": Failure in xmallctlbymib()\n"); \ + abort(); \ + } \ + } while (0) -#define xmallctlmibnametomib(mib, miblen, name, miblenp) do { \ - if (ctl_mibnametomib(tsd_fetch(), mib, miblen, name, miblenp) \ - != 0) { \ - malloc_write( \ - ": Failure in ctl_mibnametomib()\n"); \ - abort(); \ - } \ -} while (0) +#define xmallctlmibnametomib(mib, miblen, name, miblenp) \ + do { \ + if (ctl_mibnametomib(tsd_fetch(), mib, miblen, name, miblenp) \ + != 0) { \ + malloc_write( \ + ": Failure in ctl_mibnametomib()\n"); \ + abort(); \ + } \ + } while (0) -#define xmallctlbymibname(mib, miblen, name, miblenp, oldp, oldlenp, \ - newp, newlen) do { \ - if (ctl_bymibname(tsd_fetch(), mib, miblen, name, miblenp, \ - oldp, oldlenp, newp, newlen) != 0) { \ - malloc_write( \ - ": Failure in ctl_bymibname()\n"); \ - abort(); \ - } \ -} while (0) +#define xmallctlbymibname( \ + mib, miblen, name, miblenp, oldp, oldlenp, newp, newlen) \ + do { \ + if (ctl_bymibname(tsd_fetch(), mib, miblen, name, miblenp, \ + oldp, oldlenp, newp, newlen) \ + != 0) { \ + malloc_write( \ + ": Failure in ctl_bymibname()\n"); \ + abort(); \ + } \ + } while (0) #endif /* JEMALLOC_INTERNAL_CTL_H */ diff --git a/include/jemalloc/internal/decay.h b/include/jemalloc/internal/decay.h index 74be55da..e8773655 100644 --- a/include/jemalloc/internal/decay.h +++ b/include/jemalloc/internal/decay.h @@ -5,7 +5,7 @@ #include "jemalloc/internal/mutex.h" #include "jemalloc/internal/smoothstep.h" -#define DECAY_UNBOUNDED_TIME_TO_PURGE ((uint64_t)-1) +#define DECAY_UNBOUNDED_TIME_TO_PURGE ((uint64_t) - 1) /* * The decay_t computes the number of pages we should purge at any given time. 
@@ -168,12 +168,12 @@ void decay_reinit(decay_t *decay, nstime_t *cur_time, ssize_t decay_ms); /* * Compute how many of 'npages_new' pages we would need to purge in 'time'. */ -uint64_t decay_npages_purge_in(decay_t *decay, nstime_t *time, - size_t npages_new); +uint64_t decay_npages_purge_in( + decay_t *decay, nstime_t *time, size_t npages_new); /* Returns true if the epoch advanced and there are pages to purge. */ -bool decay_maybe_advance_epoch(decay_t *decay, nstime_t *new_time, - size_t current_npages); +bool decay_maybe_advance_epoch( + decay_t *decay, nstime_t *new_time, size_t current_npages); /* * Calculates wait time until a number of pages in the interval @@ -182,7 +182,7 @@ bool decay_maybe_advance_epoch(decay_t *decay, nstime_t *new_time, * Returns number of nanoseconds or DECAY_UNBOUNDED_TIME_TO_PURGE in case of * indefinite wait. */ -uint64_t decay_ns_until_purge(decay_t *decay, size_t npages_current, - uint64_t npages_threshold); +uint64_t decay_ns_until_purge( + decay_t *decay, size_t npages_current, uint64_t npages_threshold); #endif /* JEMALLOC_INTERNAL_DECAY_H */ diff --git a/include/jemalloc/internal/ecache.h b/include/jemalloc/internal/ecache.h index 2bd74fde..605733b5 100644 --- a/include/jemalloc/internal/ecache.h +++ b/include/jemalloc/internal/ecache.h @@ -9,8 +9,8 @@ typedef struct ecache_s ecache_t; struct ecache_s { malloc_mutex_t mtx; - eset_t eset; - eset_t guarded_eset; + eset_t eset; + eset_t guarded_eset; /* All stored extents must be in the same state. */ extent_state_t state; /* The index of the ehooks the ecache is associated with. */ @@ -24,22 +24,22 @@ struct ecache_s { static inline size_t ecache_npages_get(ecache_t *ecache) { - return eset_npages_get(&ecache->eset) + - eset_npages_get(&ecache->guarded_eset); + return eset_npages_get(&ecache->eset) + + eset_npages_get(&ecache->guarded_eset); } /* Get the number of extents in the given page size index. 
*/ static inline size_t ecache_nextents_get(ecache_t *ecache, pszind_t ind) { - return eset_nextents_get(&ecache->eset, ind) + - eset_nextents_get(&ecache->guarded_eset, ind); + return eset_nextents_get(&ecache->eset, ind) + + eset_nextents_get(&ecache->guarded_eset, ind); } /* Get the sum total bytes of the extents in the given page size index. */ static inline size_t ecache_nbytes_get(ecache_t *ecache, pszind_t ind) { - return eset_nbytes_get(&ecache->eset, ind) + - eset_nbytes_get(&ecache->guarded_eset, ind); + return eset_nbytes_get(&ecache->eset, ind) + + eset_nbytes_get(&ecache->guarded_eset, ind); } static inline unsigned diff --git a/include/jemalloc/internal/edata.h b/include/jemalloc/internal/edata.h index e41e4efa..2b229e7d 100644 --- a/include/jemalloc/internal/edata.h +++ b/include/jemalloc/internal/edata.h @@ -30,9 +30,9 @@ #define ESET_ENUMERATE_MAX_NUM 32 enum extent_state_e { - extent_state_active = 0, - extent_state_dirty = 1, - extent_state_muzzy = 2, + extent_state_active = 0, + extent_state_dirty = 1, + extent_state_muzzy = 2, extent_state_retained = 3, extent_state_transition = 4, /* States below are intermediate. */ extent_state_merging = 5, @@ -42,7 +42,7 @@ typedef enum extent_state_e extent_state_t; enum extent_head_state_e { EXTENT_NOT_HEAD, - EXTENT_IS_HEAD /* See comments in ehooks_default_merge_impl(). */ + EXTENT_IS_HEAD /* See comments in ehooks_default_merge_impl(). */ }; typedef enum extent_head_state_e extent_head_state_t; @@ -50,25 +50,22 @@ typedef enum extent_head_state_e extent_head_state_t; * Which implementation of the page allocator interface, (PAI, defined in * pai.h) owns the given extent? */ -enum extent_pai_e { - EXTENT_PAI_PAC = 0, - EXTENT_PAI_HPA = 1 -}; +enum extent_pai_e { EXTENT_PAI_PAC = 0, EXTENT_PAI_HPA = 1 }; typedef enum extent_pai_e extent_pai_t; struct e_prof_info_s { /* Time when this was allocated. */ - nstime_t e_prof_alloc_time; + nstime_t e_prof_alloc_time; /* Allocation request size. 
*/ - size_t e_prof_alloc_size; + size_t e_prof_alloc_size; /* Points to a prof_tctx_t. */ - atomic_p_t e_prof_tctx; + atomic_p_t e_prof_tctx; /* * Points to a prof_recent_t for the allocation; NULL * means the recent allocation record no longer exists. * Protected by prof_recent_alloc_mtx. */ - atomic_p_t e_prof_recent_alloc; + atomic_p_t e_prof_recent_alloc; }; typedef struct e_prof_info_s e_prof_info_t; @@ -85,13 +82,13 @@ typedef struct e_prof_info_s e_prof_info_t; */ typedef struct edata_map_info_s edata_map_info_t; struct edata_map_info_s { - bool slab; + bool slab; szind_t szind; }; typedef struct edata_cmp_summary_s edata_cmp_summary_t; struct edata_cmp_summary_s { - uint64_t sn; + uint64_t sn; uintptr_t addr; }; @@ -149,55 +146,72 @@ struct edata_s { * * bin_shard: the shard of the bin from which this extent came. */ - uint64_t e_bits; -#define MASK(CURRENT_FIELD_WIDTH, CURRENT_FIELD_SHIFT) ((((((uint64_t)0x1U) << (CURRENT_FIELD_WIDTH)) - 1)) << (CURRENT_FIELD_SHIFT)) + uint64_t e_bits; +#define MASK(CURRENT_FIELD_WIDTH, CURRENT_FIELD_SHIFT) \ + ((((((uint64_t)0x1U) << (CURRENT_FIELD_WIDTH)) - 1)) \ + << (CURRENT_FIELD_SHIFT)) -#define EDATA_BITS_ARENA_WIDTH MALLOCX_ARENA_BITS -#define EDATA_BITS_ARENA_SHIFT 0 -#define EDATA_BITS_ARENA_MASK MASK(EDATA_BITS_ARENA_WIDTH, EDATA_BITS_ARENA_SHIFT) +#define EDATA_BITS_ARENA_WIDTH MALLOCX_ARENA_BITS +#define EDATA_BITS_ARENA_SHIFT 0 +#define EDATA_BITS_ARENA_MASK \ + MASK(EDATA_BITS_ARENA_WIDTH, EDATA_BITS_ARENA_SHIFT) -#define EDATA_BITS_SLAB_WIDTH 1 -#define EDATA_BITS_SLAB_SHIFT (EDATA_BITS_ARENA_WIDTH + EDATA_BITS_ARENA_SHIFT) -#define EDATA_BITS_SLAB_MASK MASK(EDATA_BITS_SLAB_WIDTH, EDATA_BITS_SLAB_SHIFT) +#define EDATA_BITS_SLAB_WIDTH 1 +#define EDATA_BITS_SLAB_SHIFT (EDATA_BITS_ARENA_WIDTH + EDATA_BITS_ARENA_SHIFT) +#define EDATA_BITS_SLAB_MASK MASK(EDATA_BITS_SLAB_WIDTH, EDATA_BITS_SLAB_SHIFT) -#define EDATA_BITS_COMMITTED_WIDTH 1 -#define EDATA_BITS_COMMITTED_SHIFT (EDATA_BITS_SLAB_WIDTH + 
EDATA_BITS_SLAB_SHIFT) -#define EDATA_BITS_COMMITTED_MASK MASK(EDATA_BITS_COMMITTED_WIDTH, EDATA_BITS_COMMITTED_SHIFT) +#define EDATA_BITS_COMMITTED_WIDTH 1 +#define EDATA_BITS_COMMITTED_SHIFT \ + (EDATA_BITS_SLAB_WIDTH + EDATA_BITS_SLAB_SHIFT) +#define EDATA_BITS_COMMITTED_MASK \ + MASK(EDATA_BITS_COMMITTED_WIDTH, EDATA_BITS_COMMITTED_SHIFT) -#define EDATA_BITS_PAI_WIDTH 1 -#define EDATA_BITS_PAI_SHIFT (EDATA_BITS_COMMITTED_WIDTH + EDATA_BITS_COMMITTED_SHIFT) -#define EDATA_BITS_PAI_MASK MASK(EDATA_BITS_PAI_WIDTH, EDATA_BITS_PAI_SHIFT) +#define EDATA_BITS_PAI_WIDTH 1 +#define EDATA_BITS_PAI_SHIFT \ + (EDATA_BITS_COMMITTED_WIDTH + EDATA_BITS_COMMITTED_SHIFT) +#define EDATA_BITS_PAI_MASK MASK(EDATA_BITS_PAI_WIDTH, EDATA_BITS_PAI_SHIFT) -#define EDATA_BITS_ZEROED_WIDTH 1 -#define EDATA_BITS_ZEROED_SHIFT (EDATA_BITS_PAI_WIDTH + EDATA_BITS_PAI_SHIFT) -#define EDATA_BITS_ZEROED_MASK MASK(EDATA_BITS_ZEROED_WIDTH, EDATA_BITS_ZEROED_SHIFT) +#define EDATA_BITS_ZEROED_WIDTH 1 +#define EDATA_BITS_ZEROED_SHIFT (EDATA_BITS_PAI_WIDTH + EDATA_BITS_PAI_SHIFT) +#define EDATA_BITS_ZEROED_MASK \ + MASK(EDATA_BITS_ZEROED_WIDTH, EDATA_BITS_ZEROED_SHIFT) -#define EDATA_BITS_GUARDED_WIDTH 1 -#define EDATA_BITS_GUARDED_SHIFT (EDATA_BITS_ZEROED_WIDTH + EDATA_BITS_ZEROED_SHIFT) -#define EDATA_BITS_GUARDED_MASK MASK(EDATA_BITS_GUARDED_WIDTH, EDATA_BITS_GUARDED_SHIFT) +#define EDATA_BITS_GUARDED_WIDTH 1 +#define EDATA_BITS_GUARDED_SHIFT \ + (EDATA_BITS_ZEROED_WIDTH + EDATA_BITS_ZEROED_SHIFT) +#define EDATA_BITS_GUARDED_MASK \ + MASK(EDATA_BITS_GUARDED_WIDTH, EDATA_BITS_GUARDED_SHIFT) -#define EDATA_BITS_STATE_WIDTH 3 -#define EDATA_BITS_STATE_SHIFT (EDATA_BITS_GUARDED_WIDTH + EDATA_BITS_GUARDED_SHIFT) -#define EDATA_BITS_STATE_MASK MASK(EDATA_BITS_STATE_WIDTH, EDATA_BITS_STATE_SHIFT) +#define EDATA_BITS_STATE_WIDTH 3 +#define EDATA_BITS_STATE_SHIFT \ + (EDATA_BITS_GUARDED_WIDTH + EDATA_BITS_GUARDED_SHIFT) +#define EDATA_BITS_STATE_MASK \ + MASK(EDATA_BITS_STATE_WIDTH, EDATA_BITS_STATE_SHIFT) 
-#define EDATA_BITS_SZIND_WIDTH LG_CEIL(SC_NSIZES) -#define EDATA_BITS_SZIND_SHIFT (EDATA_BITS_STATE_WIDTH + EDATA_BITS_STATE_SHIFT) -#define EDATA_BITS_SZIND_MASK MASK(EDATA_BITS_SZIND_WIDTH, EDATA_BITS_SZIND_SHIFT) +#define EDATA_BITS_SZIND_WIDTH LG_CEIL(SC_NSIZES) +#define EDATA_BITS_SZIND_SHIFT (EDATA_BITS_STATE_WIDTH + EDATA_BITS_STATE_SHIFT) +#define EDATA_BITS_SZIND_MASK \ + MASK(EDATA_BITS_SZIND_WIDTH, EDATA_BITS_SZIND_SHIFT) -#define EDATA_BITS_NFREE_WIDTH (SC_LG_SLAB_MAXREGS + 1) -#define EDATA_BITS_NFREE_SHIFT (EDATA_BITS_SZIND_WIDTH + EDATA_BITS_SZIND_SHIFT) -#define EDATA_BITS_NFREE_MASK MASK(EDATA_BITS_NFREE_WIDTH, EDATA_BITS_NFREE_SHIFT) +#define EDATA_BITS_NFREE_WIDTH (SC_LG_SLAB_MAXREGS + 1) +#define EDATA_BITS_NFREE_SHIFT (EDATA_BITS_SZIND_WIDTH + EDATA_BITS_SZIND_SHIFT) +#define EDATA_BITS_NFREE_MASK \ + MASK(EDATA_BITS_NFREE_WIDTH, EDATA_BITS_NFREE_SHIFT) -#define EDATA_BITS_BINSHARD_WIDTH 6 -#define EDATA_BITS_BINSHARD_SHIFT (EDATA_BITS_NFREE_WIDTH + EDATA_BITS_NFREE_SHIFT) -#define EDATA_BITS_BINSHARD_MASK MASK(EDATA_BITS_BINSHARD_WIDTH, EDATA_BITS_BINSHARD_SHIFT) +#define EDATA_BITS_BINSHARD_WIDTH 6 +#define EDATA_BITS_BINSHARD_SHIFT \ + (EDATA_BITS_NFREE_WIDTH + EDATA_BITS_NFREE_SHIFT) +#define EDATA_BITS_BINSHARD_MASK \ + MASK(EDATA_BITS_BINSHARD_WIDTH, EDATA_BITS_BINSHARD_SHIFT) #define EDATA_BITS_IS_HEAD_WIDTH 1 -#define EDATA_BITS_IS_HEAD_SHIFT (EDATA_BITS_BINSHARD_WIDTH + EDATA_BITS_BINSHARD_SHIFT) -#define EDATA_BITS_IS_HEAD_MASK MASK(EDATA_BITS_IS_HEAD_WIDTH, EDATA_BITS_IS_HEAD_SHIFT) +#define EDATA_BITS_IS_HEAD_SHIFT \ + (EDATA_BITS_BINSHARD_WIDTH + EDATA_BITS_BINSHARD_SHIFT) +#define EDATA_BITS_IS_HEAD_MASK \ + MASK(EDATA_BITS_IS_HEAD_WIDTH, EDATA_BITS_IS_HEAD_SHIFT) /* Pointer to the extent that this structure is responsible for. */ - void *e_addr; + void *e_addr; union { /* @@ -207,11 +221,11 @@ struct edata_s { * * ssssssss [...] 
ssssssss ssssnnnn nnnnnnnn */ - size_t e_size_esn; - #define EDATA_SIZE_MASK ((size_t)~(PAGE-1)) - #define EDATA_ESN_MASK ((size_t)PAGE-1) + size_t e_size_esn; +#define EDATA_SIZE_MASK ((size_t) ~(PAGE - 1)) +#define EDATA_ESN_MASK ((size_t)PAGE - 1) /* Base extent size, which may not be a multiple of PAGE. */ - size_t e_bsize; + size_t e_bsize; }; /* @@ -232,7 +246,7 @@ struct edata_s { * List linkage used when the edata_t is active; either in * arena's large allocations or bin_t's slabs_full. */ - ql_elm(edata_t) ql_link_active; + ql_elm(edata_t) ql_link_active; /* * Pairing heap linkage. Used whenever the extent is inactive * (in the page allocators), or when it is active and in @@ -240,7 +254,7 @@ struct edata_s { * extent and sitting in an edata_cache. */ union { - edata_heap_link_t heap_link; + edata_heap_link_t heap_link; edata_avail_link_t avail_link; }; }; @@ -253,10 +267,10 @@ struct edata_s { */ ql_elm(edata_t) ql_link_inactive; /* Small region slab metadata. */ - slab_data_t e_slab_data; + slab_data_t e_slab_data; /* Profiling data, used for large objects. 
*/ - e_prof_info_t e_prof_info; + e_prof_info_t e_prof_info; }; }; @@ -265,8 +279,8 @@ TYPED_LIST(edata_list_inactive, edata_t, ql_link_inactive) static inline unsigned edata_arena_ind_get(const edata_t *edata) { - unsigned arena_ind = (unsigned)((edata->e_bits & - EDATA_BITS_ARENA_MASK) >> EDATA_BITS_ARENA_SHIFT); + unsigned arena_ind = (unsigned)((edata->e_bits & EDATA_BITS_ARENA_MASK) + >> EDATA_BITS_ARENA_SHIFT); assert(arena_ind < MALLOCX_ARENA_LIMIT); return arena_ind; @@ -274,8 +288,8 @@ edata_arena_ind_get(const edata_t *edata) { static inline szind_t edata_szind_get_maybe_invalid(const edata_t *edata) { - szind_t szind = (szind_t)((edata->e_bits & EDATA_BITS_SZIND_MASK) >> - EDATA_BITS_SZIND_SHIFT); + szind_t szind = (szind_t)((edata->e_bits & EDATA_BITS_SZIND_MASK) + >> EDATA_BITS_SZIND_SHIFT); assert(szind <= SC_NSIZES); return szind; } @@ -318,8 +332,8 @@ edata_usize_get(const edata_t *edata) { if (!sz_large_size_classes_disabled() || szind < SC_NBINS) { size_t usize_from_ind = sz_index2size(szind); - if (!sz_large_size_classes_disabled() && - usize_from_ind >= SC_LARGE_MINCLASS) { + if (!sz_large_size_classes_disabled() + && usize_from_ind >= SC_LARGE_MINCLASS) { size_t size = (edata->e_size_esn & EDATA_SIZE_MASK); assert(size > sz_large_pad); size_t usize_from_size = size - sz_large_pad; @@ -341,8 +355,9 @@ edata_usize_get(const edata_t *edata) { static inline unsigned edata_binshard_get(const edata_t *edata) { - unsigned binshard = (unsigned)((edata->e_bits & - EDATA_BITS_BINSHARD_MASK) >> EDATA_BITS_BINSHARD_SHIFT); + unsigned binshard = (unsigned)((edata->e_bits + & EDATA_BITS_BINSHARD_MASK) + >> EDATA_BITS_BINSHARD_SHIFT); assert(binshard < bin_infos[edata_szind_get(edata)].n_shards); return binshard; } @@ -354,58 +369,58 @@ edata_sn_get(const edata_t *edata) { static inline extent_state_t edata_state_get(const edata_t *edata) { - return (extent_state_t)((edata->e_bits & EDATA_BITS_STATE_MASK) >> - EDATA_BITS_STATE_SHIFT); + return 
(extent_state_t)((edata->e_bits & EDATA_BITS_STATE_MASK) + >> EDATA_BITS_STATE_SHIFT); } static inline bool edata_guarded_get(const edata_t *edata) { - return (bool)((edata->e_bits & EDATA_BITS_GUARDED_MASK) >> - EDATA_BITS_GUARDED_SHIFT); + return (bool)((edata->e_bits & EDATA_BITS_GUARDED_MASK) + >> EDATA_BITS_GUARDED_SHIFT); } static inline bool edata_zeroed_get(const edata_t *edata) { - return (bool)((edata->e_bits & EDATA_BITS_ZEROED_MASK) >> - EDATA_BITS_ZEROED_SHIFT); + return (bool)((edata->e_bits & EDATA_BITS_ZEROED_MASK) + >> EDATA_BITS_ZEROED_SHIFT); } static inline bool edata_committed_get(const edata_t *edata) { - return (bool)((edata->e_bits & EDATA_BITS_COMMITTED_MASK) >> - EDATA_BITS_COMMITTED_SHIFT); + return (bool)((edata->e_bits & EDATA_BITS_COMMITTED_MASK) + >> EDATA_BITS_COMMITTED_SHIFT); } static inline extent_pai_t edata_pai_get(const edata_t *edata) { - return (extent_pai_t)((edata->e_bits & EDATA_BITS_PAI_MASK) >> - EDATA_BITS_PAI_SHIFT); + return (extent_pai_t)((edata->e_bits & EDATA_BITS_PAI_MASK) + >> EDATA_BITS_PAI_SHIFT); } static inline bool edata_slab_get(const edata_t *edata) { - return (bool)((edata->e_bits & EDATA_BITS_SLAB_MASK) >> - EDATA_BITS_SLAB_SHIFT); + return (bool)((edata->e_bits & EDATA_BITS_SLAB_MASK) + >> EDATA_BITS_SLAB_SHIFT); } static inline unsigned edata_nfree_get(const edata_t *edata) { assert(edata_slab_get(edata)); - return (unsigned)((edata->e_bits & EDATA_BITS_NFREE_MASK) >> - EDATA_BITS_NFREE_SHIFT); + return (unsigned)((edata->e_bits & EDATA_BITS_NFREE_MASK) + >> EDATA_BITS_NFREE_SHIFT); } static inline void * edata_base_get(const edata_t *edata) { - assert(edata->e_addr == PAGE_ADDR2BASE(edata->e_addr) || - !edata_slab_get(edata)); + assert(edata->e_addr == PAGE_ADDR2BASE(edata->e_addr) + || !edata_slab_get(edata)); return PAGE_ADDR2BASE(edata->e_addr); } static inline void * edata_addr_get(const edata_t *edata) { - assert(edata->e_addr == PAGE_ADDR2BASE(edata->e_addr) || - !edata_slab_get(edata)); + 
assert(edata->e_addr == PAGE_ADDR2BASE(edata->e_addr) + || !edata_slab_get(edata)); return edata->e_addr; } @@ -437,14 +452,14 @@ edata_before_get(const edata_t *edata) { static inline void * edata_last_get(const edata_t *edata) { - return (void *)((byte_t *)edata_base_get(edata) + - edata_size_get(edata) - PAGE); + return (void *)((byte_t *)edata_base_get(edata) + edata_size_get(edata) + - PAGE); } static inline void * edata_past_get(const edata_t *edata) { - return (void *)((byte_t *)edata_base_get(edata) + - edata_size_get(edata)); + return ( + void *)((byte_t *)edata_base_get(edata) + edata_size_get(edata)); } static inline slab_data_t * @@ -461,8 +476,8 @@ edata_slab_data_get_const(const edata_t *edata) { static inline prof_tctx_t * edata_prof_tctx_get(const edata_t *edata) { - return (prof_tctx_t *)atomic_load_p(&edata->e_prof_info.e_prof_tctx, - ATOMIC_ACQUIRE); + return (prof_tctx_t *)atomic_load_p( + &edata->e_prof_info.e_prof_tctx, ATOMIC_ACQUIRE); } static inline const nstime_t * @@ -483,16 +498,16 @@ edata_prof_recent_alloc_get_dont_call_directly(const edata_t *edata) { static inline void edata_arena_ind_set(edata_t *edata, unsigned arena_ind) { - edata->e_bits = (edata->e_bits & ~EDATA_BITS_ARENA_MASK) | - ((uint64_t)arena_ind << EDATA_BITS_ARENA_SHIFT); + edata->e_bits = (edata->e_bits & ~EDATA_BITS_ARENA_MASK) + | ((uint64_t)arena_ind << EDATA_BITS_ARENA_SHIFT); } static inline void edata_binshard_set(edata_t *edata, unsigned binshard) { /* The assertion assumes szind is set already. 
*/ assert(binshard < bin_infos[edata_szind_get(edata)].n_shards); - edata->e_bits = (edata->e_bits & ~EDATA_BITS_BINSHARD_MASK) | - ((uint64_t)binshard << EDATA_BITS_BINSHARD_SHIFT); + edata->e_bits = (edata->e_bits & ~EDATA_BITS_BINSHARD_MASK) + | ((uint64_t)binshard << EDATA_BITS_BINSHARD_SHIFT); } static inline void @@ -508,8 +523,8 @@ edata_size_set(edata_t *edata, size_t size) { static inline void edata_esn_set(edata_t *edata, size_t esn) { - edata->e_size_esn = (edata->e_size_esn & ~EDATA_ESN_MASK) | (esn & - EDATA_ESN_MASK); + edata->e_size_esn = (edata->e_size_esn & ~EDATA_ESN_MASK) + | (esn & EDATA_ESN_MASK); } static inline void @@ -526,25 +541,26 @@ edata_ps_set(edata_t *edata, hpdata_t *ps) { static inline void edata_szind_set(edata_t *edata, szind_t szind) { assert(szind <= SC_NSIZES); /* SC_NSIZES means "invalid". */ - edata->e_bits = (edata->e_bits & ~EDATA_BITS_SZIND_MASK) | - ((uint64_t)szind << EDATA_BITS_SZIND_SHIFT); + edata->e_bits = (edata->e_bits & ~EDATA_BITS_SZIND_MASK) + | ((uint64_t)szind << EDATA_BITS_SZIND_SHIFT); } static inline void edata_nfree_set(edata_t *edata, unsigned nfree) { assert(edata_slab_get(edata)); - edata->e_bits = (edata->e_bits & ~EDATA_BITS_NFREE_MASK) | - ((uint64_t)nfree << EDATA_BITS_NFREE_SHIFT); + edata->e_bits = (edata->e_bits & ~EDATA_BITS_NFREE_MASK) + | ((uint64_t)nfree << EDATA_BITS_NFREE_SHIFT); } static inline void edata_nfree_binshard_set(edata_t *edata, unsigned nfree, unsigned binshard) { /* The assertion assumes szind is set already. 
*/ assert(binshard < bin_infos[edata_szind_get(edata)].n_shards); - edata->e_bits = (edata->e_bits & - (~EDATA_BITS_NFREE_MASK & ~EDATA_BITS_BINSHARD_MASK)) | - ((uint64_t)binshard << EDATA_BITS_BINSHARD_SHIFT) | - ((uint64_t)nfree << EDATA_BITS_NFREE_SHIFT); + edata->e_bits = (edata->e_bits + & (~EDATA_BITS_NFREE_MASK + & ~EDATA_BITS_BINSHARD_MASK)) + | ((uint64_t)binshard << EDATA_BITS_BINSHARD_SHIFT) + | ((uint64_t)nfree << EDATA_BITS_NFREE_SHIFT); } static inline void @@ -572,38 +588,38 @@ edata_sn_set(edata_t *edata, uint64_t sn) { static inline void edata_state_set(edata_t *edata, extent_state_t state) { - edata->e_bits = (edata->e_bits & ~EDATA_BITS_STATE_MASK) | - ((uint64_t)state << EDATA_BITS_STATE_SHIFT); + edata->e_bits = (edata->e_bits & ~EDATA_BITS_STATE_MASK) + | ((uint64_t)state << EDATA_BITS_STATE_SHIFT); } static inline void edata_guarded_set(edata_t *edata, bool guarded) { - edata->e_bits = (edata->e_bits & ~EDATA_BITS_GUARDED_MASK) | - ((uint64_t)guarded << EDATA_BITS_GUARDED_SHIFT); + edata->e_bits = (edata->e_bits & ~EDATA_BITS_GUARDED_MASK) + | ((uint64_t)guarded << EDATA_BITS_GUARDED_SHIFT); } static inline void edata_zeroed_set(edata_t *edata, bool zeroed) { - edata->e_bits = (edata->e_bits & ~EDATA_BITS_ZEROED_MASK) | - ((uint64_t)zeroed << EDATA_BITS_ZEROED_SHIFT); + edata->e_bits = (edata->e_bits & ~EDATA_BITS_ZEROED_MASK) + | ((uint64_t)zeroed << EDATA_BITS_ZEROED_SHIFT); } static inline void edata_committed_set(edata_t *edata, bool committed) { - edata->e_bits = (edata->e_bits & ~EDATA_BITS_COMMITTED_MASK) | - ((uint64_t)committed << EDATA_BITS_COMMITTED_SHIFT); + edata->e_bits = (edata->e_bits & ~EDATA_BITS_COMMITTED_MASK) + | ((uint64_t)committed << EDATA_BITS_COMMITTED_SHIFT); } static inline void edata_pai_set(edata_t *edata, extent_pai_t pai) { - edata->e_bits = (edata->e_bits & ~EDATA_BITS_PAI_MASK) | - ((uint64_t)pai << EDATA_BITS_PAI_SHIFT); + edata->e_bits = (edata->e_bits & ~EDATA_BITS_PAI_MASK) + | ((uint64_t)pai << 
EDATA_BITS_PAI_SHIFT); } static inline void edata_slab_set(edata_t *edata, bool slab) { - edata->e_bits = (edata->e_bits & ~EDATA_BITS_SLAB_MASK) | - ((uint64_t)slab << EDATA_BITS_SLAB_SHIFT); + edata->e_bits = (edata->e_bits & ~EDATA_BITS_SLAB_MASK) + | ((uint64_t)slab << EDATA_BITS_SLAB_SHIFT); } static inline void @@ -622,22 +638,22 @@ edata_prof_alloc_size_set(edata_t *edata, size_t size) { } static inline void -edata_prof_recent_alloc_set_dont_call_directly(edata_t *edata, - prof_recent_t *recent_alloc) { +edata_prof_recent_alloc_set_dont_call_directly( + edata_t *edata, prof_recent_t *recent_alloc) { atomic_store_p(&edata->e_prof_info.e_prof_recent_alloc, recent_alloc, ATOMIC_RELAXED); } static inline bool edata_is_head_get(edata_t *edata) { - return (bool)((edata->e_bits & EDATA_BITS_IS_HEAD_MASK) >> - EDATA_BITS_IS_HEAD_SHIFT); + return (bool)((edata->e_bits & EDATA_BITS_IS_HEAD_MASK) + >> EDATA_BITS_IS_HEAD_SHIFT); } static inline void edata_is_head_set(edata_t *edata, bool is_head) { - edata->e_bits = (edata->e_bits & ~EDATA_BITS_IS_HEAD_MASK) | - ((uint64_t)is_head << EDATA_BITS_IS_HEAD_SHIFT); + edata->e_bits = (edata->e_bits & ~EDATA_BITS_IS_HEAD_MASK) + | ((uint64_t)is_head << EDATA_BITS_IS_HEAD_SHIFT); } static inline bool @@ -676,8 +692,8 @@ edata_init(edata_t *edata, unsigned arena_ind, void *addr, size_t size, } static inline void -edata_binit(edata_t *edata, void *addr, size_t bsize, uint64_t sn, - bool reused) { +edata_binit( + edata_t *edata, void *addr, size_t bsize, uint64_t sn, bool reused) { edata_arena_ind_set(edata, (1U << MALLOCX_ARENA_BITS) - 1); edata_addr_set(edata, addr); edata_bsize_set(edata, bsize); @@ -729,11 +745,13 @@ edata_cmp_summary_encode(edata_cmp_summary_t src) { static inline int edata_cmp_summary_comp(edata_cmp_summary_t a, edata_cmp_summary_t b) { - unsigned __int128 a_encoded = edata_cmp_summary_encode(a); - unsigned __int128 b_encoded = edata_cmp_summary_encode(b); - if (a_encoded < b_encoded) return -1; - if 
(a_encoded == b_encoded) return 0; - return 1; + unsigned __int128 a_encoded = edata_cmp_summary_encode(a); + unsigned __int128 b_encoded = edata_cmp_summary_encode(b); + if (a_encoded < b_encoded) + return -1; + if (a_encoded == b_encoded) + return 0; + return 1; } #else static inline int @@ -750,8 +768,8 @@ edata_cmp_summary_comp(edata_cmp_summary_t a, edata_cmp_summary_t b) { * prediction accuracy is not great. As a result, this implementation * is measurably faster (by around 30%). */ - return (2 * ((a.sn > b.sn) - (a.sn < b.sn))) + - ((a.addr > b.addr) - (a.addr < b.addr)); + return (2 * ((a.sn > b.sn) - (a.sn < b.sn))) + + ((a.addr > b.addr) - (a.addr < b.addr)); } #endif @@ -772,7 +790,6 @@ edata_esnead_comp(const edata_t *a, const edata_t *b) { return (2 * edata_esn_comp(a, b)) + edata_ead_comp(a, b); } -ph_proto(, edata_avail, edata_t) -ph_proto(, edata_heap, edata_t) +ph_proto(, edata_avail, edata_t) ph_proto(, edata_heap, edata_t) #endif /* JEMALLOC_INTERNAL_EDATA_H */ diff --git a/include/jemalloc/internal/edata_cache.h b/include/jemalloc/internal/edata_cache.h index b2c7b4f1..d92d90cb 100644 --- a/include/jemalloc/internal/edata_cache.h +++ b/include/jemalloc/internal/edata_cache.h @@ -15,13 +15,13 @@ typedef struct edata_cache_s edata_cache_t; struct edata_cache_s { - edata_avail_t avail; - atomic_zu_t count; + edata_avail_t avail; + atomic_zu_t count; malloc_mutex_t mtx; - base_t *base; + base_t *base; }; -bool edata_cache_init(edata_cache_t *edata_cache, base_t *base); +bool edata_cache_init(edata_cache_t *edata_cache, base_t *base); edata_t *edata_cache_get(tsdn_t *tsdn, edata_cache_t *edata_cache); void edata_cache_put(tsdn_t *tsdn, edata_cache_t *edata_cache, edata_t *edata); @@ -37,14 +37,14 @@ void edata_cache_postfork_child(tsdn_t *tsdn, edata_cache_t *edata_cache); typedef struct edata_cache_fast_s edata_cache_fast_t; struct edata_cache_fast_s { edata_list_inactive_t list; - edata_cache_t *fallback; - bool disabled; + edata_cache_t *fallback; 
+ bool disabled; }; void edata_cache_fast_init(edata_cache_fast_t *ecs, edata_cache_t *fallback); edata_t *edata_cache_fast_get(tsdn_t *tsdn, edata_cache_fast_t *ecs); -void edata_cache_fast_put(tsdn_t *tsdn, edata_cache_fast_t *ecs, - edata_t *edata); +void edata_cache_fast_put( + tsdn_t *tsdn, edata_cache_fast_t *ecs, edata_t *edata); void edata_cache_fast_disable(tsdn_t *tsdn, edata_cache_fast_t *ecs); #endif /* JEMALLOC_INTERNAL_EDATA_CACHE_H */ diff --git a/include/jemalloc/internal/ehooks.h b/include/jemalloc/internal/ehooks.h index 947e056c..c65e189a 100644 --- a/include/jemalloc/internal/ehooks.h +++ b/include/jemalloc/internal/ehooks.h @@ -46,10 +46,10 @@ extern const extent_hooks_t ehooks_default_extent_hooks; */ void *ehooks_default_alloc_impl(tsdn_t *tsdn, void *new_addr, size_t size, size_t alignment, bool *zero, bool *commit, unsigned arena_ind); -bool ehooks_default_dalloc_impl(void *addr, size_t size); -void ehooks_default_destroy_impl(void *addr, size_t size); -bool ehooks_default_commit_impl(void *addr, size_t offset, size_t length); -bool ehooks_default_decommit_impl(void *addr, size_t offset, size_t length); +bool ehooks_default_dalloc_impl(void *addr, size_t size); +void ehooks_default_destroy_impl(void *addr, size_t size); +bool ehooks_default_commit_impl(void *addr, size_t offset, size_t length); +bool ehooks_default_decommit_impl(void *addr, size_t offset, size_t length); #ifdef PAGES_CAN_PURGE_LAZY bool ehooks_default_purge_lazy_impl(void *addr, size_t offset, size_t length); #endif @@ -116,8 +116,8 @@ ehooks_get_extent_hooks_ptr(ehooks_t *ehooks) { static inline bool ehooks_are_default(ehooks_t *ehooks) { - return ehooks_get_extent_hooks_ptr(ehooks) == - &ehooks_default_extent_hooks; + return ehooks_get_extent_hooks_ptr(ehooks) + == &ehooks_default_extent_hooks; } /* @@ -189,16 +189,15 @@ ehooks_debug_zero_check(void *addr, size_t size) { } } - static inline void * ehooks_alloc(tsdn_t *tsdn, ehooks_t *ehooks, void *new_addr, size_t size, 
size_t alignment, bool *zero, bool *commit) { - bool orig_zero = *zero; - void *ret; + bool orig_zero = *zero; + void *ret; extent_hooks_t *extent_hooks = ehooks_get_extent_hooks_ptr(ehooks); if (extent_hooks == &ehooks_default_extent_hooks) { - ret = ehooks_default_alloc_impl(tsdn, new_addr, size, - alignment, zero, commit, ehooks_ind_get(ehooks)); + ret = ehooks_default_alloc_impl(tsdn, new_addr, size, alignment, + zero, commit, ehooks_ind_get(ehooks)); } else { ehooks_pre_reentrancy(tsdn); ret = extent_hooks->alloc(extent_hooks, new_addr, size, @@ -214,8 +213,8 @@ ehooks_alloc(tsdn_t *tsdn, ehooks_t *ehooks, void *new_addr, size_t size, } static inline bool -ehooks_dalloc(tsdn_t *tsdn, ehooks_t *ehooks, void *addr, size_t size, - bool committed) { +ehooks_dalloc( + tsdn_t *tsdn, ehooks_t *ehooks, void *addr, size_t size, bool committed) { extent_hooks_t *extent_hooks = ehooks_get_extent_hooks_ptr(ehooks); if (extent_hooks == &ehooks_default_extent_hooks) { return ehooks_default_dalloc_impl(addr, size); @@ -231,8 +230,8 @@ ehooks_dalloc(tsdn_t *tsdn, ehooks_t *ehooks, void *addr, size_t size, } static inline void -ehooks_destroy(tsdn_t *tsdn, ehooks_t *ehooks, void *addr, size_t size, - bool committed) { +ehooks_destroy( + tsdn_t *tsdn, ehooks_t *ehooks, void *addr, size_t size, bool committed) { extent_hooks_t *extent_hooks = ehooks_get_extent_hooks_ptr(ehooks); if (extent_hooks == &ehooks_default_extent_hooks) { ehooks_default_destroy_impl(addr, size); @@ -250,15 +249,15 @@ static inline bool ehooks_commit(tsdn_t *tsdn, ehooks_t *ehooks, void *addr, size_t size, size_t offset, size_t length) { extent_hooks_t *extent_hooks = ehooks_get_extent_hooks_ptr(ehooks); - bool err; + bool err; if (extent_hooks == &ehooks_default_extent_hooks) { err = ehooks_default_commit_impl(addr, offset, length); } else if (extent_hooks->commit == NULL) { err = true; } else { ehooks_pre_reentrancy(tsdn); - err = extent_hooks->commit(extent_hooks, addr, size, - offset, length, 
ehooks_ind_get(ehooks)); + err = extent_hooks->commit(extent_hooks, addr, size, offset, + length, ehooks_ind_get(ehooks)); ehooks_post_reentrancy(tsdn); } if (!err) { @@ -384,7 +383,7 @@ ehooks_zero(tsdn_t *tsdn, ehooks_t *ehooks, void *addr, size_t size) { static inline bool ehooks_guard(tsdn_t *tsdn, ehooks_t *ehooks, void *guard1, void *guard2) { - bool err; + bool err; extent_hooks_t *extent_hooks = ehooks_get_extent_hooks_ptr(ehooks); if (extent_hooks == &ehooks_default_extent_hooks) { @@ -399,7 +398,7 @@ ehooks_guard(tsdn_t *tsdn, ehooks_t *ehooks, void *guard1, void *guard2) { static inline bool ehooks_unguard(tsdn_t *tsdn, ehooks_t *ehooks, void *guard1, void *guard2) { - bool err; + bool err; extent_hooks_t *extent_hooks = ehooks_get_extent_hooks_ptr(ehooks); if (extent_hooks == &ehooks_default_extent_hooks) { diff --git a/include/jemalloc/internal/emap.h b/include/jemalloc/internal/emap.h index fba46abe..88692356 100644 --- a/include/jemalloc/internal/emap.h +++ b/include/jemalloc/internal/emap.h @@ -10,9 +10,9 @@ * EMAP_DECLARE_RTREE_CTX; * in uses will avoid empty-statement warnings. */ -#define EMAP_DECLARE_RTREE_CTX \ - rtree_ctx_t rtree_ctx_fallback; \ - rtree_ctx_t *rtree_ctx = tsdn_rtree_ctx(tsdn, &rtree_ctx_fallback) +#define EMAP_DECLARE_RTREE_CTX \ + rtree_ctx_t rtree_ctx_fallback; \ + rtree_ctx_t *rtree_ctx = tsdn_rtree_ctx(tsdn, &rtree_ctx_fallback) typedef struct emap_s emap_t; struct emap_s { @@ -22,25 +22,25 @@ struct emap_s { /* Used to pass rtree lookup context down the path. 
*/ typedef struct emap_alloc_ctx_s emap_alloc_ctx_t; struct emap_alloc_ctx_s { - size_t usize; + size_t usize; szind_t szind; - bool slab; + bool slab; }; typedef struct emap_full_alloc_ctx_s emap_full_alloc_ctx_t; struct emap_full_alloc_ctx_s { - szind_t szind; - bool slab; + szind_t szind; + bool slab; edata_t *edata; }; bool emap_init(emap_t *emap, base_t *base, bool zeroed); -void emap_remap(tsdn_t *tsdn, emap_t *emap, edata_t *edata, szind_t szind, - bool slab); +void emap_remap( + tsdn_t *tsdn, emap_t *emap, edata_t *edata, szind_t szind, bool slab); -void emap_update_edata_state(tsdn_t *tsdn, emap_t *emap, edata_t *edata, - extent_state_t state); +void emap_update_edata_state( + tsdn_t *tsdn, emap_t *emap, edata_t *edata, extent_state_t state); /* * The two acquire functions below allow accessing neighbor edatas, if it's safe @@ -62,16 +62,16 @@ edata_t *emap_try_acquire_edata_neighbor(tsdn_t *tsdn, emap_t *emap, bool forward); edata_t *emap_try_acquire_edata_neighbor_expand(tsdn_t *tsdn, emap_t *emap, edata_t *edata, extent_pai_t pai, extent_state_t expected_state); -void emap_release_edata(tsdn_t *tsdn, emap_t *emap, edata_t *edata, - extent_state_t new_state); +void emap_release_edata( + tsdn_t *tsdn, emap_t *emap, edata_t *edata, extent_state_t new_state); /* * Associate the given edata with its beginning and end address, setting the * szind and slab info appropriately. * Returns true on error (i.e. resource exhaustion). */ -bool emap_register_boundary(tsdn_t *tsdn, emap_t *emap, edata_t *edata, - szind_t szind, bool slab); +bool emap_register_boundary( + tsdn_t *tsdn, emap_t *emap, edata_t *edata, szind_t szind, bool slab); /* * Does the same thing, but with the interior of the range, for slab @@ -92,8 +92,8 @@ bool emap_register_boundary(tsdn_t *tsdn, emap_t *emap, edata_t *edata, * touched, so no allocation is necessary to fill the interior once the boundary * has been touched. 
*/ -void emap_register_interior(tsdn_t *tsdn, emap_t *emap, edata_t *edata, - szind_t szind); +void emap_register_interior( + tsdn_t *tsdn, emap_t *emap, edata_t *edata, szind_t szind); void emap_deregister_boundary(tsdn_t *tsdn, emap_t *emap, edata_t *edata); void emap_deregister_interior(tsdn_t *tsdn, emap_t *emap, edata_t *edata); @@ -161,8 +161,8 @@ emap_edata_in_transition(tsdn_t *tsdn, emap_t *emap, edata_t *edata) { emap_assert_mapped(tsdn, emap, edata); EMAP_DECLARE_RTREE_CTX; - rtree_contents_t contents = rtree_read(tsdn, &emap->rtree, rtree_ctx, - (uintptr_t)edata_base_get(edata)); + rtree_contents_t contents = rtree_read( + tsdn, &emap->rtree, rtree_ctx, (uintptr_t)edata_base_get(edata)); return edata_state_in_transition(contents.metadata.state); } @@ -194,9 +194,9 @@ emap_edata_is_acquired(tsdn_t *tsdn, emap_t *emap, edata_t *edata) { } rtree_contents_t contents = rtree_leaf_elm_read(tsdn, &emap->rtree, elm, /* dependent */ false); - if (contents.edata == NULL || - contents.metadata.state == extent_state_active || - edata_state_in_transition(contents.metadata.state)) { + if (contents.edata == NULL + || contents.metadata.state == extent_state_active + || edata_state_in_transition(contents.metadata.state)) { return true; } @@ -211,8 +211,8 @@ extent_assert_can_coalesce(const edata_t *inner, const edata_t *outer) { assert(edata_state_get(inner) == extent_state_active); assert(edata_state_get(outer) == extent_state_merging); assert(!edata_guarded_get(inner) && !edata_guarded_get(outer)); - assert(edata_base_get(inner) == edata_past_get(outer) || - edata_base_get(outer) == edata_past_get(inner)); + assert(edata_base_get(inner) == edata_past_get(outer) + || edata_base_get(outer) == edata_past_get(inner)); } JEMALLOC_ALWAYS_INLINE void @@ -232,13 +232,13 @@ emap_edata_lookup(tsdn_t *tsdn, emap_t *emap, const void *ptr) { } JEMALLOC_ALWAYS_INLINE void -emap_alloc_ctx_init(emap_alloc_ctx_t *alloc_ctx, szind_t szind, bool slab, - size_t usize) { 
+emap_alloc_ctx_init( + emap_alloc_ctx_t *alloc_ctx, szind_t szind, bool slab, size_t usize) { alloc_ctx->szind = szind; alloc_ctx->slab = slab; alloc_ctx->usize = usize; - assert(sz_large_size_classes_disabled() || - usize == sz_index2size(szind)); + assert( + sz_large_size_classes_disabled() || usize == sz_index2size(szind)); } JEMALLOC_ALWAYS_INLINE size_t @@ -248,27 +248,29 @@ emap_alloc_ctx_usize_get(emap_alloc_ctx_t *alloc_ctx) { assert(alloc_ctx->usize == sz_index2size(alloc_ctx->szind)); return sz_index2size(alloc_ctx->szind); } - assert(sz_large_size_classes_disabled() || - alloc_ctx->usize == sz_index2size(alloc_ctx->szind)); + assert(sz_large_size_classes_disabled() + || alloc_ctx->usize == sz_index2size(alloc_ctx->szind)); assert(alloc_ctx->usize <= SC_LARGE_MAXCLASS); return alloc_ctx->usize; } /* Fills in alloc_ctx with the info in the map. */ JEMALLOC_ALWAYS_INLINE void -emap_alloc_ctx_lookup(tsdn_t *tsdn, emap_t *emap, const void *ptr, - emap_alloc_ctx_t *alloc_ctx) { +emap_alloc_ctx_lookup( + tsdn_t *tsdn, emap_t *emap, const void *ptr, emap_alloc_ctx_t *alloc_ctx) { EMAP_DECLARE_RTREE_CTX; - rtree_contents_t contents = rtree_read(tsdn, &emap->rtree, - rtree_ctx, (uintptr_t)ptr); + rtree_contents_t contents = rtree_read( + tsdn, &emap->rtree, rtree_ctx, (uintptr_t)ptr); /* * If the alloc is invalid, do not calculate usize since edata * could be corrupted. */ emap_alloc_ctx_init(alloc_ctx, contents.metadata.szind, - contents.metadata.slab, (contents.metadata.szind == SC_NSIZES - || contents.edata == NULL)? 0: edata_usize_get(contents.edata)); + contents.metadata.slab, + (contents.metadata.szind == SC_NSIZES || contents.edata == NULL) + ? 0 + : edata_usize_get(contents.edata)); } /* The pointer must be mapped. 
*/ @@ -277,8 +279,8 @@ emap_full_alloc_ctx_lookup(tsdn_t *tsdn, emap_t *emap, const void *ptr, emap_full_alloc_ctx_t *full_alloc_ctx) { EMAP_DECLARE_RTREE_CTX; - rtree_contents_t contents = rtree_read(tsdn, &emap->rtree, rtree_ctx, - (uintptr_t)ptr); + rtree_contents_t contents = rtree_read( + tsdn, &emap->rtree, rtree_ctx, (uintptr_t)ptr); full_alloc_ctx->edata = contents.edata; full_alloc_ctx->szind = contents.metadata.szind; full_alloc_ctx->slab = contents.metadata.slab; @@ -295,8 +297,8 @@ emap_full_alloc_ctx_try_lookup(tsdn_t *tsdn, emap_t *emap, const void *ptr, EMAP_DECLARE_RTREE_CTX; rtree_contents_t contents; - bool err = rtree_read_independent(tsdn, &emap->rtree, rtree_ctx, - (uintptr_t)ptr, &contents); + bool err = rtree_read_independent( + tsdn, &emap->rtree, rtree_ctx, (uintptr_t)ptr, &contents); if (err) { return true; } @@ -311,14 +313,14 @@ emap_full_alloc_ctx_try_lookup(tsdn_t *tsdn, emap_t *emap, const void *ptr, * fast path, e.g. when the metadata key is not cached. */ JEMALLOC_ALWAYS_INLINE bool -emap_alloc_ctx_try_lookup_fast(tsd_t *tsd, emap_t *emap, const void *ptr, - emap_alloc_ctx_t *alloc_ctx) { +emap_alloc_ctx_try_lookup_fast( + tsd_t *tsd, emap_t *emap, const void *ptr, emap_alloc_ctx_t *alloc_ctx) { /* Use the unsafe getter since this may gets called during exit. */ rtree_ctx_t *rtree_ctx = tsd_rtree_ctxp_get_unsafe(tsd); rtree_metadata_t metadata; - bool err = rtree_metadata_try_read_fast(tsd_tsdn(tsd), &emap->rtree, - rtree_ctx, (uintptr_t)ptr, &metadata); + bool err = rtree_metadata_try_read_fast( + tsd_tsdn(tsd), &emap->rtree, rtree_ctx, (uintptr_t)ptr, &metadata); if (err) { return true; } @@ -345,11 +347,12 @@ typedef const void *(*emap_ptr_getter)(void *ctx, size_t ind); * This allows size-checking assertions, which we can only do while we're in the * process of edata lookups. 
*/ -typedef void (*emap_metadata_visitor)(void *ctx, emap_full_alloc_ctx_t *alloc_ctx); +typedef void (*emap_metadata_visitor)( + void *ctx, emap_full_alloc_ctx_t *alloc_ctx); typedef union emap_batch_lookup_result_u emap_batch_lookup_result_t; union emap_batch_lookup_result_u { - edata_t *edata; + edata_t *edata; rtree_leaf_elm_t *rtree_leaf; }; @@ -375,8 +378,8 @@ emap_edata_lookup_batch(tsd_t *tsd, emap_t *emap, size_t nptrs, for (size_t i = 0; i < nptrs; i++) { rtree_leaf_elm_t *elm = result[i].rtree_leaf; - rtree_contents_t contents = rtree_leaf_elm_read(tsd_tsdn(tsd), - &emap->rtree, elm, /* dependent */ true); + rtree_contents_t contents = rtree_leaf_elm_read( + tsd_tsdn(tsd), &emap->rtree, elm, /* dependent */ true); result[i].edata = contents.edata; emap_full_alloc_ctx_t alloc_ctx; /* diff --git a/include/jemalloc/internal/emitter.h b/include/jemalloc/internal/emitter.h index 11153254..a4073e6a 100644 --- a/include/jemalloc/internal/emitter.h +++ b/include/jemalloc/internal/emitter.h @@ -44,18 +44,18 @@ typedef struct emitter_col_s emitter_col_t; struct emitter_col_s { /* Filled in by the user. */ emitter_justify_t justify; - int width; - emitter_type_t type; + int width; + emitter_type_t type; union { - bool bool_val; - int int_val; - unsigned unsigned_val; - uint32_t uint32_val; - uint32_t uint32_t_val; - uint64_t uint64_val; - uint64_t uint64_t_val; - size_t size_val; - ssize_t ssize_val; + bool bool_val; + int int_val; + unsigned unsigned_val; + uint32_t uint32_val; + uint32_t uint32_t_val; + uint64_t uint64_val; + uint64_t uint64_t_val; + size_t size_val; + ssize_t ssize_val; const char *str_val; }; @@ -73,8 +73,8 @@ struct emitter_s { emitter_output_t output; /* The output information. */ write_cb_t *write_cb; - void *cbopaque; - int nesting_depth; + void *cbopaque; + int nesting_depth; /* True if we've already emitted a value at the given depth. */ bool item_at_depth; /* True if we emitted a key and will emit corresponding value next. 
*/ @@ -83,8 +83,8 @@ struct emitter_s { static inline bool emitter_outputs_json(emitter_t *emitter) { - return emitter->output == emitter_output_json || - emitter->output == emitter_output_json_compact; + return emitter->output == emitter_output_json + || emitter->output == emitter_output_json_compact; } /* Internal convenience function. Write to the emitter the given string. */ @@ -98,23 +98,23 @@ emitter_printf(emitter_t *emitter, const char *format, ...) { va_end(ap); } -static inline const char * JEMALLOC_FORMAT_ARG(3) -emitter_gen_fmt(char *out_fmt, size_t out_size, const char *fmt_specifier, - emitter_justify_t justify, int width) { +static inline const char * +JEMALLOC_FORMAT_ARG(3) emitter_gen_fmt(char *out_fmt, size_t out_size, + const char *fmt_specifier, emitter_justify_t justify, int width) { size_t written; fmt_specifier++; if (justify == emitter_justify_none) { - written = malloc_snprintf(out_fmt, out_size, - "%%%s", fmt_specifier); + written = malloc_snprintf( + out_fmt, out_size, "%%%s", fmt_specifier); } else if (justify == emitter_justify_left) { - written = malloc_snprintf(out_fmt, out_size, - "%%-%d%s", width, fmt_specifier); + written = malloc_snprintf( + out_fmt, out_size, "%%-%d%s", width, fmt_specifier); } else { - written = malloc_snprintf(out_fmt, out_size, - "%%%d%s", width, fmt_specifier); + written = malloc_snprintf( + out_fmt, out_size, "%%%d%s", width, fmt_specifier); } /* Only happens in case of bad format string, which *we* choose. 
*/ - assert(written < out_size); + assert(written < out_size); return out_fmt; } @@ -122,10 +122,10 @@ static inline void emitter_emit_str(emitter_t *emitter, emitter_justify_t justify, int width, char *fmt, size_t fmt_size, const char *str) { #define BUF_SIZE 256 - char buf[BUF_SIZE]; + char buf[BUF_SIZE]; size_t str_written = malloc_snprintf(buf, BUF_SIZE, "\"%s\"", str); - emitter_printf(emitter, - emitter_gen_fmt(fmt, fmt_size, "%s", justify, width), buf); + emitter_printf( + emitter, emitter_gen_fmt(fmt, fmt_size, "%s", justify, width), buf); if (str_written < BUF_SIZE) { return; } @@ -168,16 +168,16 @@ emitter_print_value(emitter_t *emitter, emitter_justify_t justify, int width, */ char fmt[FMT_SIZE]; -#define EMIT_SIMPLE(type, format) \ - emitter_printf(emitter, \ - emitter_gen_fmt(fmt, FMT_SIZE, format, justify, width), \ +#define EMIT_SIMPLE(type, format) \ + emitter_printf(emitter, \ + emitter_gen_fmt(fmt, FMT_SIZE, format, justify, width), \ *(const type *)value); switch (value_type) { case emitter_type_bool: emitter_printf(emitter, emitter_gen_fmt(fmt, FMT_SIZE, "%s", justify, width), - *(const bool *)value ? "true" : "false"); + *(const bool *)value ? "true" : "false"); break; case emitter_type_int: EMIT_SIMPLE(int, "%d") @@ -213,7 +213,6 @@ emitter_print_value(emitter_t *emitter, emitter_justify_t justify, int width, #undef FMT_SIZE } - /* Internal functions. In json mode, tracks nesting state. 
*/ static inline void emitter_nest_inc(emitter_t *emitter) { @@ -229,7 +228,7 @@ emitter_nest_dec(emitter_t *emitter) { static inline void emitter_indent(emitter_t *emitter) { - int amount = emitter->nesting_depth; + int amount = emitter->nesting_depth; const char *indent_str; assert(emitter->output != emitter_output_json_compact); if (emitter->output == emitter_output_json) { @@ -291,12 +290,12 @@ emitter_json_key(emitter_t *emitter, const char *json_key) { } static inline void -emitter_json_value(emitter_t *emitter, emitter_type_t value_type, - const void *value) { +emitter_json_value( + emitter_t *emitter, emitter_type_t value_type, const void *value) { if (emitter_outputs_json(emitter)) { emitter_json_key_prefix(emitter); - emitter_print_value(emitter, emitter_justify_none, -1, - value_type, value); + emitter_print_value( + emitter, emitter_justify_none, -1, value_type, value); emitter->item_at_depth = true; } } @@ -367,7 +366,6 @@ emitter_json_object_end(emitter_t *emitter) { } } - /******************************************************************************/ /* Table public API. 
*/ @@ -389,14 +387,13 @@ emitter_table_dict_end(emitter_t *emitter) { static inline void emitter_table_kv_note(emitter_t *emitter, const char *table_key, - emitter_type_t value_type, const void *value, - const char *table_note_key, emitter_type_t table_note_value_type, - const void *table_note_value) { + emitter_type_t value_type, const void *value, const char *table_note_key, + emitter_type_t table_note_value_type, const void *table_note_value) { if (emitter->output == emitter_output_table) { emitter_indent(emitter); emitter_printf(emitter, "%s: ", table_key); - emitter_print_value(emitter, emitter_justify_none, -1, - value_type, value); + emitter_print_value( + emitter, emitter_justify_none, -1, value_type, value); if (table_note_key != NULL) { emitter_printf(emitter, " (%s: ", table_note_key); emitter_print_value(emitter, emitter_justify_none, -1, @@ -415,7 +412,6 @@ emitter_table_kv(emitter_t *emitter, const char *table_key, emitter_type_bool, NULL); } - /* Write to the emitter the given string, but only in table mode. */ JEMALLOC_FORMAT_PRINTF(2, 3) static inline void @@ -423,7 +419,8 @@ emitter_table_printf(emitter_t *emitter, const char *format, ...) { if (emitter->output == emitter_output_table) { va_list ap; va_start(ap, format); - malloc_vcprintf(emitter->write_cb, emitter->cbopaque, format, ap); + malloc_vcprintf( + emitter->write_cb, emitter->cbopaque, format, ap); va_end(ap); } } @@ -434,7 +431,7 @@ emitter_table_row(emitter_t *emitter, emitter_row_t *row) { return; } emitter_col_t *col; - ql_foreach(col, &row->cols, link) { + ql_foreach (col, &row->cols, link) { emitter_print_value(emitter, col->justify, col->width, col->type, (const void *)&col->bool_val); } @@ -452,7 +449,6 @@ emitter_col_init(emitter_col_t *col, emitter_row_t *row) { ql_tail_insert(&row->cols, col, link); } - /******************************************************************************/ /* * Generalized public API. 
Emits using either JSON or table, according to @@ -464,9 +460,8 @@ emitter_col_init(emitter_col_t *col, emitter_row_t *row) { */ static inline void emitter_kv_note(emitter_t *emitter, const char *json_key, const char *table_key, - emitter_type_t value_type, const void *value, - const char *table_note_key, emitter_type_t table_note_value_type, - const void *table_note_value) { + emitter_type_t value_type, const void *value, const char *table_note_key, + emitter_type_t table_note_value_type, const void *table_note_value) { if (emitter_outputs_json(emitter)) { emitter_json_key(emitter, json_key); emitter_json_value(emitter, value_type, value); @@ -485,8 +480,8 @@ emitter_kv(emitter_t *emitter, const char *json_key, const char *table_key, } static inline void -emitter_dict_begin(emitter_t *emitter, const char *json_key, - const char *table_header) { +emitter_dict_begin( + emitter_t *emitter, const char *json_key, const char *table_header) { if (emitter_outputs_json(emitter)) { emitter_json_key(emitter, json_key); emitter_json_object_begin(emitter); @@ -526,8 +521,9 @@ emitter_end(emitter_t *emitter) { if (emitter_outputs_json(emitter)) { assert(emitter->nesting_depth == 1); emitter_nest_dec(emitter); - emitter_printf(emitter, "%s", emitter->output == - emitter_output_json_compact ? "}" : "\n}\n"); + emitter_printf(emitter, "%s", + emitter->output == emitter_output_json_compact ? "}" + : "\n}\n"); } } diff --git a/include/jemalloc/internal/exp_grow.h b/include/jemalloc/internal/exp_grow.h index 40a1add0..8206ba85 100644 --- a/include/jemalloc/internal/exp_grow.h +++ b/include/jemalloc/internal/exp_grow.h @@ -27,8 +27,7 @@ exp_grow_size_prepare(exp_grow_t *exp_grow, size_t alloc_size_min, *r_alloc_size = sz_pind2sz(exp_grow->next + *r_skip); while (*r_alloc_size < alloc_size_min) { (*r_skip)++; - if (exp_grow->next + *r_skip >= - sz_psz2ind(SC_LARGE_MAXCLASS)) { + if (exp_grow->next + *r_skip >= sz_psz2ind(SC_LARGE_MAXCLASS)) { /* Outside legal range. 
*/ return true; } @@ -44,7 +43,6 @@ exp_grow_size_commit(exp_grow_t *exp_grow, pszind_t skip) { } else { exp_grow->next = exp_grow->limit; } - } void exp_grow_init(exp_grow_t *exp_grow); diff --git a/include/jemalloc/internal/extent.h b/include/jemalloc/internal/extent.h index be61db8d..e81dff2c 100644 --- a/include/jemalloc/internal/extent.h +++ b/include/jemalloc/internal/extent.h @@ -26,9 +26,10 @@ extern size_t opt_process_madvise_max_batch; #ifdef JEMALLOC_HAVE_PROCESS_MADVISE /* The iovec is on stack. Limit the max batch to avoid stack overflow. */ -#define PROCESS_MADVISE_MAX_BATCH_LIMIT (VARIABLE_ARRAY_SIZE_MAX / sizeof(struct iovec)) +# define PROCESS_MADVISE_MAX_BATCH_LIMIT \ + (VARIABLE_ARRAY_SIZE_MAX / sizeof(struct iovec)) #else -#define PROCESS_MADVISE_MAX_BATCH_LIMIT 0 +# define PROCESS_MADVISE_MAX_BATCH_LIMIT 0 #endif edata_t *ecache_alloc(tsdn_t *tsdn, pac_t *pac, ehooks_t *ehooks, @@ -37,44 +38,43 @@ edata_t *ecache_alloc(tsdn_t *tsdn, pac_t *pac, ehooks_t *ehooks, edata_t *ecache_alloc_grow(tsdn_t *tsdn, pac_t *pac, ehooks_t *ehooks, ecache_t *ecache, edata_t *expand_edata, size_t size, size_t alignment, bool zero, bool guarded); -void ecache_dalloc(tsdn_t *tsdn, pac_t *pac, ehooks_t *ehooks, - ecache_t *ecache, edata_t *edata); +void ecache_dalloc(tsdn_t *tsdn, pac_t *pac, ehooks_t *ehooks, ecache_t *ecache, + edata_t *edata); edata_t *ecache_evict(tsdn_t *tsdn, pac_t *pac, ehooks_t *ehooks, ecache_t *ecache, size_t npages_min); void extent_gdump_add(tsdn_t *tsdn, const edata_t *edata); void extent_record(tsdn_t *tsdn, pac_t *pac, ehooks_t *ehooks, ecache_t *ecache, edata_t *edata); -void extent_dalloc_gap(tsdn_t *tsdn, pac_t *pac, ehooks_t *ehooks, - edata_t *edata); +void extent_dalloc_gap( + tsdn_t *tsdn, pac_t *pac, ehooks_t *ehooks, edata_t *edata); edata_t *extent_alloc_wrapper(tsdn_t *tsdn, pac_t *pac, ehooks_t *ehooks, void *new_addr, size_t size, size_t alignment, bool zero, bool *commit, bool growing_retained); -void 
extent_dalloc_wrapper(tsdn_t *tsdn, pac_t *pac, ehooks_t *ehooks, - edata_t *edata); -void extent_dalloc_wrapper_purged(tsdn_t *tsdn, pac_t *pac, ehooks_t *ehooks, - edata_t *edata); -void extent_destroy_wrapper(tsdn_t *tsdn, pac_t *pac, ehooks_t *ehooks, - edata_t *edata); +void extent_dalloc_wrapper( + tsdn_t *tsdn, pac_t *pac, ehooks_t *ehooks, edata_t *edata); +void extent_dalloc_wrapper_purged( + tsdn_t *tsdn, pac_t *pac, ehooks_t *ehooks, edata_t *edata); +void extent_destroy_wrapper( + tsdn_t *tsdn, pac_t *pac, ehooks_t *ehooks, edata_t *edata); bool extent_commit_wrapper(tsdn_t *tsdn, ehooks_t *ehooks, edata_t *edata, size_t offset, size_t length); bool extent_purge_lazy_wrapper(tsdn_t *tsdn, ehooks_t *ehooks, edata_t *edata, size_t offset, size_t length); bool extent_purge_forced_wrapper(tsdn_t *tsdn, ehooks_t *ehooks, edata_t *edata, size_t offset, size_t length); -edata_t *extent_split_wrapper(tsdn_t *tsdn, pac_t *pac, - ehooks_t *ehooks, edata_t *edata, size_t size_a, size_t size_b, - bool holding_core_locks); -bool extent_merge_wrapper(tsdn_t *tsdn, pac_t *pac, ehooks_t *ehooks, - edata_t *a, edata_t *b); -bool extent_commit_zero(tsdn_t *tsdn, ehooks_t *ehooks, edata_t *edata, - bool commit, bool zero, bool growing_retained); +edata_t *extent_split_wrapper(tsdn_t *tsdn, pac_t *pac, ehooks_t *ehooks, + edata_t *edata, size_t size_a, size_t size_b, bool holding_core_locks); +bool extent_merge_wrapper( + tsdn_t *tsdn, pac_t *pac, ehooks_t *ehooks, edata_t *a, edata_t *b); +bool extent_commit_zero(tsdn_t *tsdn, ehooks_t *ehooks, edata_t *edata, + bool commit, bool zero, bool growing_retained); size_t extent_sn_next(pac_t *pac); -bool extent_boot(void); +bool extent_boot(void); JEMALLOC_ALWAYS_INLINE bool -extent_neighbor_head_state_mergeable(bool edata_is_head, - bool neighbor_is_head, bool forward) { +extent_neighbor_head_state_mergeable( + bool edata_is_head, bool neighbor_is_head, bool forward) { /* * Head states checking: disallow merging if the higher 
addr extent is a * head extent. This helps preserve first-fit, and more importantly @@ -102,8 +102,8 @@ extent_can_acquire_neighbor(edata_t *edata, rtree_contents_t contents, } /* It's not safe to access *neighbor yet; must verify states first. */ bool neighbor_is_head = contents.metadata.is_head; - if (!extent_neighbor_head_state_mergeable(edata_is_head_get(edata), - neighbor_is_head, forward)) { + if (!extent_neighbor_head_state_mergeable( + edata_is_head_get(edata), neighbor_is_head, forward)) { return false; } extent_state_t neighbor_state = contents.metadata.state; @@ -112,8 +112,9 @@ extent_can_acquire_neighbor(edata_t *edata, rtree_contents_t contents, return false; } /* From this point, it's safe to access *neighbor. */ - if (!expanding && (edata_committed_get(edata) != - edata_committed_get(neighbor))) { + if (!expanding + && (edata_committed_get(edata) + != edata_committed_get(neighbor))) { /* * Some platforms (e.g. Windows) require an explicit * commit step (and writing to uncommitted memory is not @@ -133,11 +134,11 @@ extent_can_acquire_neighbor(edata_t *edata, rtree_contents_t contents, return false; } if (opt_retain) { - assert(edata_arena_ind_get(edata) == - edata_arena_ind_get(neighbor)); + assert(edata_arena_ind_get(edata) + == edata_arena_ind_get(neighbor)); } else { - if (edata_arena_ind_get(edata) != - edata_arena_ind_get(neighbor)) { + if (edata_arena_ind_get(edata) + != edata_arena_ind_get(neighbor)) { return false; } } diff --git a/include/jemalloc/internal/extent_dss.h b/include/jemalloc/internal/extent_dss.h index c8e71e82..4bb3f51d 100644 --- a/include/jemalloc/internal/extent_dss.h +++ b/include/jemalloc/internal/extent_dss.h @@ -6,11 +6,11 @@ #include "jemalloc/internal/tsd_types.h" typedef enum { - dss_prec_disabled = 0, - dss_prec_primary = 1, + dss_prec_disabled = 0, + dss_prec_primary = 1, dss_prec_secondary = 2, - dss_prec_limit = 3 + dss_prec_limit = 3 } dss_prec_t; #define DSS_PREC_DEFAULT dss_prec_secondary #define DSS_DEFAULT 
"secondary" @@ -20,11 +20,11 @@ extern const char *const dss_prec_names[]; extern const char *opt_dss; dss_prec_t extent_dss_prec_get(void); -bool extent_dss_prec_set(dss_prec_t dss_prec); -void *extent_alloc_dss(tsdn_t *tsdn, arena_t *arena, void *new_addr, - size_t size, size_t alignment, bool *zero, bool *commit); -bool extent_in_dss(void *addr); -bool extent_dss_mergeable(void *addr_a, void *addr_b); -void extent_dss_boot(void); +bool extent_dss_prec_set(dss_prec_t dss_prec); +void *extent_alloc_dss(tsdn_t *tsdn, arena_t *arena, void *new_addr, + size_t size, size_t alignment, bool *zero, bool *commit); +bool extent_in_dss(void *addr); +bool extent_dss_mergeable(void *addr_a, void *addr_b); +void extent_dss_boot(void); #endif /* JEMALLOC_INTERNAL_EXTENT_DSS_H */ diff --git a/include/jemalloc/internal/extent_mmap.h b/include/jemalloc/internal/extent_mmap.h index e6a4649e..aa469896 100644 --- a/include/jemalloc/internal/extent_mmap.h +++ b/include/jemalloc/internal/extent_mmap.h @@ -5,8 +5,8 @@ extern bool opt_retain; -void *extent_alloc_mmap(void *new_addr, size_t size, size_t alignment, - bool *zero, bool *commit); +void *extent_alloc_mmap( + void *new_addr, size_t size, size_t alignment, bool *zero, bool *commit); bool extent_dalloc_mmap(void *addr, size_t size); #endif /* JEMALLOC_INTERNAL_EXTENT_MMAP_EXTERNS_H */ diff --git a/include/jemalloc/internal/fb.h b/include/jemalloc/internal/fb.h index e38095af..bf76f362 100644 --- a/include/jemalloc/internal/fb.h +++ b/include/jemalloc/internal/fb.h @@ -15,8 +15,8 @@ typedef unsigned long fb_group_t; #define FB_GROUP_BITS (ZU(1) << (LG_SIZEOF_LONG + 3)) -#define FB_NGROUPS(nbits) ((nbits) / FB_GROUP_BITS \ - + ((nbits) % FB_GROUP_BITS == 0 ? 0 : 1)) +#define FB_NGROUPS(nbits) \ + ((nbits) / FB_GROUP_BITS + ((nbits) % FB_GROUP_BITS == 0 ? 
0 : 1)) static inline void fb_init(fb_group_t *fb, size_t nbits) { @@ -75,7 +75,6 @@ fb_unset(fb_group_t *fb, size_t nbits, size_t bit) { fb[group_ind] &= ~((fb_group_t)1 << bit_ind); } - /* * Some implementation details. This visitation function lets us apply a group * visitor to each group in the bitmap (potentially modifying it). The mask @@ -94,7 +93,8 @@ fb_visit_impl(fb_group_t *fb, size_t nbits, fb_group_visitor_t visit, void *ctx, * to from bit 0. */ size_t first_group_cnt = (start_bit_ind + cnt > FB_GROUP_BITS - ? FB_GROUP_BITS - start_bit_ind : cnt); + ? FB_GROUP_BITS - start_bit_ind + : cnt); /* * We can basically split affected words into: * - The first group, where we touch only the high bits @@ -104,8 +104,8 @@ fb_visit_impl(fb_group_t *fb, size_t nbits, fb_group_visitor_t visit, void *ctx, * this can lead to bad codegen for those middle words. */ /* First group */ - fb_group_t mask = ((~(fb_group_t)0) - >> (FB_GROUP_BITS - first_group_cnt)) + fb_group_t mask = + ((~(fb_group_t)0) >> (FB_GROUP_BITS - first_group_cnt)) << start_bit_ind; visit(ctx, &fb[group_ind], mask); @@ -176,12 +176,12 @@ fb_ucount(fb_group_t *fb, size_t nbits, size_t start, size_t cnt) { * Returns the number of bits in the bitmap if no such bit exists. */ JEMALLOC_ALWAYS_INLINE ssize_t -fb_find_impl(fb_group_t *fb, size_t nbits, size_t start, bool val, - bool forward) { +fb_find_impl( + fb_group_t *fb, size_t nbits, size_t start, bool val, bool forward) { assert(start < nbits); - size_t ngroups = FB_NGROUPS(nbits); + size_t ngroups = FB_NGROUPS(nbits); ssize_t group_ind = start / FB_GROUP_BITS; - size_t bit_ind = start % FB_GROUP_BITS; + size_t bit_ind = start % FB_GROUP_BITS; fb_group_t maybe_invert = (val ? 0 : (fb_group_t)-1); @@ -265,8 +265,8 @@ fb_iter_range_impl(fb_group_t *fb, size_t nbits, size_t start, size_t *r_begin, return false; } /* Half open range; the set bits are [begin, end). 
*/ - ssize_t next_range_end = fb_find_impl(fb, nbits, next_range_begin, !val, - forward); + ssize_t next_range_end = fb_find_impl( + fb, nbits, next_range_begin, !val, forward); if (forward) { *r_begin = next_range_begin; *r_len = next_range_end - next_range_begin; @@ -324,8 +324,9 @@ fb_range_longest_impl(fb_group_t *fb, size_t nbits, bool val) { size_t begin = 0; size_t longest_len = 0; size_t len = 0; - while (begin < nbits && fb_iter_range_impl(fb, nbits, begin, &begin, - &len, val, /* forward */ true)) { + while (begin < nbits + && fb_iter_range_impl( + fb, nbits, begin, &begin, &len, val, /* forward */ true)) { if (len > longest_len) { longest_len = len; } diff --git a/include/jemalloc/internal/fxp.h b/include/jemalloc/internal/fxp.h index e42425f9..8ca4f3c6 100644 --- a/include/jemalloc/internal/fxp.h +++ b/include/jemalloc/internal/fxp.h @@ -89,7 +89,7 @@ fxp_round_down(fxp_t a) { static inline uint32_t fxp_round_nearest(fxp_t a) { - uint32_t fractional_part = (a & ((1U << 16) - 1)); + uint32_t fractional_part = (a & ((1U << 16) - 1)); uint32_t increment = (uint32_t)(fractional_part >= (1U << 15)); return (a >> 16) + increment; } diff --git a/include/jemalloc/internal/hash.h b/include/jemalloc/internal/hash.h index 15162b94..73e2214e 100644 --- a/include/jemalloc/internal/hash.h +++ b/include/jemalloc/internal/hash.h @@ -25,7 +25,7 @@ hash_rotl_64(uint64_t x, int8_t r) { static inline uint32_t hash_get_block_32(const uint32_t *p, int i) { /* Handle unaligned read. */ - if (unlikely((uintptr_t)p & (sizeof(uint32_t)-1)) != 0) { + if (unlikely((uintptr_t)p & (sizeof(uint32_t) - 1)) != 0) { uint32_t ret; memcpy(&ret, (uint8_t *)(p + i), sizeof(uint32_t)); @@ -38,7 +38,7 @@ hash_get_block_32(const uint32_t *p, int i) { static inline uint64_t hash_get_block_64(const uint64_t *p, int i) { /* Handle unaligned read. 
*/ - if (unlikely((uintptr_t)p & (sizeof(uint64_t)-1)) != 0) { + if (unlikely((uintptr_t)p & (sizeof(uint64_t) - 1)) != 0) { uint64_t ret; memcpy(&ret, (uint8_t *)(p + i), sizeof(uint64_t)); @@ -72,8 +72,8 @@ hash_fmix_64(uint64_t k) { static inline uint32_t hash_x86_32(const void *key, int len, uint32_t seed) { - const uint8_t *data = (const uint8_t *) key; - const int nblocks = len / 4; + const uint8_t *data = (const uint8_t *)key; + const int nblocks = len / 4; uint32_t h1 = seed; @@ -82,8 +82,8 @@ hash_x86_32(const void *key, int len, uint32_t seed) { /* body */ { - const uint32_t *blocks = (const uint32_t *) (data + nblocks*4); - int i; + const uint32_t *blocks = (const uint32_t *)(data + nblocks * 4); + int i; for (i = -nblocks; i; i++) { uint32_t k1 = hash_get_block_32(blocks, i); @@ -94,21 +94,29 @@ hash_x86_32(const void *key, int len, uint32_t seed) { h1 ^= k1; h1 = hash_rotl_32(h1, 13); - h1 = h1*5 + 0xe6546b64; + h1 = h1 * 5 + 0xe6546b64; } } /* tail */ { - const uint8_t *tail = (const uint8_t *) (data + nblocks*4); + const uint8_t *tail = (const uint8_t *)(data + nblocks * 4); uint32_t k1 = 0; switch (len & 3) { - case 3: k1 ^= tail[2] << 16; JEMALLOC_FALLTHROUGH; - case 2: k1 ^= tail[1] << 8; JEMALLOC_FALLTHROUGH; - case 1: k1 ^= tail[0]; k1 *= c1; k1 = hash_rotl_32(k1, 15); - k1 *= c2; h1 ^= k1; + case 3: + k1 ^= tail[2] << 16; + JEMALLOC_FALLTHROUGH; + case 2: + k1 ^= tail[1] << 8; + JEMALLOC_FALLTHROUGH; + case 1: + k1 ^= tail[0]; + k1 *= c1; + k1 = hash_rotl_32(k1, 15); + k1 *= c2; + h1 ^= k1; } } @@ -121,10 +129,9 @@ hash_x86_32(const void *key, int len, uint32_t seed) { } static inline void -hash_x86_128(const void *key, const int len, uint32_t seed, - uint64_t r_out[2]) { - const uint8_t * data = (const uint8_t *) key; - const int nblocks = len / 16; +hash_x86_128(const void *key, const int len, uint32_t seed, uint64_t r_out[2]) { + const uint8_t *data = (const uint8_t *)key; + const int nblocks = len / 16; uint32_t h1 = seed; uint32_t h2 = 
seed; @@ -138,95 +145,161 @@ hash_x86_128(const void *key, const int len, uint32_t seed, /* body */ { - const uint32_t *blocks = (const uint32_t *) (data + nblocks*16); - int i; + const uint32_t *blocks = (const uint32_t *)(data + + nblocks * 16); + int i; for (i = -nblocks; i; i++) { - uint32_t k1 = hash_get_block_32(blocks, i*4 + 0); - uint32_t k2 = hash_get_block_32(blocks, i*4 + 1); - uint32_t k3 = hash_get_block_32(blocks, i*4 + 2); - uint32_t k4 = hash_get_block_32(blocks, i*4 + 3); + uint32_t k1 = hash_get_block_32(blocks, i * 4 + 0); + uint32_t k2 = hash_get_block_32(blocks, i * 4 + 1); + uint32_t k3 = hash_get_block_32(blocks, i * 4 + 2); + uint32_t k4 = hash_get_block_32(blocks, i * 4 + 3); - k1 *= c1; k1 = hash_rotl_32(k1, 15); k1 *= c2; h1 ^= k1; + k1 *= c1; + k1 = hash_rotl_32(k1, 15); + k1 *= c2; + h1 ^= k1; - h1 = hash_rotl_32(h1, 19); h1 += h2; - h1 = h1*5 + 0x561ccd1b; + h1 = hash_rotl_32(h1, 19); + h1 += h2; + h1 = h1 * 5 + 0x561ccd1b; - k2 *= c2; k2 = hash_rotl_32(k2, 16); k2 *= c3; h2 ^= k2; + k2 *= c2; + k2 = hash_rotl_32(k2, 16); + k2 *= c3; + h2 ^= k2; - h2 = hash_rotl_32(h2, 17); h2 += h3; - h2 = h2*5 + 0x0bcaa747; + h2 = hash_rotl_32(h2, 17); + h2 += h3; + h2 = h2 * 5 + 0x0bcaa747; - k3 *= c3; k3 = hash_rotl_32(k3, 17); k3 *= c4; h3 ^= k3; + k3 *= c3; + k3 = hash_rotl_32(k3, 17); + k3 *= c4; + h3 ^= k3; - h3 = hash_rotl_32(h3, 15); h3 += h4; - h3 = h3*5 + 0x96cd1c35; + h3 = hash_rotl_32(h3, 15); + h3 += h4; + h3 = h3 * 5 + 0x96cd1c35; - k4 *= c4; k4 = hash_rotl_32(k4, 18); k4 *= c1; h4 ^= k4; + k4 *= c4; + k4 = hash_rotl_32(k4, 18); + k4 *= c1; + h4 ^= k4; - h4 = hash_rotl_32(h4, 13); h4 += h1; - h4 = h4*5 + 0x32ac3b17; + h4 = hash_rotl_32(h4, 13); + h4 += h1; + h4 = h4 * 5 + 0x32ac3b17; } } /* tail */ { - const uint8_t *tail = (const uint8_t *) (data + nblocks*16); - uint32_t k1 = 0; - uint32_t k2 = 0; - uint32_t k3 = 0; - uint32_t k4 = 0; + const uint8_t *tail = (const uint8_t *)(data + nblocks * 16); + uint32_t k1 = 0; + uint32_t k2 = 0; 
+ uint32_t k3 = 0; + uint32_t k4 = 0; switch (len & 15) { - case 15: k4 ^= tail[14] << 16; JEMALLOC_FALLTHROUGH; - case 14: k4 ^= tail[13] << 8; JEMALLOC_FALLTHROUGH; - case 13: k4 ^= tail[12] << 0; - k4 *= c4; k4 = hash_rotl_32(k4, 18); k4 *= c1; h4 ^= k4; + case 15: + k4 ^= tail[14] << 16; JEMALLOC_FALLTHROUGH; - case 12: k3 ^= (uint32_t) tail[11] << 24; JEMALLOC_FALLTHROUGH; - case 11: k3 ^= tail[10] << 16; JEMALLOC_FALLTHROUGH; - case 10: k3 ^= tail[ 9] << 8; JEMALLOC_FALLTHROUGH; - case 9: k3 ^= tail[ 8] << 0; - k3 *= c3; k3 = hash_rotl_32(k3, 17); k3 *= c4; h3 ^= k3; + case 14: + k4 ^= tail[13] << 8; JEMALLOC_FALLTHROUGH; - case 8: k2 ^= (uint32_t) tail[ 7] << 24; JEMALLOC_FALLTHROUGH; - case 7: k2 ^= tail[ 6] << 16; JEMALLOC_FALLTHROUGH; - case 6: k2 ^= tail[ 5] << 8; JEMALLOC_FALLTHROUGH; - case 5: k2 ^= tail[ 4] << 0; - k2 *= c2; k2 = hash_rotl_32(k2, 16); k2 *= c3; h2 ^= k2; + case 13: + k4 ^= tail[12] << 0; + k4 *= c4; + k4 = hash_rotl_32(k4, 18); + k4 *= c1; + h4 ^= k4; JEMALLOC_FALLTHROUGH; - case 4: k1 ^= (uint32_t) tail[ 3] << 24; JEMALLOC_FALLTHROUGH; - case 3: k1 ^= tail[ 2] << 16; JEMALLOC_FALLTHROUGH; - case 2: k1 ^= tail[ 1] << 8; JEMALLOC_FALLTHROUGH; - case 1: k1 ^= tail[ 0] << 0; - k1 *= c1; k1 = hash_rotl_32(k1, 15); k1 *= c2; h1 ^= k1; + case 12: + k3 ^= (uint32_t)tail[11] << 24; + JEMALLOC_FALLTHROUGH; + case 11: + k3 ^= tail[10] << 16; + JEMALLOC_FALLTHROUGH; + case 10: + k3 ^= tail[9] << 8; + JEMALLOC_FALLTHROUGH; + case 9: + k3 ^= tail[8] << 0; + k3 *= c3; + k3 = hash_rotl_32(k3, 17); + k3 *= c4; + h3 ^= k3; + JEMALLOC_FALLTHROUGH; + case 8: + k2 ^= (uint32_t)tail[7] << 24; + JEMALLOC_FALLTHROUGH; + case 7: + k2 ^= tail[6] << 16; + JEMALLOC_FALLTHROUGH; + case 6: + k2 ^= tail[5] << 8; + JEMALLOC_FALLTHROUGH; + case 5: + k2 ^= tail[4] << 0; + k2 *= c2; + k2 = hash_rotl_32(k2, 16); + k2 *= c3; + h2 ^= k2; + JEMALLOC_FALLTHROUGH; + case 4: + k1 ^= (uint32_t)tail[3] << 24; + JEMALLOC_FALLTHROUGH; + case 3: + k1 ^= tail[2] << 16; + 
JEMALLOC_FALLTHROUGH; + case 2: + k1 ^= tail[1] << 8; + JEMALLOC_FALLTHROUGH; + case 1: + k1 ^= tail[0] << 0; + k1 *= c1; + k1 = hash_rotl_32(k1, 15); + k1 *= c2; + h1 ^= k1; break; } } /* finalization */ - h1 ^= len; h2 ^= len; h3 ^= len; h4 ^= len; + h1 ^= len; + h2 ^= len; + h3 ^= len; + h4 ^= len; - h1 += h2; h1 += h3; h1 += h4; - h2 += h1; h3 += h1; h4 += h1; + h1 += h2; + h1 += h3; + h1 += h4; + h2 += h1; + h3 += h1; + h4 += h1; h1 = hash_fmix_32(h1); h2 = hash_fmix_32(h2); h3 = hash_fmix_32(h3); h4 = hash_fmix_32(h4); - h1 += h2; h1 += h3; h1 += h4; - h2 += h1; h3 += h1; h4 += h1; + h1 += h2; + h1 += h3; + h1 += h4; + h2 += h1; + h3 += h1; + h4 += h1; - r_out[0] = (((uint64_t) h2) << 32) | h1; - r_out[1] = (((uint64_t) h4) << 32) | h3; + r_out[0] = (((uint64_t)h2) << 32) | h1; + r_out[1] = (((uint64_t)h4) << 32) | h3; } static inline void -hash_x64_128(const void *key, const int len, const uint32_t seed, - uint64_t r_out[2]) { - const uint8_t *data = (const uint8_t *) key; - const int nblocks = len / 16; +hash_x64_128( + const void *key, const int len, const uint32_t seed, uint64_t r_out[2]) { + const uint8_t *data = (const uint8_t *)key; + const int nblocks = len / 16; uint64_t h1 = seed; uint64_t h2 = seed; @@ -236,56 +309,99 @@ hash_x64_128(const void *key, const int len, const uint32_t seed, /* body */ { - const uint64_t *blocks = (const uint64_t *) (data); - int i; + const uint64_t *blocks = (const uint64_t *)(data); + int i; for (i = 0; i < nblocks; i++) { - uint64_t k1 = hash_get_block_64(blocks, i*2 + 0); - uint64_t k2 = hash_get_block_64(blocks, i*2 + 1); + uint64_t k1 = hash_get_block_64(blocks, i * 2 + 0); + uint64_t k2 = hash_get_block_64(blocks, i * 2 + 1); - k1 *= c1; k1 = hash_rotl_64(k1, 31); k1 *= c2; h1 ^= k1; + k1 *= c1; + k1 = hash_rotl_64(k1, 31); + k1 *= c2; + h1 ^= k1; - h1 = hash_rotl_64(h1, 27); h1 += h2; - h1 = h1*5 + 0x52dce729; + h1 = hash_rotl_64(h1, 27); + h1 += h2; + h1 = h1 * 5 + 0x52dce729; - k2 *= c2; k2 = hash_rotl_64(k2, 
33); k2 *= c1; h2 ^= k2; + k2 *= c2; + k2 = hash_rotl_64(k2, 33); + k2 *= c1; + h2 ^= k2; - h2 = hash_rotl_64(h2, 31); h2 += h1; - h2 = h2*5 + 0x38495ab5; + h2 = hash_rotl_64(h2, 31); + h2 += h1; + h2 = h2 * 5 + 0x38495ab5; } } /* tail */ { - const uint8_t *tail = (const uint8_t*)(data + nblocks*16); - uint64_t k1 = 0; - uint64_t k2 = 0; + const uint8_t *tail = (const uint8_t *)(data + nblocks * 16); + uint64_t k1 = 0; + uint64_t k2 = 0; switch (len & 15) { - case 15: k2 ^= ((uint64_t)(tail[14])) << 48; JEMALLOC_FALLTHROUGH; - case 14: k2 ^= ((uint64_t)(tail[13])) << 40; JEMALLOC_FALLTHROUGH; - case 13: k2 ^= ((uint64_t)(tail[12])) << 32; JEMALLOC_FALLTHROUGH; - case 12: k2 ^= ((uint64_t)(tail[11])) << 24; JEMALLOC_FALLTHROUGH; - case 11: k2 ^= ((uint64_t)(tail[10])) << 16; JEMALLOC_FALLTHROUGH; - case 10: k2 ^= ((uint64_t)(tail[ 9])) << 8; JEMALLOC_FALLTHROUGH; - case 9: k2 ^= ((uint64_t)(tail[ 8])) << 0; - k2 *= c2; k2 = hash_rotl_64(k2, 33); k2 *= c1; h2 ^= k2; + case 15: + k2 ^= ((uint64_t)(tail[14])) << 48; JEMALLOC_FALLTHROUGH; - case 8: k1 ^= ((uint64_t)(tail[ 7])) << 56; JEMALLOC_FALLTHROUGH; - case 7: k1 ^= ((uint64_t)(tail[ 6])) << 48; JEMALLOC_FALLTHROUGH; - case 6: k1 ^= ((uint64_t)(tail[ 5])) << 40; JEMALLOC_FALLTHROUGH; - case 5: k1 ^= ((uint64_t)(tail[ 4])) << 32; JEMALLOC_FALLTHROUGH; - case 4: k1 ^= ((uint64_t)(tail[ 3])) << 24; JEMALLOC_FALLTHROUGH; - case 3: k1 ^= ((uint64_t)(tail[ 2])) << 16; JEMALLOC_FALLTHROUGH; - case 2: k1 ^= ((uint64_t)(tail[ 1])) << 8; JEMALLOC_FALLTHROUGH; - case 1: k1 ^= ((uint64_t)(tail[ 0])) << 0; - k1 *= c1; k1 = hash_rotl_64(k1, 31); k1 *= c2; h1 ^= k1; + case 14: + k2 ^= ((uint64_t)(tail[13])) << 40; + JEMALLOC_FALLTHROUGH; + case 13: + k2 ^= ((uint64_t)(tail[12])) << 32; + JEMALLOC_FALLTHROUGH; + case 12: + k2 ^= ((uint64_t)(tail[11])) << 24; + JEMALLOC_FALLTHROUGH; + case 11: + k2 ^= ((uint64_t)(tail[10])) << 16; + JEMALLOC_FALLTHROUGH; + case 10: + k2 ^= ((uint64_t)(tail[9])) << 8; + JEMALLOC_FALLTHROUGH; + case 
9: + k2 ^= ((uint64_t)(tail[8])) << 0; + k2 *= c2; + k2 = hash_rotl_64(k2, 33); + k2 *= c1; + h2 ^= k2; + JEMALLOC_FALLTHROUGH; + case 8: + k1 ^= ((uint64_t)(tail[7])) << 56; + JEMALLOC_FALLTHROUGH; + case 7: + k1 ^= ((uint64_t)(tail[6])) << 48; + JEMALLOC_FALLTHROUGH; + case 6: + k1 ^= ((uint64_t)(tail[5])) << 40; + JEMALLOC_FALLTHROUGH; + case 5: + k1 ^= ((uint64_t)(tail[4])) << 32; + JEMALLOC_FALLTHROUGH; + case 4: + k1 ^= ((uint64_t)(tail[3])) << 24; + JEMALLOC_FALLTHROUGH; + case 3: + k1 ^= ((uint64_t)(tail[2])) << 16; + JEMALLOC_FALLTHROUGH; + case 2: + k1 ^= ((uint64_t)(tail[1])) << 8; + JEMALLOC_FALLTHROUGH; + case 1: + k1 ^= ((uint64_t)(tail[0])) << 0; + k1 *= c1; + k1 = hash_rotl_64(k1, 31); + k1 *= c2; + h1 ^= k1; break; } } /* finalization */ - h1 ^= len; h2 ^= len; + h1 ^= len; + h2 ^= len; h1 += h2; h2 += h1; diff --git a/include/jemalloc/internal/hook.h b/include/jemalloc/internal/hook.h index 76b9130d..bbbcb320 100644 --- a/include/jemalloc/internal/hook.h +++ b/include/jemalloc/internal/hook.h @@ -83,7 +83,6 @@ enum hook_dalloc_e { }; typedef enum hook_dalloc_e hook_dalloc_t; - enum hook_expand_e { hook_expand_realloc, hook_expand_rallocx, @@ -91,23 +90,22 @@ enum hook_expand_e { }; typedef enum hook_expand_e hook_expand_t; -typedef void (*hook_alloc)( - void *extra, hook_alloc_t type, void *result, uintptr_t result_raw, - uintptr_t args_raw[3]); +typedef void (*hook_alloc)(void *extra, hook_alloc_t type, void *result, + uintptr_t result_raw, uintptr_t args_raw[3]); typedef void (*hook_dalloc)( void *extra, hook_dalloc_t type, void *address, uintptr_t args_raw[3]); -typedef void (*hook_expand)( - void *extra, hook_expand_t type, void *address, size_t old_usize, - size_t new_usize, uintptr_t result_raw, uintptr_t args_raw[4]); +typedef void (*hook_expand)(void *extra, hook_expand_t type, void *address, + size_t old_usize, size_t new_usize, uintptr_t result_raw, + uintptr_t args_raw[4]); typedef struct hooks_s hooks_t; struct hooks_s { - hook_alloc 
alloc_hook; + hook_alloc alloc_hook; hook_dalloc dalloc_hook; hook_expand expand_hook; - void *extra; + void *extra; }; /* @@ -156,8 +154,8 @@ void hook_remove(tsdn_t *tsdn, void *opaque); void hook_invoke_alloc(hook_alloc_t type, void *result, uintptr_t result_raw, uintptr_t args_raw[3]); -void hook_invoke_dalloc(hook_dalloc_t type, void *address, - uintptr_t args_raw[3]); +void hook_invoke_dalloc( + hook_dalloc_t type, void *address, uintptr_t args_raw[3]); void hook_invoke_expand(hook_expand_t type, void *address, size_t old_usize, size_t new_usize, uintptr_t result_raw, uintptr_t args_raw[4]); diff --git a/include/jemalloc/internal/hpa.h b/include/jemalloc/internal/hpa.h index 2e9fccc2..7a6ba0b9 100644 --- a/include/jemalloc/internal/hpa.h +++ b/include/jemalloc/internal/hpa.h @@ -27,7 +27,7 @@ struct hpa_central_s { * * Guarded by grow_mtx. */ - void *eden; + void *eden; size_t eden_len; /* Source for metadata. */ base_t *base; @@ -78,7 +78,7 @@ struct hpa_shard_nonderived_stats_s { /* Completely derived; only used by CTL. */ typedef struct hpa_shard_stats_s hpa_shard_stats_t; struct hpa_shard_stats_s { - psset_stats_t psset_stats; + psset_stats_t psset_stats; hpa_shard_nonderived_stats_t nonderived_stats; }; @@ -156,14 +156,15 @@ bool hpa_hugepage_size_exceeds_limit(void); * just that it can function properly given the system it's running on. 
*/ bool hpa_supported(void); -bool hpa_central_init(hpa_central_t *central, base_t *base, const hpa_hooks_t *hooks); +bool hpa_central_init( + hpa_central_t *central, base_t *base, const hpa_hooks_t *hooks); bool hpa_shard_init(hpa_shard_t *shard, hpa_central_t *central, emap_t *emap, base_t *base, edata_cache_t *edata_cache, unsigned ind, const hpa_shard_opts_t *opts); void hpa_shard_stats_accum(hpa_shard_stats_t *dst, hpa_shard_stats_t *src); -void hpa_shard_stats_merge(tsdn_t *tsdn, hpa_shard_t *shard, - hpa_shard_stats_t *dst); +void hpa_shard_stats_merge( + tsdn_t *tsdn, hpa_shard_t *shard, hpa_shard_stats_t *dst); /* * Notify the shard that we won't use it for allocations much longer. Due to @@ -173,8 +174,8 @@ void hpa_shard_stats_merge(tsdn_t *tsdn, hpa_shard_t *shard, void hpa_shard_disable(tsdn_t *tsdn, hpa_shard_t *shard); void hpa_shard_destroy(tsdn_t *tsdn, hpa_shard_t *shard); -void hpa_shard_set_deferral_allowed(tsdn_t *tsdn, hpa_shard_t *shard, - bool deferral_allowed); +void hpa_shard_set_deferral_allowed( + tsdn_t *tsdn, hpa_shard_t *shard, bool deferral_allowed); void hpa_shard_do_deferred_work(tsdn_t *tsdn, hpa_shard_t *shard); /* diff --git a/include/jemalloc/internal/hpa_hooks.h b/include/jemalloc/internal/hpa_hooks.h index d0618f89..f50ff58f 100644 --- a/include/jemalloc/internal/hpa_hooks.h +++ b/include/jemalloc/internal/hpa_hooks.h @@ -13,7 +13,7 @@ struct hpa_hooks_s { void (*dehugify)(void *ptr, size_t size); void (*curtime)(nstime_t *r_time, bool first_reading); uint64_t (*ms_since)(nstime_t *r_time); - bool (*vectorized_purge)(void* vec, size_t vlen, size_t nbytes); + bool (*vectorized_purge)(void *vec, size_t vlen, size_t nbytes); }; extern const hpa_hooks_t hpa_hooks_default; diff --git a/include/jemalloc/internal/hpa_utils.h b/include/jemalloc/internal/hpa_utils.h index 283510b9..53bcb670 100644 --- a/include/jemalloc/internal/hpa_utils.h +++ b/include/jemalloc/internal/hpa_utils.h @@ -8,26 +8,27 @@ typedef struct iovec 
hpa_io_vector_t; #else typedef struct { - void *iov_base; - size_t iov_len; + void *iov_base; + size_t iov_len; } hpa_io_vector_t; #endif /* Actually invoke hooks. If we fail vectorized, use single purges */ static void hpa_try_vectorized_purge( - hpa_shard_t *shard, hpa_io_vector_t *vec, size_t vlen, size_t nbytes) { - bool success = opt_process_madvise_max_batch > 0 - && !shard->central->hooks.vectorized_purge(vec, vlen, nbytes); - if (!success) { - /* On failure, it is safe to purge again (potential perf + hpa_shard_t *shard, hpa_io_vector_t *vec, size_t vlen, size_t nbytes) { + bool success = opt_process_madvise_max_batch > 0 + && !shard->central->hooks.vectorized_purge(vec, vlen, nbytes); + if (!success) { + /* On failure, it is safe to purge again (potential perf * penalty) If kernel can tell exactly which regions * failed, we could avoid that penalty. */ - for (size_t i = 0; i < vlen; ++i) { - shard->central->hooks.purge(vec[i].iov_base, vec[i].iov_len); - } - } + for (size_t i = 0; i < vlen; ++i) { + shard->central->hooks.purge( + vec[i].iov_base, vec[i].iov_len); + } + } } /* @@ -35,48 +36,48 @@ hpa_try_vectorized_purge( * It invokes the hook when batch limit is reached */ typedef struct { - hpa_io_vector_t *vp; - size_t cur; - size_t total_bytes; - size_t capacity; + hpa_io_vector_t *vp; + size_t cur; + size_t total_bytes; + size_t capacity; } hpa_range_accum_t; static inline void hpa_range_accum_init(hpa_range_accum_t *ra, hpa_io_vector_t *v, size_t sz) { - ra->vp = v; - ra->capacity = sz; - ra->total_bytes = 0; - ra->cur = 0; + ra->vp = v; + ra->capacity = sz; + ra->total_bytes = 0; + ra->cur = 0; } static inline void hpa_range_accum_flush(hpa_range_accum_t *ra, hpa_shard_t *shard) { - assert(ra->total_bytes > 0 && ra->cur > 0); - hpa_try_vectorized_purge(shard, ra->vp, ra->cur, ra->total_bytes); - ra->cur = 0; - ra->total_bytes = 0; + assert(ra->total_bytes > 0 && ra->cur > 0); + hpa_try_vectorized_purge(shard, ra->vp, ra->cur, ra->total_bytes); + 
ra->cur = 0; + ra->total_bytes = 0; } static inline void hpa_range_accum_add( - hpa_range_accum_t *ra, void *addr, size_t sz, hpa_shard_t *shard) { - assert(ra->cur < ra->capacity); + hpa_range_accum_t *ra, void *addr, size_t sz, hpa_shard_t *shard) { + assert(ra->cur < ra->capacity); - ra->vp[ra->cur].iov_base = addr; - ra->vp[ra->cur].iov_len = sz; - ra->total_bytes += sz; - ra->cur++; + ra->vp[ra->cur].iov_base = addr; + ra->vp[ra->cur].iov_len = sz; + ra->total_bytes += sz; + ra->cur++; - if (ra->cur == ra->capacity) { - hpa_range_accum_flush(ra, shard); - } + if (ra->cur == ra->capacity) { + hpa_range_accum_flush(ra, shard); + } } static inline void hpa_range_accum_finish(hpa_range_accum_t *ra, hpa_shard_t *shard) { - if (ra->cur > 0) { - hpa_range_accum_flush(ra, shard); - } + if (ra->cur > 0) { + hpa_range_accum_flush(ra, shard); + } } /* @@ -84,14 +85,14 @@ hpa_range_accum_finish(hpa_range_accum_t *ra, hpa_shard_t *shard) { */ typedef struct { hpdata_purge_state_t state; - hpdata_t *hp; - bool dehugify; + hpdata_t *hp; + bool dehugify; } hpa_purge_item_t; typedef struct hpa_purge_batch_s hpa_purge_batch_t; struct hpa_purge_batch_s { hpa_purge_item_t *items; - size_t items_capacity; + size_t items_capacity; /* Number of huge pages to purge in current batch */ size_t item_cnt; /* Number of ranges to purge in current batch */ diff --git a/include/jemalloc/internal/hpdata.h b/include/jemalloc/internal/hpdata.h index a8a4a552..75550f9b 100644 --- a/include/jemalloc/internal/hpdata.h +++ b/include/jemalloc/internal/hpdata.h @@ -73,7 +73,7 @@ struct hpdata_s { bool h_hugify_allowed; /* When we became a hugification candidate. */ nstime_t h_time_hugify_allowed; - bool h_in_psset_hugify_container; + bool h_in_psset_hugify_container; /* Whether or not a purge or hugify is currently happening. 
*/ bool h_mid_purge; @@ -186,8 +186,8 @@ hpdata_purge_allowed_get(const hpdata_t *hpdata) { static inline void hpdata_purge_allowed_set(hpdata_t *hpdata, bool purge_allowed) { - assert(purge_allowed == false || !hpdata->h_mid_purge); - hpdata->h_purge_allowed = purge_allowed; + assert(purge_allowed == false || !hpdata->h_mid_purge); + hpdata->h_purge_allowed = purge_allowed; } static inline bool @@ -250,7 +250,6 @@ hpdata_changing_state_get(const hpdata_t *hpdata) { return hpdata->h_mid_purge || hpdata->h_mid_hugify; } - static inline bool hpdata_updating_get(const hpdata_t *hpdata) { return hpdata->h_updating; @@ -317,7 +316,7 @@ hpdata_assert_empty(hpdata_t *hpdata) { */ static inline bool hpdata_consistent(hpdata_t *hpdata) { - if(fb_urange_longest(hpdata->active_pages, HUGEPAGE_PAGES) + if (fb_urange_longest(hpdata->active_pages, HUGEPAGE_PAGES) != hpdata_longest_free_range_get(hpdata)) { return false; } @@ -368,7 +367,7 @@ void hpdata_init(hpdata_t *hpdata, void *addr, uint64_t age); * offset within that allocation. */ void *hpdata_reserve_alloc(hpdata_t *hpdata, size_t sz); -void hpdata_unreserve(hpdata_t *hpdata, void *addr, size_t sz); +void hpdata_unreserve(hpdata_t *hpdata, void *addr, size_t sz); /* * The hpdata_purge_prepare_t allows grabbing the metadata required to purge @@ -377,10 +376,10 @@ void hpdata_unreserve(hpdata_t *hpdata, void *addr, size_t sz); */ typedef struct hpdata_purge_state_s hpdata_purge_state_t; struct hpdata_purge_state_s { - size_t npurged; - size_t ndirty_to_purge; + size_t npurged; + size_t ndirty_to_purge; fb_group_t to_purge[FB_NGROUPS(HUGEPAGE_PAGES)]; - size_t next_purge_search_begin; + size_t next_purge_search_begin; }; /* @@ -398,8 +397,8 @@ struct hpdata_purge_state_s { * Returns the number of dirty pages that will be purged and sets nranges * to number of ranges with dirty pages that will be purged. 
*/ -size_t hpdata_purge_begin(hpdata_t *hpdata, hpdata_purge_state_t *purge_state, - size_t *nranges); +size_t hpdata_purge_begin( + hpdata_t *hpdata, hpdata_purge_state_t *purge_state, size_t *nranges); /* * If there are more extents to purge, sets *r_purge_addr and *r_purge_size to diff --git a/include/jemalloc/internal/inspect.h b/include/jemalloc/internal/inspect.h index 0da920ca..e8ed44d3 100644 --- a/include/jemalloc/internal/inspect.h +++ b/include/jemalloc/internal/inspect.h @@ -26,7 +26,7 @@ typedef struct inspect_extent_util_stats_verbose_s inspect_extent_util_stats_verbose_t; struct inspect_extent_util_stats_verbose_s { - void *slabcur_addr; + void *slabcur_addr; size_t nfree; size_t nregs; size_t size; @@ -34,10 +34,10 @@ struct inspect_extent_util_stats_verbose_s { size_t bin_nregs; }; -void inspect_extent_util_stats_get(tsdn_t *tsdn, const void *ptr, - size_t *nfree, size_t *nregs, size_t *size); +void inspect_extent_util_stats_get( + tsdn_t *tsdn, const void *ptr, size_t *nfree, size_t *nregs, size_t *size); void inspect_extent_util_stats_verbose_get(tsdn_t *tsdn, const void *ptr, - size_t *nfree, size_t *nregs, size_t *size, - size_t *bin_nfree, size_t *bin_nregs, void **slabcur_addr); + size_t *nfree, size_t *nregs, size_t *size, size_t *bin_nfree, + size_t *bin_nregs, void **slabcur_addr); #endif /* JEMALLOC_INTERNAL_INSPECT_H */ diff --git a/include/jemalloc/internal/jemalloc_internal_decls.h b/include/jemalloc/internal/jemalloc_internal_decls.h index 0bca9133..2ca12c4a 100644 --- a/include/jemalloc/internal/jemalloc_internal_decls.h +++ b/include/jemalloc/internal/jemalloc_internal_decls.h @@ -3,64 +3,65 @@ #include #ifdef _WIN32 -# include -# include "msvc_compat/windows_extra.h" -# include "msvc_compat/strings.h" -# ifdef _WIN64 -# if LG_VADDR <= 32 -# error Generate the headers using x64 vcargs -# endif -# else -# if LG_VADDR > 32 -# undef LG_VADDR -# define LG_VADDR 32 -# endif -# endif +# include +# include "msvc_compat/windows_extra.h" +# 
include "msvc_compat/strings.h" +# ifdef _WIN64 +# if LG_VADDR <= 32 +# error Generate the headers using x64 vcargs +# endif +# else +# if LG_VADDR > 32 +# undef LG_VADDR +# define LG_VADDR 32 +# endif +# endif #else -# include -# include -# if !defined(__pnacl__) && !defined(__native_client__) -# include -# if !defined(SYS_write) && defined(__NR_write) -# define SYS_write __NR_write -# endif -# if defined(SYS_open) && defined(__aarch64__) - /* Android headers may define SYS_open to __NR_open even though +# include +# include +# if !defined(__pnacl__) && !defined(__native_client__) +# include +# if !defined(SYS_write) && defined(__NR_write) +# define SYS_write __NR_write +# endif +# if defined(SYS_open) && defined(__aarch64__) +/* Android headers may define SYS_open to __NR_open even though * __NR_open may not exist on AArch64 (superseded by __NR_openat). */ -# undef SYS_open -# endif -# include -# endif -# include -# if defined(__FreeBSD__) || defined(__DragonFly__) || defined(__OpenBSD__) -# include -# include -# if defined(__FreeBSD__) -# define cpu_set_t cpuset_t -# endif -# endif -# include -# ifdef JEMALLOC_OS_UNFAIR_LOCK -# include -# endif -# ifdef JEMALLOC_GLIBC_MALLOC_HOOK -# include -# endif -# include -# include -# include -# ifdef JEMALLOC_HAVE_MACH_ABSOLUTE_TIME -# include -# endif +# undef SYS_open +# endif +# include +# endif +# include +# if defined(__FreeBSD__) || defined(__DragonFly__) \ + || defined(__OpenBSD__) +# include +# include +# if defined(__FreeBSD__) +# define cpu_set_t cpuset_t +# endif +# endif +# include +# ifdef JEMALLOC_OS_UNFAIR_LOCK +# include +# endif +# ifdef JEMALLOC_GLIBC_MALLOC_HOOK +# include +# endif +# include +# include +# include +# ifdef JEMALLOC_HAVE_MACH_ABSOLUTE_TIME +# include +# endif #endif #include #include #ifndef SIZE_T_MAX -# define SIZE_T_MAX SIZE_MAX +# define SIZE_T_MAX SIZE_MAX #endif #ifndef SSIZE_MAX -# define SSIZE_MAX ((ssize_t)(SIZE_T_MAX >> 1)) +# define SSIZE_MAX ((ssize_t)(SIZE_T_MAX >> 1)) 
#endif #include #include @@ -69,30 +70,30 @@ #include #include #ifndef offsetof -# define offsetof(type, member) ((size_t)&(((type *)NULL)->member)) +# define offsetof(type, member) ((size_t) & (((type *)NULL)->member)) #endif #include #include #include #ifdef _MSC_VER -# include +# include typedef intptr_t ssize_t; -# define PATH_MAX 1024 -# define STDERR_FILENO 2 -# define __func__ __FUNCTION__ -# ifdef JEMALLOC_HAS_RESTRICT -# define restrict __restrict -# endif +# define PATH_MAX 1024 +# define STDERR_FILENO 2 +# define __func__ __FUNCTION__ +# ifdef JEMALLOC_HAS_RESTRICT +# define restrict __restrict +# endif /* Disable warnings about deprecated system functions. */ -# pragma warning(disable: 4996) -#if _MSC_VER < 1800 +# pragma warning(disable : 4996) +# if _MSC_VER < 1800 static int isblank(int c) { return (c == '\t' || c == ' '); } -#endif +# endif #else -# include +# include #endif #include @@ -102,7 +103,7 @@ isblank(int c) { * classes. */ #ifdef small -# undef small +# undef small #endif /* diff --git a/include/jemalloc/internal/jemalloc_internal_externs.h b/include/jemalloc/internal/jemalloc_internal_externs.h index 3b42f833..b502c7e7 100644 --- a/include/jemalloc/internal/jemalloc_internal_externs.h +++ b/include/jemalloc/internal/jemalloc_internal_externs.h @@ -12,34 +12,34 @@ extern bool malloc_slow; /* Run-time options. 
*/ -extern bool opt_abort; -extern bool opt_abort_conf; -extern bool opt_trust_madvise; -extern bool opt_confirm_conf; -extern bool opt_hpa; +extern bool opt_abort; +extern bool opt_abort_conf; +extern bool opt_trust_madvise; +extern bool opt_confirm_conf; +extern bool opt_hpa; extern hpa_shard_opts_t opt_hpa_opts; -extern sec_opts_t opt_hpa_sec_opts; +extern sec_opts_t opt_hpa_sec_opts; extern const char *opt_junk; -extern bool opt_junk_alloc; -extern bool opt_junk_free; +extern bool opt_junk_alloc; +extern bool opt_junk_free; extern void (*JET_MUTABLE junk_free_callback)(void *ptr, size_t size); extern void (*JET_MUTABLE junk_alloc_callback)(void *ptr, size_t size); extern void (*JET_MUTABLE invalid_conf_abort)(void); -extern bool opt_utrace; -extern bool opt_xmalloc; -extern bool opt_experimental_infallible_new; -extern bool opt_experimental_tcache_gc; -extern bool opt_zero; -extern unsigned opt_narenas; +extern bool opt_utrace; +extern bool opt_xmalloc; +extern bool opt_experimental_infallible_new; +extern bool opt_experimental_tcache_gc; +extern bool opt_zero; +extern unsigned opt_narenas; extern zero_realloc_action_t opt_zero_realloc_action; -extern malloc_init_t malloc_init_state; -extern const char *const zero_realloc_mode_names[]; -extern atomic_zu_t zero_realloc_count; -extern bool opt_cache_oblivious; -extern unsigned opt_debug_double_free_max_scan; -extern size_t opt_calloc_madvise_threshold; -extern bool opt_disable_large_size_classes; +extern malloc_init_t malloc_init_state; +extern const char *const zero_realloc_mode_names[]; +extern atomic_zu_t zero_realloc_count; +extern bool opt_cache_oblivious; +extern unsigned opt_debug_double_free_max_scan; +extern size_t opt_calloc_madvise_threshold; +extern bool opt_disable_large_size_classes; extern const char *opt_malloc_conf_symlink; extern const char *opt_malloc_conf_env_var; @@ -64,24 +64,24 @@ extern atomic_p_t arenas[]; extern unsigned huge_arena_ind; -void *a0malloc(size_t size); -void a0dalloc(void 
*ptr); -void *bootstrap_malloc(size_t size); -void *bootstrap_calloc(size_t num, size_t size); -void bootstrap_free(void *ptr); -void arena_set(unsigned ind, arena_t *arena); +void *a0malloc(size_t size); +void a0dalloc(void *ptr); +void *bootstrap_malloc(size_t size); +void *bootstrap_calloc(size_t num, size_t size); +void bootstrap_free(void *ptr); +void arena_set(unsigned ind, arena_t *arena); unsigned narenas_total_get(void); arena_t *arena_init(tsdn_t *tsdn, unsigned ind, const arena_config_t *config); arena_t *arena_choose_hard(tsd_t *tsd, bool internal); -void arena_migrate(tsd_t *tsd, arena_t *oldarena, arena_t *newarena); -void iarena_cleanup(tsd_t *tsd); -void arena_cleanup(tsd_t *tsd); -size_t batch_alloc(void **ptrs, size_t num, size_t size, int flags); -void jemalloc_prefork(void); -void jemalloc_postfork_parent(void); -void jemalloc_postfork_child(void); -void sdallocx_default(void *ptr, size_t size, int flags); -void free_default(void *ptr); -void *malloc_default(size_t size); +void arena_migrate(tsd_t *tsd, arena_t *oldarena, arena_t *newarena); +void iarena_cleanup(tsd_t *tsd); +void arena_cleanup(tsd_t *tsd); +size_t batch_alloc(void **ptrs, size_t num, size_t size, int flags); +void jemalloc_prefork(void); +void jemalloc_postfork_parent(void); +void jemalloc_postfork_child(void); +void sdallocx_default(void *ptr, size_t size, int flags); +void free_default(void *ptr); +void *malloc_default(size_t size); #endif /* JEMALLOC_INTERNAL_EXTERNS_H */ diff --git a/include/jemalloc/internal/jemalloc_internal_inlines_a.h b/include/jemalloc/internal/jemalloc_internal_inlines_a.h index 111cda42..8513effd 100644 --- a/include/jemalloc/internal/jemalloc_internal_inlines_a.h +++ b/include/jemalloc/internal/jemalloc_internal_inlines_a.h @@ -20,12 +20,12 @@ malloc_getcpu(void) { return (malloc_cpuid_t)sched_getcpu(); #elif defined(JEMALLOC_HAVE_RDTSCP) unsigned int ecx; - asm volatile("rdtscp" : "=c" (ecx) :: "eax", "edx"); + asm volatile("rdtscp" : 
"=c"(ecx)::"eax", "edx"); return (malloc_cpuid_t)(ecx & 0xfff); #elif defined(__aarch64__) && defined(__APPLE__) /* Other oses most likely use tpidr_el0 instead */ uintptr_t c; - asm volatile("mrs %x0, tpidrro_el0" : "=r"(c) :: "memory"); + asm volatile("mrs %x0, tpidrro_el0" : "=r"(c)::"memory"); return (malloc_cpuid_t)(c & (1 << 3) - 1); #else not_reached(); @@ -42,8 +42,8 @@ percpu_arena_choose(void) { assert(cpuid >= 0); unsigned arena_ind; - if ((opt_percpu_arena == percpu_arena) || ((unsigned)cpuid < ncpus / - 2)) { + if ((opt_percpu_arena == percpu_arena) + || ((unsigned)cpuid < ncpus / 2)) { arena_ind = cpuid; } else { assert(opt_percpu_arena == per_phycpu_arena); diff --git a/include/jemalloc/internal/jemalloc_internal_inlines_b.h b/include/jemalloc/internal/jemalloc_internal_inlines_b.h index 2ddb4a89..dad37a9c 100644 --- a/include/jemalloc/internal/jemalloc_internal_inlines_b.h +++ b/include/jemalloc/internal/jemalloc_internal_inlines_b.h @@ -24,13 +24,12 @@ percpu_arena_update(tsd_t *tsd, unsigned cpu) { if (tcache != NULL) { tcache_slow_t *tcache_slow = tsd_tcache_slowp_get(tsd); assert(tcache_slow->arena != NULL); - tcache_arena_reassociate(tsd_tsdn(tsd), tcache_slow, - tcache, newarena); + tcache_arena_reassociate( + tsd_tsdn(tsd), tcache_slow, tcache, newarena); } } } - /* Choose an arena based on a per-thread value. 
*/ static inline arena_t * arena_choose_impl(tsd_t *tsd, arena_t *arena, bool internal) { @@ -51,18 +50,18 @@ arena_choose_impl(tsd_t *tsd, arena_t *arena, bool internal) { assert(ret); if (tcache_available(tsd)) { tcache_slow_t *tcache_slow = tsd_tcache_slowp_get(tsd); - tcache_t *tcache = tsd_tcachep_get(tsd); + tcache_t *tcache = tsd_tcachep_get(tsd); if (tcache_slow->arena != NULL) { /* See comments in tsd_tcache_data_init().*/ - assert(tcache_slow->arena == - arena_get(tsd_tsdn(tsd), 0, false)); + assert(tcache_slow->arena + == arena_get(tsd_tsdn(tsd), 0, false)); if (tcache_slow->arena != ret) { tcache_arena_reassociate(tsd_tsdn(tsd), tcache_slow, tcache, ret); } } else { - tcache_arena_associate(tsd_tsdn(tsd), - tcache_slow, tcache, ret); + tcache_arena_associate( + tsd_tsdn(tsd), tcache_slow, tcache, ret); } } } @@ -72,10 +71,10 @@ arena_choose_impl(tsd_t *tsd, arena_t *arena, bool internal) { * auto percpu arena range, (i.e. thread is assigned to a manually * managed arena), then percpu arena is skipped. 
*/ - if (have_percpu_arena && PERCPU_ARENA_ENABLED(opt_percpu_arena) && - !internal && (arena_ind_get(ret) < - percpu_arena_ind_limit(opt_percpu_arena)) && (ret->last_thd != - tsd_tsdn(tsd))) { + if (have_percpu_arena && PERCPU_ARENA_ENABLED(opt_percpu_arena) + && !internal + && (arena_ind_get(ret) < percpu_arena_ind_limit(opt_percpu_arena)) + && (ret->last_thd != tsd_tsdn(tsd))) { unsigned ind = percpu_arena_choose(); if (arena_ind_get(ret) != ind) { percpu_arena_update(tsd, ind); diff --git a/include/jemalloc/internal/jemalloc_internal_inlines_c.h b/include/jemalloc/internal/jemalloc_internal_inlines_c.h index 39c196a5..2c61f8c4 100644 --- a/include/jemalloc/internal/jemalloc_internal_inlines_c.h +++ b/include/jemalloc/internal/jemalloc_internal_inlines_c.h @@ -63,11 +63,12 @@ iallocztm_explicit_slab(tsdn_t *tsdn, size_t size, szind_t ind, bool zero, assert(!is_internal || tcache == NULL); assert(!is_internal || arena == NULL || arena_is_auto(arena)); if (!tsdn_null(tsdn) && tsd_reentrancy_level_get(tsdn_tsd(tsdn)) == 0) { - witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn), - WITNESS_RANK_CORE, 0); + witness_assert_depth_to_rank( + tsdn_witness_tsdp_get(tsdn), WITNESS_RANK_CORE, 0); } - ret = arena_malloc(tsdn, arena, size, ind, zero, slab, tcache, slow_path); + ret = arena_malloc( + tsdn, arena, size, ind, zero, slab, tcache, slow_path); if (config_stats && is_internal && likely(ret != NULL)) { arena_internal_add(iaalloc(tsdn, ret), isalloc(tsdn, ret)); } @@ -78,8 +79,8 @@ JEMALLOC_ALWAYS_INLINE void * iallocztm(tsdn_t *tsdn, size_t size, szind_t ind, bool zero, tcache_t *tcache, bool is_internal, arena_t *arena, bool slow_path) { bool slab = sz_can_use_slab(size); - return iallocztm_explicit_slab(tsdn, size, ind, zero, slab, tcache, - is_internal, arena, slow_path); + return iallocztm_explicit_slab( + tsdn, size, ind, zero, slab, tcache, is_internal, arena, slow_path); } JEMALLOC_ALWAYS_INLINE void * @@ -89,8 +90,8 @@ ialloc(tsd_t *tsd, size_t size, 
szind_t ind, bool zero, bool slow_path) { } JEMALLOC_ALWAYS_INLINE void * -ipallocztm_explicit_slab(tsdn_t *tsdn, size_t usize, size_t alignment, bool zero, - bool slab, tcache_t *tcache, bool is_internal, arena_t *arena) { +ipallocztm_explicit_slab(tsdn_t *tsdn, size_t usize, size_t alignment, + bool zero, bool slab, tcache_t *tcache, bool is_internal, arena_t *arena) { void *ret; assert(!slab || sz_can_use_slab(usize)); /* slab && large is illegal */ @@ -98,8 +99,8 @@ ipallocztm_explicit_slab(tsdn_t *tsdn, size_t usize, size_t alignment, bool zero assert(usize == sz_sa2u(usize, alignment)); assert(!is_internal || tcache == NULL); assert(!is_internal || arena == NULL || arena_is_auto(arena)); - witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn), - WITNESS_RANK_CORE, 0); + witness_assert_depth_to_rank( + tsdn_witness_tsdp_get(tsdn), WITNESS_RANK_CORE, 0); ret = arena_palloc(tsdn, arena, usize, alignment, zero, slab, tcache); assert(ALIGNMENT_ADDR2BASE(ret, alignment) == ret); @@ -123,10 +124,10 @@ ipalloct(tsdn_t *tsdn, size_t usize, size_t alignment, bool zero, } JEMALLOC_ALWAYS_INLINE void * -ipalloct_explicit_slab(tsdn_t *tsdn, size_t usize, size_t alignment, - bool zero, bool slab, tcache_t *tcache, arena_t *arena) { - return ipallocztm_explicit_slab(tsdn, usize, alignment, zero, slab, - tcache, false, arena); +ipalloct_explicit_slab(tsdn_t *tsdn, size_t usize, size_t alignment, bool zero, + bool slab, tcache_t *tcache, arena_t *arena) { + return ipallocztm_explicit_slab( + tsdn, usize, alignment, zero, slab, tcache, false, arena); } JEMALLOC_ALWAYS_INLINE void * @@ -146,13 +147,13 @@ idalloctm(tsdn_t *tsdn, void *ptr, tcache_t *tcache, assert(ptr != NULL); assert(!is_internal || tcache == NULL); assert(!is_internal || arena_is_auto(iaalloc(tsdn, ptr))); - witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn), - WITNESS_RANK_CORE, 0); + witness_assert_depth_to_rank( + tsdn_witness_tsdp_get(tsdn), WITNESS_RANK_CORE, 0); if (config_stats && is_internal) 
{ arena_internal_sub(iaalloc(tsdn, ptr), isalloc(tsdn, ptr)); } - if (!is_internal && !tsdn_null(tsdn) && - tsd_reentrancy_level_get(tsdn_tsd(tsdn)) != 0) { + if (!is_internal && !tsdn_null(tsdn) + && tsd_reentrancy_level_get(tsdn_tsd(tsdn)) != 0) { assert(tcache == NULL); } arena_dalloc(tsdn, ptr, tcache, alloc_ctx, slow_path); @@ -166,8 +167,8 @@ idalloc(tsd_t *tsd, void *ptr) { JEMALLOC_ALWAYS_INLINE void isdalloct(tsdn_t *tsdn, void *ptr, size_t size, tcache_t *tcache, emap_alloc_ctx_t *alloc_ctx, bool slow_path) { - witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn), - WITNESS_RANK_CORE, 0); + witness_assert_depth_to_rank( + tsdn_witness_tsdp_get(tsdn), WITNESS_RANK_CORE, 0); arena_sdalloc(tsdn, ptr, size, tcache, alloc_ctx, slow_path); } @@ -175,17 +176,17 @@ JEMALLOC_ALWAYS_INLINE void * iralloct_realign(tsdn_t *tsdn, void *ptr, size_t oldsize, size_t size, size_t alignment, bool zero, bool slab, tcache_t *tcache, arena_t *arena, hook_ralloc_args_t *hook_args) { - witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn), - WITNESS_RANK_CORE, 0); - void *p; + witness_assert_depth_to_rank( + tsdn_witness_tsdp_get(tsdn), WITNESS_RANK_CORE, 0); + void *p; size_t usize, copysize; usize = sz_sa2u(size, alignment); if (unlikely(usize == 0 || usize > SC_LARGE_MAXCLASS)) { return NULL; } - p = ipalloct_explicit_slab(tsdn, usize, alignment, zero, slab, - tcache, arena); + p = ipalloct_explicit_slab( + tsdn, usize, alignment, zero, slab, tcache, arena); if (p == NULL) { return NULL; } @@ -195,11 +196,12 @@ iralloct_realign(tsdn_t *tsdn, void *ptr, size_t oldsize, size_t size, */ copysize = (size < oldsize) ? size : oldsize; memcpy(p, ptr, copysize); - hook_invoke_alloc(hook_args->is_realloc - ? hook_alloc_realloc : hook_alloc_rallocx, p, (uintptr_t)p, - hook_args->args); - hook_invoke_dalloc(hook_args->is_realloc - ? hook_dalloc_realloc : hook_dalloc_rallocx, ptr, hook_args->args); + hook_invoke_alloc( + hook_args->is_realloc ? 
hook_alloc_realloc : hook_alloc_rallocx, p, + (uintptr_t)p, hook_args->args); + hook_invoke_dalloc( + hook_args->is_realloc ? hook_dalloc_realloc : hook_dalloc_rallocx, + ptr, hook_args->args); isdalloct(tsdn, ptr, oldsize, tcache, NULL, true); return p; } @@ -214,15 +216,14 @@ iralloct_realign(tsdn_t *tsdn, void *ptr, size_t oldsize, size_t size, JEMALLOC_ALWAYS_INLINE void * iralloct_explicit_slab(tsdn_t *tsdn, void *ptr, size_t oldsize, size_t size, size_t alignment, bool zero, bool slab, tcache_t *tcache, arena_t *arena, - hook_ralloc_args_t *hook_args) -{ + hook_ralloc_args_t *hook_args) { assert(ptr != NULL); assert(size != 0); - witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn), - WITNESS_RANK_CORE, 0); + witness_assert_depth_to_rank( + tsdn_witness_tsdp_get(tsdn), WITNESS_RANK_CORE, 0); - if (alignment != 0 && ((uintptr_t)ptr & ((uintptr_t)alignment-1)) - != 0) { + if (alignment != 0 + && ((uintptr_t)ptr & ((uintptr_t)alignment - 1)) != 0) { /* * Existing object alignment is inadequate; allocate new space * and copy. 
@@ -238,8 +239,7 @@ iralloct_explicit_slab(tsdn_t *tsdn, void *ptr, size_t oldsize, size_t size, JEMALLOC_ALWAYS_INLINE void * iralloct(tsdn_t *tsdn, void *ptr, size_t oldsize, size_t size, size_t alignment, size_t usize, bool zero, tcache_t *tcache, arena_t *arena, - hook_ralloc_args_t *hook_args) -{ + hook_ralloc_args_t *hook_args) { bool slab = sz_can_use_slab(usize); return iralloct_explicit_slab(tsdn, ptr, oldsize, size, alignment, zero, slab, tcache, arena, hook_args); @@ -257,23 +257,23 @@ ixalloc(tsdn_t *tsdn, void *ptr, size_t oldsize, size_t size, size_t extra, size_t alignment, bool zero, size_t *newsize) { assert(ptr != NULL); assert(size != 0); - witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn), - WITNESS_RANK_CORE, 0); + witness_assert_depth_to_rank( + tsdn_witness_tsdp_get(tsdn), WITNESS_RANK_CORE, 0); - if (alignment != 0 && ((uintptr_t)ptr & ((uintptr_t)alignment-1)) - != 0) { + if (alignment != 0 + && ((uintptr_t)ptr & ((uintptr_t)alignment - 1)) != 0) { /* Existing object alignment is inadequate. */ *newsize = oldsize; return true; } - return arena_ralloc_no_move(tsdn, ptr, oldsize, size, extra, zero, - newsize); + return arena_ralloc_no_move( + tsdn, ptr, oldsize, size, extra, zero, newsize); } JEMALLOC_ALWAYS_INLINE void -fastpath_success_finish(tsd_t *tsd, uint64_t allocated_after, - cache_bin_t *bin, void *ret) { +fastpath_success_finish( + tsd_t *tsd, uint64_t allocated_after, cache_bin_t *bin, void *ret) { thread_allocated_set(tsd, allocated_after); if (config_stats) { bin->tstats.nrequests++; @@ -331,8 +331,8 @@ imalloc_fastpath(size_t size, void *(fallback_alloc)(size_t)) { sz_size2index_usize_fastpath(size, &ind, &usize); /* Fast path relies on size being a bin. 
*/ assert(ind < SC_NBINS); - assert((SC_LOOKUP_MAXCLASS < SC_SMALL_MAXCLASS) && - (size <= SC_SMALL_MAXCLASS)); + assert((SC_LOOKUP_MAXCLASS < SC_SMALL_MAXCLASS) + && (size <= SC_SMALL_MAXCLASS)); uint64_t allocated, threshold; te_malloc_fastpath_ctx(tsd, &allocated, &threshold); @@ -363,7 +363,7 @@ imalloc_fastpath(size_t size, void *(fallback_alloc)(size_t)) { cache_bin_t *bin = &tcache->bins[ind]; /* Suppress spurious warning from static analysis */ assert(bin != NULL); - bool tcache_success; + bool tcache_success; void *ret; /* @@ -388,56 +388,56 @@ imalloc_fastpath(size_t size, void *(fallback_alloc)(size_t)) { JEMALLOC_ALWAYS_INLINE tcache_t * tcache_get_from_ind(tsd_t *tsd, unsigned tcache_ind, bool slow, bool is_alloc) { - tcache_t *tcache; - if (tcache_ind == TCACHE_IND_AUTOMATIC) { - if (likely(!slow)) { - /* Getting tcache ptr unconditionally. */ - tcache = tsd_tcachep_get(tsd); - assert(tcache == tcache_get(tsd)); - } else if (is_alloc || - likely(tsd_reentrancy_level_get(tsd) == 0)) { - tcache = tcache_get(tsd); - } else { - tcache = NULL; - } - } else { - /* + tcache_t *tcache; + if (tcache_ind == TCACHE_IND_AUTOMATIC) { + if (likely(!slow)) { + /* Getting tcache ptr unconditionally. */ + tcache = tsd_tcachep_get(tsd); + assert(tcache == tcache_get(tsd)); + } else if (is_alloc + || likely(tsd_reentrancy_level_get(tsd) == 0)) { + tcache = tcache_get(tsd); + } else { + tcache = NULL; + } + } else { + /* * Should not specify tcache on deallocation path when being * reentrant. 
*/ - assert(is_alloc || tsd_reentrancy_level_get(tsd) == 0 || - tsd_state_nocleanup(tsd)); - if (tcache_ind == TCACHE_IND_NONE) { - tcache = NULL; - } else { - tcache = tcaches_get(tsd, tcache_ind); - } - } - return tcache; + assert(is_alloc || tsd_reentrancy_level_get(tsd) == 0 + || tsd_state_nocleanup(tsd)); + if (tcache_ind == TCACHE_IND_NONE) { + tcache = NULL; + } else { + tcache = tcaches_get(tsd, tcache_ind); + } + } + return tcache; } JEMALLOC_ALWAYS_INLINE bool maybe_check_alloc_ctx(tsd_t *tsd, void *ptr, emap_alloc_ctx_t *alloc_ctx) { - if (config_opt_size_checks) { - emap_alloc_ctx_t dbg_ctx; - emap_alloc_ctx_lookup(tsd_tsdn(tsd), &arena_emap_global, ptr, - &dbg_ctx); - if (alloc_ctx->szind != dbg_ctx.szind) { - safety_check_fail_sized_dealloc( - /* current_dealloc */ true, ptr, - /* true_size */ emap_alloc_ctx_usize_get(&dbg_ctx), - /* input_size */ emap_alloc_ctx_usize_get( - alloc_ctx)); - return true; - } - if (alloc_ctx->slab != dbg_ctx.slab) { - safety_check_fail( - "Internal heap corruption detected: " - "mismatch in slab bit"); - return true; - } - } - return false; + if (config_opt_size_checks) { + emap_alloc_ctx_t dbg_ctx; + emap_alloc_ctx_lookup( + tsd_tsdn(tsd), &arena_emap_global, ptr, &dbg_ctx); + if (alloc_ctx->szind != dbg_ctx.szind) { + safety_check_fail_sized_dealloc( + /* current_dealloc */ true, ptr, + /* true_size */ emap_alloc_ctx_usize_get(&dbg_ctx), + /* input_size */ + emap_alloc_ctx_usize_get(alloc_ctx)); + return true; + } + if (alloc_ctx->slab != dbg_ctx.slab) { + safety_check_fail( + "Internal heap corruption detected: " + "mismatch in slab bit"); + return true; + } + } + return false; } JEMALLOC_ALWAYS_INLINE bool @@ -447,7 +447,7 @@ prof_sample_aligned(const void *ptr) { JEMALLOC_ALWAYS_INLINE bool free_fastpath_nonfast_aligned(void *ptr, bool check_prof) { - /* + /* * free_fastpath do not handle two uncommon cases: 1) sampled profiled * objects and 2) sampled junk & stash for use-after-free detection. 
* Both have special alignments which are used to escape the fastpath. @@ -456,144 +456,145 @@ free_fastpath_nonfast_aligned(void *ptr, bool check_prof) { * are enabled (the assertion below). Avoiding redundant checks since * this is on the fastpath -- at most one runtime branch from this. */ - if (config_debug && cache_bin_nonfast_aligned(ptr)) { - assert(prof_sample_aligned(ptr)); - } + if (config_debug && cache_bin_nonfast_aligned(ptr)) { + assert(prof_sample_aligned(ptr)); + } - if (config_prof && check_prof) { - /* When prof is enabled, the prof_sample alignment is enough. */ - if (prof_sample_aligned(ptr)) { - return true; - } else { - return false; - } - } + if (config_prof && check_prof) { + /* When prof is enabled, the prof_sample alignment is enough. */ + if (prof_sample_aligned(ptr)) { + return true; + } else { + return false; + } + } - if (config_uaf_detection) { - if (cache_bin_nonfast_aligned(ptr)) { - return true; - } else { - return false; - } - } + if (config_uaf_detection) { + if (cache_bin_nonfast_aligned(ptr)) { + return true; + } else { + return false; + } + } - return false; + return false; } /* Returns whether or not the free attempt was successful. */ JEMALLOC_ALWAYS_INLINE -bool free_fastpath(void *ptr, size_t size, bool size_hint) { - tsd_t *tsd = tsd_get(false); - /* The branch gets optimized away unless tsd_get_allocates(). */ - if (unlikely(tsd == NULL)) { - return false; - } - /* +bool +free_fastpath(void *ptr, size_t size, bool size_hint) { + tsd_t *tsd = tsd_get(false); + /* The branch gets optimized away unless tsd_get_allocates(). */ + if (unlikely(tsd == NULL)) { + return false; + } + /* * The tsd_fast() / initialized checks are folded into the branch * testing (deallocated_after >= threshold) later in this function. * The threshold will be set to 0 when !tsd_fast. 
*/ - assert(tsd_fast(tsd) || - *tsd_thread_deallocated_next_event_fastp_get_unsafe(tsd) == 0); + assert(tsd_fast(tsd) + || *tsd_thread_deallocated_next_event_fastp_get_unsafe(tsd) == 0); - emap_alloc_ctx_t alloc_ctx JEMALLOC_CC_SILENCE_INIT({0, 0, false}); - size_t usize; - if (!size_hint) { - bool err = emap_alloc_ctx_try_lookup_fast(tsd, - &arena_emap_global, ptr, &alloc_ctx); + emap_alloc_ctx_t alloc_ctx JEMALLOC_CC_SILENCE_INIT({0, 0, false}); + size_t usize; + if (!size_hint) { + bool err = emap_alloc_ctx_try_lookup_fast( + tsd, &arena_emap_global, ptr, &alloc_ctx); - /* Note: profiled objects will have alloc_ctx.slab set */ - if (unlikely(err || !alloc_ctx.slab || - free_fastpath_nonfast_aligned(ptr, - /* check_prof */ false))) { - return false; - } - assert(alloc_ctx.szind != SC_NSIZES); + /* Note: profiled objects will have alloc_ctx.slab set */ + if (unlikely(err || !alloc_ctx.slab + || free_fastpath_nonfast_aligned(ptr, + /* check_prof */ false))) { + return false; + } + assert(alloc_ctx.szind != SC_NSIZES); usize = sz_index2size(alloc_ctx.szind); - } else { - /* + } else { + /* * Check for both sizes that are too large, and for sampled / * special aligned objects. The alignment check will also check * for null ptr. */ - if (unlikely(size > SC_LOOKUP_MAXCLASS || - free_fastpath_nonfast_aligned(ptr, - /* check_prof */ true))) { - return false; - } + if (unlikely(size > SC_LOOKUP_MAXCLASS + || free_fastpath_nonfast_aligned(ptr, + /* check_prof */ true))) { + return false; + } sz_size2index_usize_fastpath(size, &alloc_ctx.szind, &usize); - /* Max lookup class must be small. */ - assert(alloc_ctx.szind < SC_NBINS); - /* This is a dead store, except when opt size checking is on. */ - alloc_ctx.slab = true; - } - /* + /* Max lookup class must be small. */ + assert(alloc_ctx.szind < SC_NBINS); + /* This is a dead store, except when opt size checking is on. */ + alloc_ctx.slab = true; + } + /* * Currently the fastpath only handles small sizes. 
The branch on * SC_LOOKUP_MAXCLASS makes sure of it. This lets us avoid checking * tcache szind upper limit (i.e. tcache_max) as well. */ - assert(alloc_ctx.slab); + assert(alloc_ctx.slab); - uint64_t deallocated, threshold; - te_free_fastpath_ctx(tsd, &deallocated, &threshold); + uint64_t deallocated, threshold; + te_free_fastpath_ctx(tsd, &deallocated, &threshold); - uint64_t deallocated_after = deallocated + usize; - /* + uint64_t deallocated_after = deallocated + usize; + /* * Check for events and tsd non-nominal (fast_threshold will be set to * 0) in a single branch. Note that this handles the uninitialized case * as well (TSD init will be triggered on the non-fastpath). Therefore * anything depends on a functional TSD (e.g. the alloc_ctx sanity check * below) needs to be after this branch. */ - if (unlikely(deallocated_after >= threshold)) { - return false; - } - assert(tsd_fast(tsd)); - bool fail = maybe_check_alloc_ctx(tsd, ptr, &alloc_ctx); - if (fail) { - /* See the comment in isfree. */ - return true; - } + if (unlikely(deallocated_after >= threshold)) { + return false; + } + assert(tsd_fast(tsd)); + bool fail = maybe_check_alloc_ctx(tsd, ptr, &alloc_ctx); + if (fail) { + /* See the comment in isfree. */ + return true; + } - tcache_t *tcache = tcache_get_from_ind(tsd, TCACHE_IND_AUTOMATIC, - /* slow */ false, /* is_alloc */ false); - cache_bin_t *bin = &tcache->bins[alloc_ctx.szind]; + tcache_t *tcache = tcache_get_from_ind(tsd, TCACHE_IND_AUTOMATIC, + /* slow */ false, /* is_alloc */ false); + cache_bin_t *bin = &tcache->bins[alloc_ctx.szind]; - /* + /* * If junking were enabled, this is where we would do it. It's not * though, since we ensured above that we're on the fast path. Assert * that to double-check. 
*/ - assert(!opt_junk_free); + assert(!opt_junk_free); - if (!cache_bin_dalloc_easy(bin, ptr)) { - return false; - } + if (!cache_bin_dalloc_easy(bin, ptr)) { + return false; + } - *tsd_thread_deallocatedp_get(tsd) = deallocated_after; + *tsd_thread_deallocatedp_get(tsd) = deallocated_after; - return true; + return true; } JEMALLOC_ALWAYS_INLINE void JEMALLOC_NOTHROW je_sdallocx_noflags(void *ptr, size_t size) { - if (!free_fastpath(ptr, size, true)) { - sdallocx_default(ptr, size, 0); - } + if (!free_fastpath(ptr, size, true)) { + sdallocx_default(ptr, size, 0); + } } JEMALLOC_ALWAYS_INLINE void JEMALLOC_NOTHROW je_sdallocx_impl(void *ptr, size_t size, int flags) { - if (flags != 0 || !free_fastpath(ptr, size, true)) { - sdallocx_default(ptr, size, flags); - } + if (flags != 0 || !free_fastpath(ptr, size, true)) { + sdallocx_default(ptr, size, flags); + } } JEMALLOC_ALWAYS_INLINE void JEMALLOC_NOTHROW je_free_impl(void *ptr) { - if (!free_fastpath(ptr, 0, false)) { - free_default(ptr); - } + if (!free_fastpath(ptr, 0, false)) { + free_default(ptr); + } } #endif /* JEMALLOC_INTERNAL_INLINES_C_H */ diff --git a/include/jemalloc/internal/jemalloc_internal_macros.h b/include/jemalloc/internal/jemalloc_internal_macros.h index 407e868a..eb1ca119 100644 --- a/include/jemalloc/internal/jemalloc_internal_macros.h +++ b/include/jemalloc/internal/jemalloc_internal_macros.h @@ -2,45 +2,46 @@ #define JEMALLOC_INTERNAL_MACROS_H #ifdef JEMALLOC_DEBUG -# define JEMALLOC_ALWAYS_INLINE static inline +# define JEMALLOC_ALWAYS_INLINE static inline #else -# ifdef _MSC_VER -# define JEMALLOC_ALWAYS_INLINE static __forceinline -# else -# define JEMALLOC_ALWAYS_INLINE JEMALLOC_ATTR(always_inline) static inline -# endif +# ifdef _MSC_VER +# define JEMALLOC_ALWAYS_INLINE static __forceinline +# else +# define JEMALLOC_ALWAYS_INLINE \ + JEMALLOC_ATTR(always_inline) static inline +# endif #endif #ifdef _MSC_VER -# define inline _inline +# define inline _inline #endif #define UNUSED 
JEMALLOC_ATTR(unused) -#define ZU(z) ((size_t)z) -#define ZD(z) ((ssize_t)z) -#define QU(q) ((uint64_t)q) -#define QD(q) ((int64_t)q) +#define ZU(z) ((size_t)z) +#define ZD(z) ((ssize_t)z) +#define QU(q) ((uint64_t)q) +#define QD(q) ((int64_t)q) -#define KZU(z) ZU(z##ULL) -#define KZD(z) ZD(z##LL) -#define KQU(q) QU(q##ULL) -#define KQD(q) QI(q##LL) +#define KZU(z) ZU(z##ULL) +#define KZD(z) ZD(z##LL) +#define KQU(q) QU(q##ULL) +#define KQD(q) QI(q##LL) #ifndef __DECONST -# define __DECONST(type, var) ((type)(uintptr_t)(const void *)(var)) +# define __DECONST(type, var) ((type)(uintptr_t)(const void *)(var)) #endif #if !defined(JEMALLOC_HAS_RESTRICT) || defined(__cplusplus) -# define restrict +# define restrict #endif /* Various function pointers are static and immutable except during testing. */ #ifdef JEMALLOC_JET -# define JET_MUTABLE -# define JET_EXTERN extern +# define JET_MUTABLE +# define JET_EXTERN extern #else -# define JET_MUTABLE const -# define JET_EXTERN static +# define JET_MUTABLE const +# define JET_EXTERN static #endif #define JEMALLOC_VA_ARGS_HEAD(head, ...) 
head @@ -48,91 +49,93 @@ /* Diagnostic suppression macros */ #if defined(_MSC_VER) && !defined(__clang__) -# define JEMALLOC_DIAGNOSTIC_PUSH __pragma(warning(push)) -# define JEMALLOC_DIAGNOSTIC_POP __pragma(warning(pop)) -# define JEMALLOC_DIAGNOSTIC_IGNORE(W) __pragma(warning(disable:W)) -# define JEMALLOC_DIAGNOSTIC_IGNORE_MISSING_STRUCT_FIELD_INITIALIZERS -# define JEMALLOC_DIAGNOSTIC_IGNORE_FRAME_ADDRESS -# define JEMALLOC_DIAGNOSTIC_IGNORE_TYPE_LIMITS -# define JEMALLOC_DIAGNOSTIC_IGNORE_ALLOC_SIZE_LARGER_THAN -# define JEMALLOC_DIAGNOSTIC_IGNORE_DEPRECATED -# define JEMALLOC_DIAGNOSTIC_DISABLE_SPURIOUS +# define JEMALLOC_DIAGNOSTIC_PUSH __pragma(warning(push)) +# define JEMALLOC_DIAGNOSTIC_POP __pragma(warning(pop)) +# define JEMALLOC_DIAGNOSTIC_IGNORE(W) __pragma(warning(disable : W)) +# define JEMALLOC_DIAGNOSTIC_IGNORE_MISSING_STRUCT_FIELD_INITIALIZERS +# define JEMALLOC_DIAGNOSTIC_IGNORE_FRAME_ADDRESS +# define JEMALLOC_DIAGNOSTIC_IGNORE_TYPE_LIMITS +# define JEMALLOC_DIAGNOSTIC_IGNORE_ALLOC_SIZE_LARGER_THAN +# define JEMALLOC_DIAGNOSTIC_IGNORE_DEPRECATED +# define JEMALLOC_DIAGNOSTIC_DISABLE_SPURIOUS /* #pragma GCC diagnostic first appeared in gcc 4.6. */ -#elif (defined(__GNUC__) && ((__GNUC__ > 4) || ((__GNUC__ == 4) && \ - (__GNUC_MINOR__ > 5)))) || defined(__clang__) +#elif (defined(__GNUC__) \ + && ((__GNUC__ > 4) || ((__GNUC__ == 4) && (__GNUC_MINOR__ > 5)))) \ + || defined(__clang__) /* * The JEMALLOC_PRAGMA__ macro is an implementation detail of the GCC and Clang * diagnostic suppression macros and should not be used anywhere else. 
*/ -# define JEMALLOC_PRAGMA__(X) _Pragma(#X) -# define JEMALLOC_DIAGNOSTIC_PUSH JEMALLOC_PRAGMA__(GCC diagnostic push) -# define JEMALLOC_DIAGNOSTIC_POP JEMALLOC_PRAGMA__(GCC diagnostic pop) -# define JEMALLOC_DIAGNOSTIC_IGNORE(W) \ - JEMALLOC_PRAGMA__(GCC diagnostic ignored W) +# define JEMALLOC_PRAGMA__(X) _Pragma(#X) +# define JEMALLOC_DIAGNOSTIC_PUSH JEMALLOC_PRAGMA__(GCC diagnostic push) +# define JEMALLOC_DIAGNOSTIC_POP JEMALLOC_PRAGMA__(GCC diagnostic pop) +# define JEMALLOC_DIAGNOSTIC_IGNORE(W) \ + JEMALLOC_PRAGMA__(GCC diagnostic ignored W) /* * The -Wmissing-field-initializers warning is buggy in GCC versions < 5.1 and * all clang versions up to version 7 (currently trunk, unreleased). This macro * suppresses the warning for the affected compiler versions only. */ -# if ((defined(__GNUC__) && !defined(__clang__)) && (__GNUC__ < 5)) || \ - defined(__clang__) -# define JEMALLOC_DIAGNOSTIC_IGNORE_MISSING_STRUCT_FIELD_INITIALIZERS \ - JEMALLOC_DIAGNOSTIC_IGNORE("-Wmissing-field-initializers") -# else -# define JEMALLOC_DIAGNOSTIC_IGNORE_MISSING_STRUCT_FIELD_INITIALIZERS -# endif +# if ((defined(__GNUC__) && !defined(__clang__)) && (__GNUC__ < 5)) \ + || defined(__clang__) +# define JEMALLOC_DIAGNOSTIC_IGNORE_MISSING_STRUCT_FIELD_INITIALIZERS \ + JEMALLOC_DIAGNOSTIC_IGNORE( \ + "-Wmissing-field-initializers") +# else +# define JEMALLOC_DIAGNOSTIC_IGNORE_MISSING_STRUCT_FIELD_INITIALIZERS +# endif -# define JEMALLOC_DIAGNOSTIC_IGNORE_FRAME_ADDRESS \ - JEMALLOC_DIAGNOSTIC_IGNORE("-Wframe-address") -# define JEMALLOC_DIAGNOSTIC_IGNORE_TYPE_LIMITS \ - JEMALLOC_DIAGNOSTIC_IGNORE("-Wtype-limits") -# define JEMALLOC_DIAGNOSTIC_IGNORE_UNUSED_PARAMETER \ - JEMALLOC_DIAGNOSTIC_IGNORE("-Wunused-parameter") -# if defined(__GNUC__) && !defined(__clang__) && (__GNUC__ >= 7) -# define JEMALLOC_DIAGNOSTIC_IGNORE_ALLOC_SIZE_LARGER_THAN \ - JEMALLOC_DIAGNOSTIC_IGNORE("-Walloc-size-larger-than=") -# else -# define JEMALLOC_DIAGNOSTIC_IGNORE_ALLOC_SIZE_LARGER_THAN -# endif -# 
ifdef JEMALLOC_HAVE_ATTR_DEPRECATED -# define JEMALLOC_DIAGNOSTIC_IGNORE_DEPRECATED \ - JEMALLOC_DIAGNOSTIC_IGNORE("-Wdeprecated-declarations") -# else -# define JEMALLOC_DIAGNOSTIC_IGNORE_DEPRECATED -# endif -# define JEMALLOC_DIAGNOSTIC_DISABLE_SPURIOUS \ - JEMALLOC_DIAGNOSTIC_PUSH \ - JEMALLOC_DIAGNOSTIC_IGNORE_UNUSED_PARAMETER +# define JEMALLOC_DIAGNOSTIC_IGNORE_FRAME_ADDRESS \ + JEMALLOC_DIAGNOSTIC_IGNORE("-Wframe-address") +# define JEMALLOC_DIAGNOSTIC_IGNORE_TYPE_LIMITS \ + JEMALLOC_DIAGNOSTIC_IGNORE("-Wtype-limits") +# define JEMALLOC_DIAGNOSTIC_IGNORE_UNUSED_PARAMETER \ + JEMALLOC_DIAGNOSTIC_IGNORE("-Wunused-parameter") +# if defined(__GNUC__) && !defined(__clang__) && (__GNUC__ >= 7) +# define JEMALLOC_DIAGNOSTIC_IGNORE_ALLOC_SIZE_LARGER_THAN \ + JEMALLOC_DIAGNOSTIC_IGNORE("-Walloc-size-larger-than=") +# else +# define JEMALLOC_DIAGNOSTIC_IGNORE_ALLOC_SIZE_LARGER_THAN +# endif +# ifdef JEMALLOC_HAVE_ATTR_DEPRECATED +# define JEMALLOC_DIAGNOSTIC_IGNORE_DEPRECATED \ + JEMALLOC_DIAGNOSTIC_IGNORE("-Wdeprecated-declarations") +# else +# define JEMALLOC_DIAGNOSTIC_IGNORE_DEPRECATED +# endif +# define JEMALLOC_DIAGNOSTIC_DISABLE_SPURIOUS \ + JEMALLOC_DIAGNOSTIC_PUSH \ + JEMALLOC_DIAGNOSTIC_IGNORE_UNUSED_PARAMETER #else -# define JEMALLOC_DIAGNOSTIC_PUSH -# define JEMALLOC_DIAGNOSTIC_POP -# define JEMALLOC_DIAGNOSTIC_IGNORE(W) -# define JEMALLOC_DIAGNOSTIC_IGNORE_MISSING_STRUCT_FIELD_INITIALIZERS -# define JEMALLOC_DIAGNOSTIC_IGNORE_FRAME_ADDRESS -# define JEMALLOC_DIAGNOSTIC_IGNORE_TYPE_LIMITS -# define JEMALLOC_DIAGNOSTIC_IGNORE_ALLOC_SIZE_LARGER_THAN -# define JEMALLOC_DIAGNOSTIC_IGNORE_DEPRECATED -# define JEMALLOC_DIAGNOSTIC_DISABLE_SPURIOUS +# define JEMALLOC_DIAGNOSTIC_PUSH +# define JEMALLOC_DIAGNOSTIC_POP +# define JEMALLOC_DIAGNOSTIC_IGNORE(W) +# define JEMALLOC_DIAGNOSTIC_IGNORE_MISSING_STRUCT_FIELD_INITIALIZERS +# define JEMALLOC_DIAGNOSTIC_IGNORE_FRAME_ADDRESS +# define JEMALLOC_DIAGNOSTIC_IGNORE_TYPE_LIMITS +# define 
JEMALLOC_DIAGNOSTIC_IGNORE_ALLOC_SIZE_LARGER_THAN +# define JEMALLOC_DIAGNOSTIC_IGNORE_DEPRECATED +# define JEMALLOC_DIAGNOSTIC_DISABLE_SPURIOUS #endif #ifdef __clang_analyzer__ -# define JEMALLOC_CLANG_ANALYZER +# define JEMALLOC_CLANG_ANALYZER #endif #ifdef JEMALLOC_CLANG_ANALYZER -# define JEMALLOC_CLANG_ANALYZER_SUPPRESS __attribute__((suppress)) -# define JEMALLOC_CLANG_ANALYZER_SILENCE_INIT(v) = v +# define JEMALLOC_CLANG_ANALYZER_SUPPRESS __attribute__((suppress)) +# define JEMALLOC_CLANG_ANALYZER_SILENCE_INIT(v) = v #else -# define JEMALLOC_CLANG_ANALYZER_SUPPRESS -# define JEMALLOC_CLANG_ANALYZER_SILENCE_INIT(v) +# define JEMALLOC_CLANG_ANALYZER_SUPPRESS +# define JEMALLOC_CLANG_ANALYZER_SILENCE_INIT(v) #endif -#define JEMALLOC_SUPPRESS_WARN_ON_USAGE(...) \ - JEMALLOC_DIAGNOSTIC_PUSH \ - JEMALLOC_DIAGNOSTIC_IGNORE_DEPRECATED \ - __VA_ARGS__ \ - JEMALLOC_DIAGNOSTIC_POP +#define JEMALLOC_SUPPRESS_WARN_ON_USAGE(...) \ + JEMALLOC_DIAGNOSTIC_PUSH \ + JEMALLOC_DIAGNOSTIC_IGNORE_DEPRECATED \ + __VA_ARGS__ \ + JEMALLOC_DIAGNOSTIC_POP /* * Disables spurious diagnostics for all headers. 
Since these headers are not diff --git a/include/jemalloc/internal/jemalloc_internal_overrides.h b/include/jemalloc/internal/jemalloc_internal_overrides.h index 5fbbe249..bf74a612 100644 --- a/include/jemalloc/internal/jemalloc_internal_overrides.h +++ b/include/jemalloc/internal/jemalloc_internal_overrides.h @@ -9,13 +9,14 @@ */ #ifdef JEMALLOC_OVERRIDE_LG_PAGE - #undef LG_PAGE - #define LG_PAGE JEMALLOC_OVERRIDE_LG_PAGE +# undef LG_PAGE +# define LG_PAGE JEMALLOC_OVERRIDE_LG_PAGE #endif #ifdef JEMALLOC_OVERRIDE_JEMALLOC_CONFIG_MALLOC_CONF - #undef JEMALLOC_CONFIG_MALLOC_CONF - #define JEMALLOC_CONFIG_MALLOC_CONF JEMALLOC_OVERRIDE_JEMALLOC_CONFIG_MALLOC_CONF +# undef JEMALLOC_CONFIG_MALLOC_CONF +# define JEMALLOC_CONFIG_MALLOC_CONF \ + JEMALLOC_OVERRIDE_JEMALLOC_CONFIG_MALLOC_CONF #endif #endif /* JEMALLOC_INTERNAL_OVERRIDES_H */ diff --git a/include/jemalloc/internal/jemalloc_internal_types.h b/include/jemalloc/internal/jemalloc_internal_types.h index cddbfb65..0ade5461 100644 --- a/include/jemalloc/internal/jemalloc_internal_types.h +++ b/include/jemalloc/internal/jemalloc_internal_types.h @@ -18,13 +18,13 @@ enum zero_realloc_action_e { typedef enum zero_realloc_action_e zero_realloc_action_t; /* Signature of write callback. */ -typedef void (write_cb_t)(void *, const char *); +typedef void(write_cb_t)(void *, const char *); enum malloc_init_e { - malloc_init_uninitialized = 3, - malloc_init_a0_initialized = 2, - malloc_init_recursible = 1, - malloc_init_initialized = 0 /* Common case --> jnz. */ + malloc_init_uninitialized = 3, + malloc_init_a0_initialized = 2, + malloc_init_recursible = 1, + malloc_init_initialized = 0 /* Common case --> jnz. 
*/ }; typedef enum malloc_init_e malloc_init_t; @@ -39,48 +39,46 @@ typedef enum malloc_init_e malloc_init_t; * * aaaaaaaa aaaatttt tttttttt 0znnnnnn */ -#define MALLOCX_ARENA_BITS 12 -#define MALLOCX_TCACHE_BITS 12 -#define MALLOCX_LG_ALIGN_BITS 6 -#define MALLOCX_ARENA_SHIFT 20 -#define MALLOCX_TCACHE_SHIFT 8 -#define MALLOCX_ARENA_MASK \ - ((unsigned)(((1U << MALLOCX_ARENA_BITS) - 1) << MALLOCX_ARENA_SHIFT)) +#define MALLOCX_ARENA_BITS 12 +#define MALLOCX_TCACHE_BITS 12 +#define MALLOCX_LG_ALIGN_BITS 6 +#define MALLOCX_ARENA_SHIFT 20 +#define MALLOCX_TCACHE_SHIFT 8 +#define MALLOCX_ARENA_MASK \ + ((unsigned)(((1U << MALLOCX_ARENA_BITS) - 1) << MALLOCX_ARENA_SHIFT)) /* NB: Arena index bias decreases the maximum number of arenas by 1. */ -#define MALLOCX_ARENA_LIMIT ((unsigned)((1U << MALLOCX_ARENA_BITS) - 1)) -#define MALLOCX_TCACHE_MASK \ - ((unsigned)(((1U << MALLOCX_TCACHE_BITS) - 1) << MALLOCX_TCACHE_SHIFT)) -#define MALLOCX_TCACHE_MAX ((unsigned)((1U << MALLOCX_TCACHE_BITS) - 3)) -#define MALLOCX_LG_ALIGN_MASK ((1 << MALLOCX_LG_ALIGN_BITS) - 1) +#define MALLOCX_ARENA_LIMIT ((unsigned)((1U << MALLOCX_ARENA_BITS) - 1)) +#define MALLOCX_TCACHE_MASK \ + ((unsigned)(((1U << MALLOCX_TCACHE_BITS) - 1) << MALLOCX_TCACHE_SHIFT)) +#define MALLOCX_TCACHE_MAX ((unsigned)((1U << MALLOCX_TCACHE_BITS) - 3)) +#define MALLOCX_LG_ALIGN_MASK ((1 << MALLOCX_LG_ALIGN_BITS) - 1) /* Use MALLOCX_ALIGN_GET() if alignment may not be specified in flags. 
*/ -#define MALLOCX_ALIGN_GET_SPECIFIED(flags) \ - (ZU(1) << (flags & MALLOCX_LG_ALIGN_MASK)) -#define MALLOCX_ALIGN_GET(flags) \ - (MALLOCX_ALIGN_GET_SPECIFIED(flags) & (SIZE_T_MAX-1)) -#define MALLOCX_ZERO_GET(flags) \ - ((bool)(flags & MALLOCX_ZERO)) +#define MALLOCX_ALIGN_GET_SPECIFIED(flags) \ + (ZU(1) << (flags & MALLOCX_LG_ALIGN_MASK)) +#define MALLOCX_ALIGN_GET(flags) \ + (MALLOCX_ALIGN_GET_SPECIFIED(flags) & (SIZE_T_MAX - 1)) +#define MALLOCX_ZERO_GET(flags) ((bool)(flags & MALLOCX_ZERO)) -#define MALLOCX_TCACHE_GET(flags) \ - (((unsigned)((flags & MALLOCX_TCACHE_MASK) >> MALLOCX_TCACHE_SHIFT)) - 2) -#define MALLOCX_ARENA_GET(flags) \ - (((unsigned)(((unsigned)flags) >> MALLOCX_ARENA_SHIFT)) - 1) +#define MALLOCX_TCACHE_GET(flags) \ + (((unsigned)((flags & MALLOCX_TCACHE_MASK) >> MALLOCX_TCACHE_SHIFT)) \ + - 2) +#define MALLOCX_ARENA_GET(flags) \ + (((unsigned)(((unsigned)flags) >> MALLOCX_ARENA_SHIFT)) - 1) /* Smallest size class to support. */ -#define TINY_MIN (1U << LG_TINY_MIN) +#define TINY_MIN (1U << LG_TINY_MIN) -#define LONG ((size_t)(1U << LG_SIZEOF_LONG)) -#define LONG_MASK (LONG - 1) +#define LONG ((size_t)(1U << LG_SIZEOF_LONG)) +#define LONG_MASK (LONG - 1) /* Return the smallest long multiple that is >= a. */ -#define LONG_CEILING(a) \ - (((a) + LONG_MASK) & ~LONG_MASK) +#define LONG_CEILING(a) (((a) + LONG_MASK) & ~LONG_MASK) -#define SIZEOF_PTR (1U << LG_SIZEOF_PTR) -#define PTR_MASK (SIZEOF_PTR - 1) +#define SIZEOF_PTR (1U << LG_SIZEOF_PTR) +#define PTR_MASK (SIZEOF_PTR - 1) /* Return the smallest (void *) multiple that is >= a. */ -#define PTR_CEILING(a) \ - (((a) + PTR_MASK) & ~PTR_MASK) +#define PTR_CEILING(a) (((a) + PTR_MASK) & ~PTR_MASK) /* * Maximum size of L1 cache line. This is used to avoid cache line aliasing. @@ -89,25 +87,24 @@ typedef enum malloc_init_e malloc_init_t; * CACHELINE cannot be based on LG_CACHELINE because __declspec(align()) can * only handle raw constants. 
*/ -#define LG_CACHELINE 6 -#define CACHELINE 64 -#define CACHELINE_MASK (CACHELINE - 1) +#define LG_CACHELINE 6 +#define CACHELINE 64 +#define CACHELINE_MASK (CACHELINE - 1) /* Return the smallest cacheline multiple that is >= s. */ -#define CACHELINE_CEILING(s) \ - (((s) + CACHELINE_MASK) & ~CACHELINE_MASK) +#define CACHELINE_CEILING(s) (((s) + CACHELINE_MASK) & ~CACHELINE_MASK) /* Return the nearest aligned address at or below a. */ -#define ALIGNMENT_ADDR2BASE(a, alignment) \ - ((void *)(((byte_t *)(a)) - (((uintptr_t)(a)) - \ - ((uintptr_t)(a) & ((~(alignment)) + 1))))) +#define ALIGNMENT_ADDR2BASE(a, alignment) \ + ((void *)(((byte_t *)(a)) \ + - (((uintptr_t)(a)) - ((uintptr_t)(a) & ((~(alignment)) + 1))))) /* Return the offset between a and the nearest aligned address at or below a. */ -#define ALIGNMENT_ADDR2OFFSET(a, alignment) \ +#define ALIGNMENT_ADDR2OFFSET(a, alignment) \ ((size_t)((uintptr_t)(a) & (alignment - 1))) /* Return the smallest alignment multiple that is >= s. */ -#define ALIGNMENT_CEILING(s, alignment) \ +#define ALIGNMENT_CEILING(s, alignment) \ (((s) + (alignment - 1)) & ((~(alignment)) + 1)) /* @@ -119,30 +116,31 @@ typedef enum malloc_init_e malloc_init_t; * provenance from the compiler. See the block-comment on the * definition of `byte_t` for more details. */ -#define ALIGNMENT_ADDR2CEILING(a, alignment) \ - ((void *)(((byte_t *)(a)) + (((((uintptr_t)(a)) + \ - (alignment - 1)) & ((~(alignment)) + 1)) - ((uintptr_t)(a))))) +#define ALIGNMENT_ADDR2CEILING(a, alignment) \ + ((void *)(((byte_t *)(a)) \ + + (((((uintptr_t)(a)) + (alignment - 1)) & ((~(alignment)) + 1)) \ + - ((uintptr_t)(a))))) /* Declare a variable-length array. 
*/ #if __STDC_VERSION__ < 199901L || defined(__STDC_NO_VLA__) -# ifdef _MSC_VER -# include -# define alloca _alloca -# else -# ifdef JEMALLOC_HAS_ALLOCA_H -# include -# else -# include -# endif -# endif -# define VARIABLE_ARRAY_UNSAFE(type, name, count) \ - type *name = alloca(sizeof(type) * (count)) +# ifdef _MSC_VER +# include +# define alloca _alloca +# else +# ifdef JEMALLOC_HAS_ALLOCA_H +# include +# else +# include +# endif +# endif +# define VARIABLE_ARRAY_UNSAFE(type, name, count) \ + type *name = alloca(sizeof(type) * (count)) #else -# define VARIABLE_ARRAY_UNSAFE(type, name, count) type name[(count)] +# define VARIABLE_ARRAY_UNSAFE(type, name, count) type name[(count)] #endif -#define VARIABLE_ARRAY_SIZE_MAX 2048 -#define VARIABLE_ARRAY(type, name, count) \ - assert(sizeof(type) * (count) <= VARIABLE_ARRAY_SIZE_MAX); \ +#define VARIABLE_ARRAY_SIZE_MAX 2048 +#define VARIABLE_ARRAY(type, name, count) \ + assert(sizeof(type) * (count) <= VARIABLE_ARRAY_SIZE_MAX); \ VARIABLE_ARRAY_UNSAFE(type, name, count) #define CALLOC_MADVISE_THRESHOLD_DEFAULT (((size_t)1) << 23) /* 8 MB */ diff --git a/include/jemalloc/internal/large_externs.h b/include/jemalloc/internal/large_externs.h index ce9c8689..7cee6752 100644 --- a/include/jemalloc/internal/large_externs.h +++ b/include/jemalloc/internal/large_externs.h @@ -6,20 +6,20 @@ #include "jemalloc/internal/hook.h" void *large_malloc(tsdn_t *tsdn, arena_t *arena, size_t usize, bool zero); -void *large_palloc(tsdn_t *tsdn, arena_t *arena, size_t usize, size_t alignment, - bool zero); -bool large_ralloc_no_move(tsdn_t *tsdn, edata_t *edata, size_t usize_min, - size_t usize_max, bool zero); +void *large_palloc( + tsdn_t *tsdn, arena_t *arena, size_t usize, size_t alignment, bool zero); +bool large_ralloc_no_move(tsdn_t *tsdn, edata_t *edata, size_t usize_min, + size_t usize_max, bool zero); void *large_ralloc(tsdn_t *tsdn, arena_t *arena, void *ptr, size_t usize, size_t alignment, bool zero, tcache_t *tcache, 
hook_ralloc_args_t *hook_args); -void large_dalloc_prep_locked(tsdn_t *tsdn, edata_t *edata); -void large_dalloc_finish(tsdn_t *tsdn, edata_t *edata); -void large_dalloc(tsdn_t *tsdn, edata_t *edata); +void large_dalloc_prep_locked(tsdn_t *tsdn, edata_t *edata); +void large_dalloc_finish(tsdn_t *tsdn, edata_t *edata); +void large_dalloc(tsdn_t *tsdn, edata_t *edata); size_t large_salloc(tsdn_t *tsdn, const edata_t *edata); -void large_prof_info_get(tsd_t *tsd, edata_t *edata, prof_info_t *prof_info, - bool reset_recent); +void large_prof_info_get( + tsd_t *tsd, edata_t *edata, prof_info_t *prof_info, bool reset_recent); void large_prof_tctx_reset(edata_t *edata); void large_prof_info_set(edata_t *edata, prof_tctx_t *tctx, size_t size); diff --git a/include/jemalloc/internal/lockedint.h b/include/jemalloc/internal/lockedint.h index 062dedbf..46aba8ff 100644 --- a/include/jemalloc/internal/lockedint.h +++ b/include/jemalloc/internal/lockedint.h @@ -30,33 +30,34 @@ struct locked_zu_s { }; #ifndef JEMALLOC_ATOMIC_U64 -# define LOCKEDINT_MTX_DECLARE(name) malloc_mutex_t name; -# define LOCKEDINT_MTX_INIT(mu, name, rank, rank_mode) \ - malloc_mutex_init(&(mu), name, rank, rank_mode) -# define LOCKEDINT_MTX(mtx) (&(mtx)) -# define LOCKEDINT_MTX_LOCK(tsdn, mu) malloc_mutex_lock(tsdn, &(mu)) -# define LOCKEDINT_MTX_UNLOCK(tsdn, mu) malloc_mutex_unlock(tsdn, &(mu)) -# define LOCKEDINT_MTX_PREFORK(tsdn, mu) malloc_mutex_prefork(tsdn, &(mu)) -# define LOCKEDINT_MTX_POSTFORK_PARENT(tsdn, mu) \ - malloc_mutex_postfork_parent(tsdn, &(mu)) -# define LOCKEDINT_MTX_POSTFORK_CHILD(tsdn, mu) \ - malloc_mutex_postfork_child(tsdn, &(mu)) +# define LOCKEDINT_MTX_DECLARE(name) malloc_mutex_t name; +# define LOCKEDINT_MTX_INIT(mu, name, rank, rank_mode) \ + malloc_mutex_init(&(mu), name, rank, rank_mode) +# define LOCKEDINT_MTX(mtx) (&(mtx)) +# define LOCKEDINT_MTX_LOCK(tsdn, mu) malloc_mutex_lock(tsdn, &(mu)) +# define LOCKEDINT_MTX_UNLOCK(tsdn, mu) malloc_mutex_unlock(tsdn, &(mu)) +# 
define LOCKEDINT_MTX_PREFORK(tsdn, mu) \ + malloc_mutex_prefork(tsdn, &(mu)) +# define LOCKEDINT_MTX_POSTFORK_PARENT(tsdn, mu) \ + malloc_mutex_postfork_parent(tsdn, &(mu)) +# define LOCKEDINT_MTX_POSTFORK_CHILD(tsdn, mu) \ + malloc_mutex_postfork_child(tsdn, &(mu)) #else -# define LOCKEDINT_MTX_DECLARE(name) -# define LOCKEDINT_MTX(mtx) NULL -# define LOCKEDINT_MTX_INIT(mu, name, rank, rank_mode) false -# define LOCKEDINT_MTX_LOCK(tsdn, mu) -# define LOCKEDINT_MTX_UNLOCK(tsdn, mu) -# define LOCKEDINT_MTX_PREFORK(tsdn, mu) -# define LOCKEDINT_MTX_POSTFORK_PARENT(tsdn, mu) -# define LOCKEDINT_MTX_POSTFORK_CHILD(tsdn, mu) +# define LOCKEDINT_MTX_DECLARE(name) +# define LOCKEDINT_MTX(mtx) NULL +# define LOCKEDINT_MTX_INIT(mu, name, rank, rank_mode) false +# define LOCKEDINT_MTX_LOCK(tsdn, mu) +# define LOCKEDINT_MTX_UNLOCK(tsdn, mu) +# define LOCKEDINT_MTX_PREFORK(tsdn, mu) +# define LOCKEDINT_MTX_POSTFORK_PARENT(tsdn, mu) +# define LOCKEDINT_MTX_POSTFORK_CHILD(tsdn, mu) #endif #ifdef JEMALLOC_ATOMIC_U64 -# define LOCKEDINT_MTX_ASSERT_INTERNAL(tsdn, mtx) assert((mtx) == NULL) +# define LOCKEDINT_MTX_ASSERT_INTERNAL(tsdn, mtx) assert((mtx) == NULL) #else -# define LOCKEDINT_MTX_ASSERT_INTERNAL(tsdn, mtx) \ - malloc_mutex_assert_owner(tsdn, (mtx)) +# define LOCKEDINT_MTX_ASSERT_INTERNAL(tsdn, mtx) \ + malloc_mutex_assert_owner(tsdn, (mtx)) #endif static inline uint64_t @@ -70,8 +71,7 @@ locked_read_u64(tsdn_t *tsdn, malloc_mutex_t *mtx, locked_u64_t *p) { } static inline void -locked_inc_u64(tsdn_t *tsdn, malloc_mutex_t *mtx, locked_u64_t *p, - uint64_t x) { +locked_inc_u64(tsdn_t *tsdn, malloc_mutex_t *mtx, locked_u64_t *p, uint64_t x) { LOCKEDINT_MTX_ASSERT_INTERNAL(tsdn, mtx); #ifdef JEMALLOC_ATOMIC_U64 atomic_fetch_add_u64(&p->val, x, ATOMIC_RELAXED); @@ -81,8 +81,7 @@ locked_inc_u64(tsdn_t *tsdn, malloc_mutex_t *mtx, locked_u64_t *p, } static inline void -locked_dec_u64(tsdn_t *tsdn, malloc_mutex_t *mtx, locked_u64_t *p, - uint64_t x) { +locked_dec_u64(tsdn_t 
*tsdn, malloc_mutex_t *mtx, locked_u64_t *p, uint64_t x) { LOCKEDINT_MTX_ASSERT_INTERNAL(tsdn, mtx); #ifdef JEMALLOC_ATOMIC_U64 uint64_t r = atomic_fetch_sub_u64(&p->val, x, ATOMIC_RELAXED); @@ -99,7 +98,7 @@ locked_inc_mod_u64(tsdn_t *tsdn, malloc_mutex_t *mtx, locked_u64_t *p, const uint64_t x, const uint64_t modulus) { LOCKEDINT_MTX_ASSERT_INTERNAL(tsdn, mtx); uint64_t before, after; - bool overflow; + bool overflow; #ifdef JEMALLOC_ATOMIC_U64 before = atomic_load_u64(&p->val, ATOMIC_RELAXED); do { @@ -109,8 +108,8 @@ locked_inc_mod_u64(tsdn_t *tsdn, malloc_mutex_t *mtx, locked_u64_t *p, if (overflow) { after %= modulus; } - } while (!atomic_compare_exchange_weak_u64(&p->val, &before, after, - ATOMIC_RELAXED, ATOMIC_RELAXED)); + } while (!atomic_compare_exchange_weak_u64( + &p->val, &before, after, ATOMIC_RELAXED, ATOMIC_RELAXED)); #else before = p->val; after = before + x; @@ -167,8 +166,7 @@ locked_read_zu(tsdn_t *tsdn, malloc_mutex_t *mtx, locked_zu_t *p) { } static inline void -locked_inc_zu(tsdn_t *tsdn, malloc_mutex_t *mtx, locked_zu_t *p, - size_t x) { +locked_inc_zu(tsdn_t *tsdn, malloc_mutex_t *mtx, locked_zu_t *p, size_t x) { LOCKEDINT_MTX_ASSERT_INTERNAL(tsdn, mtx); #ifdef JEMALLOC_ATOMIC_U64 atomic_fetch_add_zu(&p->val, x, ATOMIC_RELAXED); @@ -179,8 +177,7 @@ locked_inc_zu(tsdn_t *tsdn, malloc_mutex_t *mtx, locked_zu_t *p, } static inline void -locked_dec_zu(tsdn_t *tsdn, malloc_mutex_t *mtx, locked_zu_t *p, - size_t x) { +locked_dec_zu(tsdn_t *tsdn, malloc_mutex_t *mtx, locked_zu_t *p, size_t x) { LOCKEDINT_MTX_ASSERT_INTERNAL(tsdn, mtx); #ifdef JEMALLOC_ATOMIC_U64 size_t r = atomic_fetch_sub_zu(&p->val, x, ATOMIC_RELAXED); diff --git a/include/jemalloc/internal/log.h b/include/jemalloc/internal/log.h index 7b074abd..f213beda 100644 --- a/include/jemalloc/internal/log.h +++ b/include/jemalloc/internal/log.h @@ -7,9 +7,9 @@ #include "jemalloc/internal/mutex.h" #ifdef JEMALLOC_LOG -# define JEMALLOC_LOG_VAR_BUFSIZE 1000 +# define 
JEMALLOC_LOG_VAR_BUFSIZE 1000 #else -# define JEMALLOC_LOG_VAR_BUFSIZE 1 +# define JEMALLOC_LOG_VAR_BUFSIZE 1 #endif #define JEMALLOC_LOG_BUFSIZE 4096 @@ -36,7 +36,7 @@ * statements. */ -extern char log_var_names[JEMALLOC_LOG_VAR_BUFSIZE]; +extern char log_var_names[JEMALLOC_LOG_VAR_BUFSIZE]; extern atomic_b_t log_init_done; typedef struct log_var_s log_var_t; @@ -45,7 +45,7 @@ struct log_var_s { * Lowest bit is "inited", second lowest is "enabled". Putting them in * a single word lets us avoid any fences on weak architectures. */ - atomic_u_t state; + atomic_u_t state; const char *name; }; @@ -53,7 +53,8 @@ struct log_var_s { #define LOG_INITIALIZED_NOT_ENABLED 1U #define LOG_ENABLED 2U -#define LOG_VAR_INIT(name_str) {ATOMIC_INIT(LOG_NOT_INITIALIZED), name_str} +#define LOG_VAR_INIT(name_str) \ + { ATOMIC_INIT(LOG_NOT_INITIALIZED), name_str } /* * Returns the value we should assume for state (which is not necessarily @@ -63,21 +64,21 @@ struct log_var_s { unsigned log_var_update_state(log_var_t *log_var); /* We factor out the metadata management to allow us to test more easily. */ -#define log_do_begin(log_var) \ -if (config_log) { \ - unsigned log_state = atomic_load_u(&(log_var).state, \ - ATOMIC_RELAXED); \ - if (unlikely(log_state == LOG_NOT_INITIALIZED)) { \ - log_state = log_var_update_state(&(log_var)); \ - assert(log_state != LOG_NOT_INITIALIZED); \ - } \ - if (log_state == LOG_ENABLED) { \ - { - /* User code executes here. */ -#define log_do_end(log_var) \ - } \ - } \ -} +#define log_do_begin(log_var) \ + if (config_log) { \ + unsigned log_state = atomic_load_u( \ + &(log_var).state, ATOMIC_RELAXED); \ + if (unlikely(log_state == LOG_NOT_INITIALIZED)) { \ + log_state = log_var_update_state(&(log_var)); \ + assert(log_state != LOG_NOT_INITIALIZED); \ + } \ + if (log_state == LOG_ENABLED) { \ + { +/* User code executes here. 
*/ +#define log_do_end(log_var) \ + } \ + } \ + } /* * MSVC has some preprocessor bugs in its expansion of __VA_ARGS__ during @@ -88,28 +89,29 @@ if (config_log) { \ */ static inline void log_impl_varargs(const char *name, ...) { - char buf[JEMALLOC_LOG_BUFSIZE]; + char buf[JEMALLOC_LOG_BUFSIZE]; va_list ap; va_start(ap, name); const char *format = va_arg(ap, const char *); - size_t dst_offset = 0; + size_t dst_offset = 0; dst_offset += malloc_snprintf(buf, JEMALLOC_LOG_BUFSIZE, "%s: ", name); - dst_offset += malloc_vsnprintf(buf + dst_offset, - JEMALLOC_LOG_BUFSIZE - dst_offset, format, ap); - malloc_snprintf(buf + dst_offset, JEMALLOC_LOG_BUFSIZE - dst_offset, "\n"); + dst_offset += malloc_vsnprintf( + buf + dst_offset, JEMALLOC_LOG_BUFSIZE - dst_offset, format, ap); + malloc_snprintf( + buf + dst_offset, JEMALLOC_LOG_BUFSIZE - dst_offset, "\n"); va_end(ap); malloc_write(buf); } /* Call as log("log.var.str", "format_string %d", arg_for_format_string); */ -#define LOG(log_var_str, ...) \ -do { \ - static log_var_t log_var = LOG_VAR_INIT(log_var_str); \ - log_do_begin(log_var) \ - log_impl_varargs((log_var).name, __VA_ARGS__); \ - log_do_end(log_var) \ -} while (0) +#define LOG(log_var_str, ...) 
\ + do { \ + static log_var_t log_var = LOG_VAR_INIT(log_var_str); \ + log_do_begin(log_var) \ + log_impl_varargs((log_var).name, __VA_ARGS__); \ + log_do_end(log_var) \ + } while (0) #endif /* JEMALLOC_INTERNAL_LOG_H */ diff --git a/include/jemalloc/internal/malloc_io.h b/include/jemalloc/internal/malloc_io.h index 9c7c6ec2..0f82f678 100644 --- a/include/jemalloc/internal/malloc_io.h +++ b/include/jemalloc/internal/malloc_io.h @@ -5,64 +5,63 @@ #include "jemalloc/internal/jemalloc_internal_types.h" #ifdef _WIN32 -# ifdef _WIN64 -# define FMT64_PREFIX "ll" -# define FMTPTR_PREFIX "ll" -# else -# define FMT64_PREFIX "ll" -# define FMTPTR_PREFIX "" -# endif -# define FMTd32 "d" -# define FMTu32 "u" -# define FMTx32 "x" -# define FMTd64 FMT64_PREFIX "d" -# define FMTu64 FMT64_PREFIX "u" -# define FMTx64 FMT64_PREFIX "x" -# define FMTdPTR FMTPTR_PREFIX "d" -# define FMTuPTR FMTPTR_PREFIX "u" -# define FMTxPTR FMTPTR_PREFIX "x" +# ifdef _WIN64 +# define FMT64_PREFIX "ll" +# define FMTPTR_PREFIX "ll" +# else +# define FMT64_PREFIX "ll" +# define FMTPTR_PREFIX "" +# endif +# define FMTd32 "d" +# define FMTu32 "u" +# define FMTx32 "x" +# define FMTd64 FMT64_PREFIX "d" +# define FMTu64 FMT64_PREFIX "u" +# define FMTx64 FMT64_PREFIX "x" +# define FMTdPTR FMTPTR_PREFIX "d" +# define FMTuPTR FMTPTR_PREFIX "u" +# define FMTxPTR FMTPTR_PREFIX "x" #else -# include -# define FMTd32 PRId32 -# define FMTu32 PRIu32 -# define FMTx32 PRIx32 -# define FMTd64 PRId64 -# define FMTu64 PRIu64 -# define FMTx64 PRIx64 -# define FMTdPTR PRIdPTR -# define FMTuPTR PRIuPTR -# define FMTxPTR PRIxPTR +# include +# define FMTd32 PRId32 +# define FMTu32 PRIu32 +# define FMTx32 PRIx32 +# define FMTd64 PRId64 +# define FMTu64 PRIu64 +# define FMTx64 PRIx64 +# define FMTdPTR PRIdPTR +# define FMTuPTR PRIuPTR +# define FMTxPTR PRIxPTR #endif /* Size of stack-allocated buffer passed to buferror(). 
*/ -#define BUFERROR_BUF 64 +#define BUFERROR_BUF 64 /* * Size of stack-allocated buffer used by malloc_{,v,vc}printf(). This must be * large enough for all possible uses within jemalloc. */ -#define MALLOC_PRINTF_BUFSIZE 4096 +#define MALLOC_PRINTF_BUFSIZE 4096 write_cb_t wrtmessage; -int buferror(int err, char *buf, size_t buflen); -uintmax_t malloc_strtoumax(const char *restrict nptr, char **restrict endptr, - int base); +int buferror(int err, char *buf, size_t buflen); +uintmax_t malloc_strtoumax( + const char *restrict nptr, char **restrict endptr, int base); void malloc_write(const char *s); /* * malloc_vsnprintf() supports a subset of snprintf(3) that avoids floating * point math. */ -size_t malloc_vsnprintf(char *str, size_t size, const char *format, - va_list ap); +size_t malloc_vsnprintf(char *str, size_t size, const char *format, va_list ap); size_t malloc_snprintf(char *str, size_t size, const char *format, ...) JEMALLOC_FORMAT_PRINTF(3, 4); /* * The caller can set write_cb to null to choose to print with the * je_malloc_message hook. */ -void malloc_vcprintf(write_cb_t *write_cb, void *cbopaque, const char *format, - va_list ap); +void malloc_vcprintf( + write_cb_t *write_cb, void *cbopaque, const char *format, va_list ap); void malloc_cprintf(write_cb_t *write_cb, void *cbopaque, const char *format, ...) JEMALLOC_FORMAT_PRINTF(3, 4); void malloc_printf(const char *format, ...) 
JEMALLOC_FORMAT_PRINTF(1, 2); @@ -81,10 +80,10 @@ malloc_write_fd_syscall(int fd, const void *buf, size_t count) { long result = syscall(SYS_write, fd, buf, count); #else ssize_t result = (ssize_t)write(fd, buf, -#ifdef _WIN32 +# ifdef _WIN32 (unsigned int) -#endif - count); +# endif + count); #endif return (ssize_t)result; } @@ -110,10 +109,10 @@ malloc_read_fd_syscall(int fd, void *buf, size_t count) { long result = syscall(SYS_read, fd, buf, count); #else ssize_t result = read(fd, buf, -#ifdef _WIN32 +# ifdef _WIN32 (unsigned int) -#endif - count); +# endif + count); #endif return (ssize_t)result; } @@ -122,8 +121,8 @@ static inline ssize_t malloc_read_fd(int fd, void *buf, size_t count) { size_t bytes_read = 0; do { - ssize_t result = malloc_read_fd_syscall(fd, - &((byte_t *)buf)[bytes_read], count - bytes_read); + ssize_t result = malloc_read_fd_syscall( + fd, &((byte_t *)buf)[bytes_read], count - bytes_read); if (result < 0) { return result; } else if (result == 0) { @@ -134,7 +133,8 @@ malloc_read_fd(int fd, void *buf, size_t count) { return bytes_read; } -static inline int malloc_open(const char *path, int flags) { +static inline int +malloc_open(const char *path, int flags) { int fd; #if defined(JEMALLOC_USE_SYSCALL) && defined(SYS_open) fd = (int)syscall(SYS_open, path, flags); @@ -146,7 +146,8 @@ static inline int malloc_open(const char *path, int flags) { return fd; } -static inline int malloc_close(int fd) { +static inline int +malloc_close(int fd) { #if defined(JEMALLOC_USE_SYSCALL) && defined(SYS_close) return (int)syscall(SYS_close, fd); #else @@ -154,11 +155,12 @@ static inline int malloc_close(int fd) { #endif } -static inline off_t malloc_lseek(int fd, off_t offset, int whence) { +static inline off_t +malloc_lseek(int fd, off_t offset, int whence) { #if defined(JEMALLOC_USE_SYSCALL) && defined(SYS_lseek) - return (off_t)syscall(SYS_lseek, fd, offset, whence); + return (off_t)syscall(SYS_lseek, fd, offset, whence); #else - return lseek(fd, offset, 
whence); + return lseek(fd, offset, whence); #endif } diff --git a/include/jemalloc/internal/mutex.h b/include/jemalloc/internal/mutex.h index db2bdf37..943c7928 100644 --- a/include/jemalloc/internal/mutex.h +++ b/include/jemalloc/internal/mutex.h @@ -31,7 +31,7 @@ struct malloc_mutex_s { * avoid prefetching a modified cacheline (for the * unlocking thread). */ - mutex_prof_data_t prof_data; + mutex_prof_data_t prof_data; /* * Hint flag to avoid exclusive cache line contention * during spin waiting. Placed along with prof_data @@ -39,20 +39,20 @@ struct malloc_mutex_s { * Modified by the lock owner only (after acquired, and * before release), and may be read by other threads. */ - atomic_b_t locked; + atomic_b_t locked; #ifdef _WIN32 -# if _WIN32_WINNT >= 0x0600 - SRWLOCK lock; -# else - CRITICAL_SECTION lock; -# endif +# if _WIN32_WINNT >= 0x0600 + SRWLOCK lock; +# else + CRITICAL_SECTION lock; +# endif #elif (defined(JEMALLOC_OS_UNFAIR_LOCK)) - os_unfair_lock lock; + os_unfair_lock lock; #elif (defined(JEMALLOC_MUTEX_INIT_CB)) - pthread_mutex_t lock; - malloc_mutex_t *postponed_next; + pthread_mutex_t lock; + malloc_mutex_t *postponed_next; #else - pthread_mutex_t lock; + pthread_mutex_t lock; #endif }; /* @@ -62,82 +62,118 @@ struct malloc_mutex_s { * memory cost. 
*/ #if !defined(JEMALLOC_DEBUG) - witness_t witness; - malloc_mutex_lock_order_t lock_order; + witness_t witness; + malloc_mutex_lock_order_t lock_order; #endif }; #if defined(JEMALLOC_DEBUG) - witness_t witness; - malloc_mutex_lock_order_t lock_order; + witness_t witness; + malloc_mutex_lock_order_t lock_order; #endif }; #ifdef _WIN32 -# if _WIN32_WINNT >= 0x0600 -# define MALLOC_MUTEX_LOCK(m) AcquireSRWLockExclusive(&(m)->lock) -# define MALLOC_MUTEX_UNLOCK(m) ReleaseSRWLockExclusive(&(m)->lock) -# define MALLOC_MUTEX_TRYLOCK(m) (!TryAcquireSRWLockExclusive(&(m)->lock)) -# else -# define MALLOC_MUTEX_LOCK(m) EnterCriticalSection(&(m)->lock) -# define MALLOC_MUTEX_UNLOCK(m) LeaveCriticalSection(&(m)->lock) -# define MALLOC_MUTEX_TRYLOCK(m) (!TryEnterCriticalSection(&(m)->lock)) -# endif +# if _WIN32_WINNT >= 0x0600 +# define MALLOC_MUTEX_LOCK(m) AcquireSRWLockExclusive(&(m)->lock) +# define MALLOC_MUTEX_UNLOCK(m) \ + ReleaseSRWLockExclusive(&(m)->lock) +# define MALLOC_MUTEX_TRYLOCK(m) \ + (!TryAcquireSRWLockExclusive(&(m)->lock)) +# else +# define MALLOC_MUTEX_LOCK(m) EnterCriticalSection(&(m)->lock) +# define MALLOC_MUTEX_UNLOCK(m) LeaveCriticalSection(&(m)->lock) +# define MALLOC_MUTEX_TRYLOCK(m) \ + (!TryEnterCriticalSection(&(m)->lock)) +# endif #elif (defined(JEMALLOC_OS_UNFAIR_LOCK)) -# define MALLOC_MUTEX_LOCK(m) os_unfair_lock_lock(&(m)->lock) -# define MALLOC_MUTEX_UNLOCK(m) os_unfair_lock_unlock(&(m)->lock) -# define MALLOC_MUTEX_TRYLOCK(m) (!os_unfair_lock_trylock(&(m)->lock)) +# define MALLOC_MUTEX_LOCK(m) os_unfair_lock_lock(&(m)->lock) +# define MALLOC_MUTEX_UNLOCK(m) os_unfair_lock_unlock(&(m)->lock) +# define MALLOC_MUTEX_TRYLOCK(m) (!os_unfair_lock_trylock(&(m)->lock)) #else -# define MALLOC_MUTEX_LOCK(m) pthread_mutex_lock(&(m)->lock) -# define MALLOC_MUTEX_UNLOCK(m) pthread_mutex_unlock(&(m)->lock) -# define MALLOC_MUTEX_TRYLOCK(m) (pthread_mutex_trylock(&(m)->lock) != 0) +# define MALLOC_MUTEX_LOCK(m) pthread_mutex_lock(&(m)->lock) +# define 
MALLOC_MUTEX_UNLOCK(m) pthread_mutex_unlock(&(m)->lock) +# define MALLOC_MUTEX_TRYLOCK(m) (pthread_mutex_trylock(&(m)->lock) != 0) #endif -#define LOCK_PROF_DATA_INITIALIZER \ - {NSTIME_ZERO_INITIALIZER, NSTIME_ZERO_INITIALIZER, 0, 0, 0, \ - ATOMIC_INIT(0), 0, NULL, 0} +#define LOCK_PROF_DATA_INITIALIZER \ + { \ + NSTIME_ZERO_INITIALIZER, NSTIME_ZERO_INITIALIZER, 0, 0, 0, \ + ATOMIC_INIT(0), 0, NULL, 0 \ + } #ifdef _WIN32 -# define MALLOC_MUTEX_INITIALIZER +# define MALLOC_MUTEX_INITIALIZER #elif (defined(JEMALLOC_OS_UNFAIR_LOCK)) -# if defined(JEMALLOC_DEBUG) -# define MALLOC_MUTEX_INITIALIZER \ - {{{LOCK_PROF_DATA_INITIALIZER, ATOMIC_INIT(false), OS_UNFAIR_LOCK_INIT}}, \ - WITNESS_INITIALIZER("mutex", WITNESS_RANK_OMIT), 0} -# else -# define MALLOC_MUTEX_INITIALIZER \ - {{{LOCK_PROF_DATA_INITIALIZER, ATOMIC_INIT(false), OS_UNFAIR_LOCK_INIT}}, \ - WITNESS_INITIALIZER("mutex", WITNESS_RANK_OMIT)} -# endif +# if defined(JEMALLOC_DEBUG) +# define MALLOC_MUTEX_INITIALIZER \ + { \ + {{LOCK_PROF_DATA_INITIALIZER, \ + ATOMIC_INIT(false), OS_UNFAIR_LOCK_INIT}}, \ + WITNESS_INITIALIZER( \ + "mutex", WITNESS_RANK_OMIT), \ + 0 \ + } +# else +# define MALLOC_MUTEX_INITIALIZER \ + { \ + {{LOCK_PROF_DATA_INITIALIZER, \ + ATOMIC_INIT(false), OS_UNFAIR_LOCK_INIT}}, \ + WITNESS_INITIALIZER( \ + "mutex", WITNESS_RANK_OMIT) \ + } +# endif #elif (defined(JEMALLOC_MUTEX_INIT_CB)) -# if (defined(JEMALLOC_DEBUG)) -# define MALLOC_MUTEX_INITIALIZER \ - {{{LOCK_PROF_DATA_INITIALIZER, ATOMIC_INIT(false), PTHREAD_MUTEX_INITIALIZER, NULL}}, \ - WITNESS_INITIALIZER("mutex", WITNESS_RANK_OMIT), 0} -# else -# define MALLOC_MUTEX_INITIALIZER \ - {{{LOCK_PROF_DATA_INITIALIZER, ATOMIC_INIT(false), PTHREAD_MUTEX_INITIALIZER, NULL}}, \ - WITNESS_INITIALIZER("mutex", WITNESS_RANK_OMIT)} -# endif +# if (defined(JEMALLOC_DEBUG)) +# define MALLOC_MUTEX_INITIALIZER \ + { \ + {{LOCK_PROF_DATA_INITIALIZER, \ + ATOMIC_INIT(false), \ + PTHREAD_MUTEX_INITIALIZER, NULL}}, \ + WITNESS_INITIALIZER( \ + "mutex", 
WITNESS_RANK_OMIT), \ + 0 \ + } +# else +# define MALLOC_MUTEX_INITIALIZER \ + { \ + {{LOCK_PROF_DATA_INITIALIZER, \ + ATOMIC_INIT(false), \ + PTHREAD_MUTEX_INITIALIZER, NULL}}, \ + WITNESS_INITIALIZER( \ + "mutex", WITNESS_RANK_OMIT) \ + } +# endif #else -# define MALLOC_MUTEX_TYPE PTHREAD_MUTEX_DEFAULT -# if defined(JEMALLOC_DEBUG) -# define MALLOC_MUTEX_INITIALIZER \ - {{{LOCK_PROF_DATA_INITIALIZER, ATOMIC_INIT(false), PTHREAD_MUTEX_INITIALIZER}}, \ - WITNESS_INITIALIZER("mutex", WITNESS_RANK_OMIT), 0} -# else -# define MALLOC_MUTEX_INITIALIZER \ - {{{LOCK_PROF_DATA_INITIALIZER, ATOMIC_INIT(false), PTHREAD_MUTEX_INITIALIZER}}, \ - WITNESS_INITIALIZER("mutex", WITNESS_RANK_OMIT)} -# endif +# define MALLOC_MUTEX_TYPE PTHREAD_MUTEX_DEFAULT +# if defined(JEMALLOC_DEBUG) +# define MALLOC_MUTEX_INITIALIZER \ + { \ + {{LOCK_PROF_DATA_INITIALIZER, \ + ATOMIC_INIT(false), \ + PTHREAD_MUTEX_INITIALIZER}}, \ + WITNESS_INITIALIZER( \ + "mutex", WITNESS_RANK_OMIT), \ + 0 \ + } +# else +# define MALLOC_MUTEX_INITIALIZER \ + { \ + {{LOCK_PROF_DATA_INITIALIZER, \ + ATOMIC_INIT(false), \ + PTHREAD_MUTEX_INITIALIZER}}, \ + WITNESS_INITIALIZER( \ + "mutex", WITNESS_RANK_OMIT) \ + } +# endif #endif #ifdef JEMALLOC_LAZY_LOCK extern bool isthreaded; #else -# undef isthreaded /* Undo private_namespace.h definition. */ -# define isthreaded true +# undef isthreaded /* Undo private_namespace.h definition. 
*/ +# define isthreaded true #endif bool malloc_mutex_init(malloc_mutex_t *mutex, const char *name, @@ -214,12 +250,12 @@ malloc_mutex_prof_merge(mutex_prof_data_t *sum, mutex_prof_data_t *data) { if (sum->max_n_thds < data->max_n_thds) { sum->max_n_thds = data->max_n_thds; } - uint32_t cur_n_waiting_thds = atomic_load_u32(&sum->n_waiting_thds, - ATOMIC_RELAXED); - uint32_t new_n_waiting_thds = cur_n_waiting_thds + atomic_load_u32( - &data->n_waiting_thds, ATOMIC_RELAXED); - atomic_store_u32(&sum->n_waiting_thds, new_n_waiting_thds, - ATOMIC_RELAXED); + uint32_t cur_n_waiting_thds = atomic_load_u32( + &sum->n_waiting_thds, ATOMIC_RELAXED); + uint32_t new_n_waiting_thds = cur_n_waiting_thds + + atomic_load_u32(&data->n_waiting_thds, ATOMIC_RELAXED); + atomic_store_u32( + &sum->n_waiting_thds, new_n_waiting_thds, ATOMIC_RELAXED); sum->n_owner_switches += data->n_owner_switches; sum->n_lock_ops += data->n_lock_ops; } @@ -274,16 +310,16 @@ malloc_mutex_prof_copy(mutex_prof_data_t *dst, mutex_prof_data_t *source) { /* Copy the prof data from mutex for processing. */ static inline void -malloc_mutex_prof_read(tsdn_t *tsdn, mutex_prof_data_t *data, - malloc_mutex_t *mutex) { +malloc_mutex_prof_read( + tsdn_t *tsdn, mutex_prof_data_t *data, malloc_mutex_t *mutex) { /* Can only read holding the mutex. */ malloc_mutex_assert_owner(tsdn, mutex); malloc_mutex_prof_copy(data, &mutex->prof_data); } static inline void -malloc_mutex_prof_accum(tsdn_t *tsdn, mutex_prof_data_t *data, - malloc_mutex_t *mutex) { +malloc_mutex_prof_accum( + tsdn_t *tsdn, mutex_prof_data_t *data, malloc_mutex_t *mutex) { mutex_prof_data_t *source = &mutex->prof_data; /* Can only read holding the mutex. */ malloc_mutex_assert_owner(tsdn, mutex); @@ -305,8 +341,8 @@ malloc_mutex_prof_accum(tsdn_t *tsdn, mutex_prof_data_t *data, /* Compare the prof data and update to the maximum. 
*/ static inline void -malloc_mutex_prof_max_update(tsdn_t *tsdn, mutex_prof_data_t *data, - malloc_mutex_t *mutex) { +malloc_mutex_prof_max_update( + tsdn_t *tsdn, mutex_prof_data_t *data, malloc_mutex_t *mutex) { mutex_prof_data_t *source = &mutex->prof_data; /* Can only read holding the mutex. */ malloc_mutex_assert_owner(tsdn, mutex); diff --git a/include/jemalloc/internal/mutex_prof.h b/include/jemalloc/internal/mutex_prof.h index 14e4340b..572200f3 100644 --- a/include/jemalloc/internal/mutex_prof.h +++ b/include/jemalloc/internal/mutex_prof.h @@ -6,76 +6,76 @@ #include "jemalloc/internal/nstime.h" #include "jemalloc/internal/tsd_types.h" -#define MUTEX_PROF_GLOBAL_MUTEXES \ - OP(background_thread) \ - OP(max_per_bg_thd) \ - OP(ctl) \ - OP(prof) \ - OP(prof_thds_data) \ - OP(prof_dump) \ - OP(prof_recent_alloc) \ - OP(prof_recent_dump) \ - OP(prof_stats) +#define MUTEX_PROF_GLOBAL_MUTEXES \ + OP(background_thread) \ + OP(max_per_bg_thd) \ + OP(ctl) \ + OP(prof) \ + OP(prof_thds_data) \ + OP(prof_dump) \ + OP(prof_recent_alloc) \ + OP(prof_recent_dump) \ + OP(prof_stats) typedef enum { #define OP(mtx) global_prof_mutex_##mtx, MUTEX_PROF_GLOBAL_MUTEXES #undef OP - mutex_prof_num_global_mutexes + mutex_prof_num_global_mutexes } mutex_prof_global_ind_t; -#define MUTEX_PROF_ARENA_MUTEXES \ - OP(large) \ - OP(extent_avail) \ - OP(extents_dirty) \ - OP(extents_muzzy) \ - OP(extents_retained) \ - OP(decay_dirty) \ - OP(decay_muzzy) \ - OP(base) \ - OP(tcache_list) \ - OP(hpa_shard) \ - OP(hpa_shard_grow) \ - OP(hpa_sec) +#define MUTEX_PROF_ARENA_MUTEXES \ + OP(large) \ + OP(extent_avail) \ + OP(extents_dirty) \ + OP(extents_muzzy) \ + OP(extents_retained) \ + OP(decay_dirty) \ + OP(decay_muzzy) \ + OP(base) \ + OP(tcache_list) \ + OP(hpa_shard) \ + OP(hpa_shard_grow) \ + OP(hpa_sec) typedef enum { #define OP(mtx) arena_prof_mutex_##mtx, MUTEX_PROF_ARENA_MUTEXES #undef OP - mutex_prof_num_arena_mutexes + mutex_prof_num_arena_mutexes } mutex_prof_arena_ind_t; /* * The 
forth parameter is a boolean value that is true for derived rate counters * and false for real ones. */ -#define MUTEX_PROF_UINT64_COUNTERS \ - OP(num_ops, uint64_t, "n_lock_ops", false, num_ops) \ - OP(num_ops_ps, uint64_t, "(#/sec)", true, num_ops) \ - OP(num_wait, uint64_t, "n_waiting", false, num_wait) \ - OP(num_wait_ps, uint64_t, "(#/sec)", true, num_wait) \ - OP(num_spin_acq, uint64_t, "n_spin_acq", false, num_spin_acq) \ - OP(num_spin_acq_ps, uint64_t, "(#/sec)", true, num_spin_acq) \ - OP(num_owner_switch, uint64_t, "n_owner_switch", false, num_owner_switch) \ - OP(num_owner_switch_ps, uint64_t, "(#/sec)", true, num_owner_switch) \ - OP(total_wait_time, uint64_t, "total_wait_ns", false, total_wait_time) \ - OP(total_wait_time_ps, uint64_t, "(#/sec)", true, total_wait_time) \ - OP(max_wait_time, uint64_t, "max_wait_ns", false, max_wait_time) +#define MUTEX_PROF_UINT64_COUNTERS \ + OP(num_ops, uint64_t, "n_lock_ops", false, num_ops) \ + OP(num_ops_ps, uint64_t, "(#/sec)", true, num_ops) \ + OP(num_wait, uint64_t, "n_waiting", false, num_wait) \ + OP(num_wait_ps, uint64_t, "(#/sec)", true, num_wait) \ + OP(num_spin_acq, uint64_t, "n_spin_acq", false, num_spin_acq) \ + OP(num_spin_acq_ps, uint64_t, "(#/sec)", true, num_spin_acq) \ + OP(num_owner_switch, uint64_t, "n_owner_switch", false, \ + num_owner_switch) \ + OP(num_owner_switch_ps, uint64_t, "(#/sec)", true, num_owner_switch) \ + OP(total_wait_time, uint64_t, "total_wait_ns", false, total_wait_time) \ + OP(total_wait_time_ps, uint64_t, "(#/sec)", true, total_wait_time) \ + OP(max_wait_time, uint64_t, "max_wait_ns", false, max_wait_time) -#define MUTEX_PROF_UINT32_COUNTERS \ - OP(max_num_thds, uint32_t, "max_n_thds", false, max_num_thds) +#define MUTEX_PROF_UINT32_COUNTERS \ + OP(max_num_thds, uint32_t, "max_n_thds", false, max_num_thds) -#define MUTEX_PROF_COUNTERS \ - MUTEX_PROF_UINT64_COUNTERS \ - MUTEX_PROF_UINT32_COUNTERS +#define MUTEX_PROF_COUNTERS \ + MUTEX_PROF_UINT64_COUNTERS \ + 
MUTEX_PROF_UINT32_COUNTERS #define OP(counter, type, human, derived, base_counter) mutex_counter_##counter, -#define COUNTER_ENUM(counter_list, t) \ - typedef enum { \ - counter_list \ - mutex_prof_num_##t##_counters \ - } mutex_prof_##t##_counter_ind_t; +#define COUNTER_ENUM(counter_list, t) \ + typedef enum { \ + counter_list mutex_prof_num_##t##_counters \ + } mutex_prof_##t##_counter_ind_t; COUNTER_ENUM(MUTEX_PROF_UINT64_COUNTERS, uint64_t) COUNTER_ENUM(MUTEX_PROF_UINT32_COUNTERS, uint32_t) @@ -89,17 +89,17 @@ typedef struct { * contention. We update them once we have the lock. */ /* Total time (in nano seconds) spent waiting on this mutex. */ - nstime_t tot_wait_time; + nstime_t tot_wait_time; /* Max time (in nano seconds) spent on a single lock operation. */ - nstime_t max_wait_time; + nstime_t max_wait_time; /* # of times have to wait for this mutex (after spinning). */ - uint64_t n_wait_times; + uint64_t n_wait_times; /* # of times acquired the mutex through local spinning. */ - uint64_t n_spin_acquired; + uint64_t n_spin_acquired; /* Max # of threads waiting for the mutex at the same time. */ - uint32_t max_n_thds; + uint32_t max_n_thds; /* Current # of threads waiting on the lock. Atomic synced. */ - atomic_u32_t n_waiting_thds; + atomic_u32_t n_waiting_thds; /* * Data touched on the fast path. These are modified right after we @@ -108,11 +108,11 @@ typedef struct { * cacheline. */ /* # of times the mutex holder is different than the previous one. */ - uint64_t n_owner_switches; + uint64_t n_owner_switches; /* Previous mutex holder, to facilitate n_owner_switches. */ - tsdn_t *prev_owner; + tsdn_t *prev_owner; /* # of lock() operations in total. 
*/ - uint64_t n_lock_ops; + uint64_t n_lock_ops; } mutex_prof_data_t; #endif /* JEMALLOC_INTERNAL_MUTEX_PROF_H */ diff --git a/include/jemalloc/internal/nstime.h b/include/jemalloc/internal/nstime.h index 1f32df58..a10b2de1 100644 --- a/include/jemalloc/internal/nstime.h +++ b/include/jemalloc/internal/nstime.h @@ -9,9 +9,11 @@ #define NSTIME_MAGIC ((uint32_t)0xb8a9ce37) #ifdef JEMALLOC_DEBUG -# define NSTIME_ZERO_INITIALIZER {0, NSTIME_MAGIC} +# define NSTIME_ZERO_INITIALIZER \ + { 0, NSTIME_MAGIC } #else -# define NSTIME_ZERO_INITIALIZER {0} +# define NSTIME_ZERO_INITIALIZER \ + { 0 } #endif typedef struct { @@ -23,43 +25,40 @@ typedef struct { static const nstime_t nstime_zero = NSTIME_ZERO_INITIALIZER; -void nstime_init(nstime_t *time, uint64_t ns); -void nstime_init2(nstime_t *time, uint64_t sec, uint64_t nsec); +void nstime_init(nstime_t *time, uint64_t ns); +void nstime_init2(nstime_t *time, uint64_t sec, uint64_t nsec); uint64_t nstime_ns(const nstime_t *time); uint64_t nstime_ms(const nstime_t *time); uint64_t nstime_sec(const nstime_t *time); uint64_t nstime_nsec(const nstime_t *time); -void nstime_copy(nstime_t *time, const nstime_t *source); -int nstime_compare(const nstime_t *a, const nstime_t *b); -void nstime_add(nstime_t *time, const nstime_t *addend); -void nstime_iadd(nstime_t *time, uint64_t addend); -void nstime_subtract(nstime_t *time, const nstime_t *subtrahend); -void nstime_isubtract(nstime_t *time, uint64_t subtrahend); -void nstime_imultiply(nstime_t *time, uint64_t multiplier); -void nstime_idivide(nstime_t *time, uint64_t divisor); +void nstime_copy(nstime_t *time, const nstime_t *source); +int nstime_compare(const nstime_t *a, const nstime_t *b); +void nstime_add(nstime_t *time, const nstime_t *addend); +void nstime_iadd(nstime_t *time, uint64_t addend); +void nstime_subtract(nstime_t *time, const nstime_t *subtrahend); +void nstime_isubtract(nstime_t *time, uint64_t subtrahend); +void nstime_imultiply(nstime_t *time, uint64_t 
multiplier); +void nstime_idivide(nstime_t *time, uint64_t divisor); uint64_t nstime_divide(const nstime_t *time, const nstime_t *divisor); uint64_t nstime_ns_since(const nstime_t *past); uint64_t nstime_ms_since(const nstime_t *past); -typedef bool (nstime_monotonic_t)(void); +typedef bool(nstime_monotonic_t)(void); extern nstime_monotonic_t *JET_MUTABLE nstime_monotonic; -typedef void (nstime_update_t)(nstime_t *); +typedef void(nstime_update_t)(nstime_t *); extern nstime_update_t *JET_MUTABLE nstime_update; -typedef void (nstime_prof_update_t)(nstime_t *); +typedef void(nstime_prof_update_t)(nstime_t *); extern nstime_prof_update_t *JET_MUTABLE nstime_prof_update; void nstime_init_update(nstime_t *time); void nstime_prof_init_update(nstime_t *time); -enum prof_time_res_e { - prof_time_res_default = 0, - prof_time_res_high = 1 -}; +enum prof_time_res_e { prof_time_res_default = 0, prof_time_res_high = 1 }; typedef enum prof_time_res_e prof_time_res_t; -extern prof_time_res_t opt_prof_time_res; +extern prof_time_res_t opt_prof_time_res; extern const char *const prof_time_res_mode_names[]; JEMALLOC_ALWAYS_INLINE void diff --git a/include/jemalloc/internal/pa.h b/include/jemalloc/internal/pa.h index 75626738..3f2d10b0 100644 --- a/include/jemalloc/internal/pa.h +++ b/include/jemalloc/internal/pa.h @@ -101,7 +101,7 @@ struct pa_shard_s { * these configurations to use many fewer arenas, and therefore have a * higher risk of hot locks. */ - sec_t hpa_sec; + sec_t hpa_sec; hpa_shard_t hpa_shard; /* The source of edata_t objects. */ @@ -109,7 +109,7 @@ struct pa_shard_s { unsigned ind; - malloc_mutex_t *stats_mtx; + malloc_mutex_t *stats_mtx; pa_shard_stats_t *stats; /* The emap this shard is tied to. 
*/ @@ -121,8 +121,8 @@ struct pa_shard_s { static inline bool pa_shard_dont_decay_muzzy(pa_shard_t *shard) { - return ecache_npages_get(&shard->pac.ecache_muzzy) == 0 && - pac_decay_ms_get(&shard->pac, extent_state_muzzy) <= 0; + return ecache_npages_get(&shard->pac.ecache_muzzy) == 0 + && pac_decay_ms_get(&shard->pac, extent_state_muzzy) <= 0; } static inline ehooks_t * @@ -186,10 +186,10 @@ bool pa_shrink(tsdn_t *tsdn, pa_shard_t *shard, edata_t *edata, size_t old_size, * (We could make generated_dirty the return value of course, but this is more * consistent with the shrink pathway and our error codes here). */ -void pa_dalloc(tsdn_t *tsdn, pa_shard_t *shard, edata_t *edata, - bool *deferred_work_generated); -bool pa_decay_ms_set(tsdn_t *tsdn, pa_shard_t *shard, extent_state_t state, - ssize_t decay_ms, pac_purge_eagerness_t eagerness); +void pa_dalloc(tsdn_t *tsdn, pa_shard_t *shard, edata_t *edata, + bool *deferred_work_generated); +bool pa_decay_ms_set(tsdn_t *tsdn, pa_shard_t *shard, extent_state_t state, + ssize_t decay_ms, pac_purge_eagerness_t eagerness); ssize_t pa_decay_ms_get(pa_shard_t *shard, extent_state_t state); /* @@ -199,10 +199,10 @@ ssize_t pa_decay_ms_get(pa_shard_t *shard, extent_state_t state); * though, the arena, background thread, and PAC modules are tightly interwoven * in a way that's tricky to extricate, so we only do the HPA-specific parts. 
*/ -void pa_shard_set_deferral_allowed(tsdn_t *tsdn, pa_shard_t *shard, - bool deferral_allowed); -void pa_shard_do_deferred_work(tsdn_t *tsdn, pa_shard_t *shard); -void pa_shard_try_deferred_work(tsdn_t *tsdn, pa_shard_t *shard); +void pa_shard_set_deferral_allowed( + tsdn_t *tsdn, pa_shard_t *shard, bool deferral_allowed); +void pa_shard_do_deferred_work(tsdn_t *tsdn, pa_shard_t *shard); +void pa_shard_try_deferred_work(tsdn_t *tsdn, pa_shard_t *shard); uint64_t pa_shard_time_until_deferred_work(tsdn_t *tsdn, pa_shard_t *shard); /******************************************************************************/ @@ -228,8 +228,8 @@ size_t pa_shard_nactive(pa_shard_t *shard); size_t pa_shard_ndirty(pa_shard_t *shard); size_t pa_shard_nmuzzy(pa_shard_t *shard); -void pa_shard_basic_stats_merge(pa_shard_t *shard, size_t *nactive, - size_t *ndirty, size_t *nmuzzy); +void pa_shard_basic_stats_merge( + pa_shard_t *shard, size_t *nactive, size_t *ndirty, size_t *nmuzzy); void pa_shard_stats_merge(tsdn_t *tsdn, pa_shard_t *shard, pa_shard_stats_t *pa_shard_stats_out, pac_estats_t *estats_out, diff --git a/include/jemalloc/internal/pac.h b/include/jemalloc/internal/pac.h index 243e97f3..a9edc19b 100644 --- a/include/jemalloc/internal/pac.h +++ b/include/jemalloc/internal/pac.h @@ -95,12 +95,12 @@ struct pac_s { ecache_t ecache_muzzy; ecache_t ecache_retained; - base_t *base; - emap_t *emap; + base_t *base; + emap_t *emap; edata_cache_t *edata_cache; /* The grow info for the retained ecache. */ - exp_grow_t exp_grow; + exp_grow_t exp_grow; malloc_mutex_t grow_mtx; /* Special allocator for guarded frequently reused extents. */ @@ -119,7 +119,7 @@ struct pac_s { decay_t decay_muzzy; /* muzzy --> retained */ malloc_mutex_t *stats_mtx; - pac_stats_t *stats; + pac_stats_t *stats; /* Extent serial number generator state. */ atomic_zu_t extent_sn_next; @@ -141,8 +141,8 @@ struct pac_thp_s { bool thp_madvise; /* Below fields are protected by the lock. 
*/ malloc_mutex_t lock; - bool auto_thp_switched; - atomic_u_t n_thp_lazy; + bool auto_thp_switched; + atomic_u_t n_thp_lazy; /* * List that tracks HUGEPAGE aligned regions that're lazily hugified * in auto thp mode. @@ -195,11 +195,11 @@ bool pac_maybe_decay_purge(tsdn_t *tsdn, pac_t *pac, decay_t *decay, * * Returns true on error (if the new limit is not valid). */ -bool pac_retain_grow_limit_get_set(tsdn_t *tsdn, pac_t *pac, size_t *old_limit, - size_t *new_limit); +bool pac_retain_grow_limit_get_set( + tsdn_t *tsdn, pac_t *pac, size_t *old_limit, size_t *new_limit); -bool pac_decay_ms_set(tsdn_t *tsdn, pac_t *pac, extent_state_t state, - ssize_t decay_ms, pac_purge_eagerness_t eagerness); +bool pac_decay_ms_set(tsdn_t *tsdn, pac_t *pac, extent_state_t state, + ssize_t decay_ms, pac_purge_eagerness_t eagerness); ssize_t pac_decay_ms_get(pac_t *pac, extent_state_t state); void pac_reset(tsdn_t *tsdn, pac_t *pac); diff --git a/include/jemalloc/internal/pages.h b/include/jemalloc/internal/pages.h index 366bc30b..31909934 100644 --- a/include/jemalloc/internal/pages.h +++ b/include/jemalloc/internal/pages.h @@ -5,27 +5,24 @@ #include "jemalloc/internal/jemalloc_internal_types.h" /* Actual operating system page size, detected during bootstrap, <= PAGE. */ -extern size_t os_page; +extern size_t os_page; /* Page size. LG_PAGE is determined by the configure script. */ #ifdef PAGE_MASK -# undef PAGE_MASK +# undef PAGE_MASK #endif -#define PAGE ((size_t)(1U << LG_PAGE)) -#define PAGE_MASK ((size_t)(PAGE - 1)) +#define PAGE ((size_t)(1U << LG_PAGE)) +#define PAGE_MASK ((size_t)(PAGE - 1)) /* Return the page base address for the page containing address a. */ -#define PAGE_ADDR2BASE(a) \ - ALIGNMENT_ADDR2BASE(a, PAGE) +#define PAGE_ADDR2BASE(a) ALIGNMENT_ADDR2BASE(a, PAGE) /* Return the smallest pagesize multiple that is >= s. 
*/ -#define PAGE_CEILING(s) \ - (((s) + PAGE_MASK) & ~PAGE_MASK) +#define PAGE_CEILING(s) (((s) + PAGE_MASK) & ~PAGE_MASK) /* Return the largest pagesize multiple that is <=s. */ -#define PAGE_FLOOR(s) \ - ((s) & ~PAGE_MASK) +#define PAGE_FLOOR(s) ((s) & ~PAGE_MASK) /* Huge page size. LG_HUGEPAGE is determined by the configure script. */ -#define HUGEPAGE ((size_t)(1U << LG_HUGEPAGE)) -#define HUGEPAGE_MASK ((size_t)(HUGEPAGE - 1)) +#define HUGEPAGE ((size_t)(1U << LG_HUGEPAGE)) +#define HUGEPAGE_MASK ((size_t)(HUGEPAGE - 1)) /* * Used to validate that the hugepage size is not unexpectedly high. The huge @@ -36,7 +33,7 @@ extern size_t os_page; #define HUGEPAGE_MAX_EXPECTED_SIZE ((size_t)(16U << 20)) #if LG_HUGEPAGE != 0 -# define HUGEPAGE_PAGES (HUGEPAGE / PAGE) +# define HUGEPAGE_PAGES (HUGEPAGE / PAGE) #else /* * It's convenient to define arrays (or bitmaps) of HUGEPAGE_PAGES lengths. If @@ -45,19 +42,17 @@ extern size_t os_page; * that this value is at least 1. (We won't ever run in this degraded state; * hpa_supported() returns false in this case. */ -# define HUGEPAGE_PAGES 1 +# define HUGEPAGE_PAGES 1 #endif /* Return the huge page base address for the huge page containing address a. */ -#define HUGEPAGE_ADDR2BASE(a) \ - ALIGNMENT_ADDR2BASE(a, HUGEPAGE) +#define HUGEPAGE_ADDR2BASE(a) ALIGNMENT_ADDR2BASE(a, HUGEPAGE) /* Return the smallest pagesize multiple that is >= s. */ -#define HUGEPAGE_CEILING(s) \ - (((s) + HUGEPAGE_MASK) & ~HUGEPAGE_MASK) +#define HUGEPAGE_CEILING(s) (((s) + HUGEPAGE_MASK) & ~HUGEPAGE_MASK) /* PAGES_CAN_PURGE_LAZY is defined if lazy purging is supported. */ #if defined(_WIN32) || defined(JEMALLOC_PURGE_MADVISE_FREE) -# define PAGES_CAN_PURGE_LAZY +# define PAGES_CAN_PURGE_LAZY #endif /* * PAGES_CAN_PURGE_FORCED is defined if forced purging is supported. @@ -68,10 +63,11 @@ extern size_t os_page; * next step after purging on Windows anyway, there's no point in adding such * complexity. 
*/ -#if !defined(_WIN32) && ((defined(JEMALLOC_PURGE_MADVISE_DONTNEED) && \ - defined(JEMALLOC_PURGE_MADVISE_DONTNEED_ZEROS)) || \ - defined(JEMALLOC_MAPS_COALESCE)) -# define PAGES_CAN_PURGE_FORCED +#if !defined(_WIN32) \ + && ((defined(JEMALLOC_PURGE_MADVISE_DONTNEED) \ + && defined(JEMALLOC_PURGE_MADVISE_DONTNEED_ZEROS)) \ + || defined(JEMALLOC_MAPS_COALESCE)) +# define PAGES_CAN_PURGE_FORCED #endif static const bool pages_can_purge_lazy = @@ -90,7 +86,7 @@ static const bool pages_can_purge_forced = ; #if defined(JEMALLOC_HAVE_MADVISE_HUGE) || defined(JEMALLOC_HAVE_MEMCNTL) -# define PAGES_CAN_HUGIFY +# define PAGES_CAN_HUGIFY #endif static const bool pages_can_hugify = @@ -102,25 +98,25 @@ static const bool pages_can_hugify = ; typedef enum { - thp_mode_default = 0, /* Do not change hugepage settings. */ - thp_mode_always = 1, /* Always set MADV_HUGEPAGE. */ - thp_mode_never = 2, /* Always set MADV_NOHUGEPAGE. */ + thp_mode_default = 0, /* Do not change hugepage settings. */ + thp_mode_always = 1, /* Always set MADV_HUGEPAGE. */ + thp_mode_never = 2, /* Always set MADV_NOHUGEPAGE. */ - thp_mode_names_limit = 3, /* Used for option processing. */ - thp_mode_not_supported = 3 /* No THP support detected. */ + thp_mode_names_limit = 3, /* Used for option processing. */ + thp_mode_not_supported = 3 /* No THP support detected. */ } thp_mode_t; #define THP_MODE_DEFAULT thp_mode_default -extern thp_mode_t opt_thp; -extern thp_mode_t init_system_thp_mode; /* Initial system wide state. */ +extern thp_mode_t opt_thp; +extern thp_mode_t init_system_thp_mode; /* Initial system wide state. 
*/ extern const char *const thp_mode_names[]; void *pages_map(void *addr, size_t size, size_t alignment, bool *commit); -void pages_unmap(void *addr, size_t size); -bool pages_commit(void *addr, size_t size); -bool pages_decommit(void *addr, size_t size); -bool pages_purge_lazy(void *addr, size_t size); -bool pages_purge_forced(void *addr, size_t size); +void pages_unmap(void *addr, size_t size); +bool pages_commit(void *addr, size_t size); +bool pages_decommit(void *addr, size_t size); +bool pages_purge_lazy(void *addr, size_t size); +bool pages_purge_forced(void *addr, size_t size); bool pages_purge_process_madvise(void *vec, size_t ven_len, size_t total_bytes); bool pages_huge(void *addr, size_t size); bool pages_nohuge(void *addr, size_t size); @@ -128,7 +124,7 @@ bool pages_collapse(void *addr, size_t size); bool pages_dontdump(void *addr, size_t size); bool pages_dodump(void *addr, size_t size); bool pages_boot(void); -void pages_set_thp_state (void *ptr, size_t size); +void pages_set_thp_state(void *ptr, size_t size); void pages_mark_guards(void *head, void *tail); void pages_unmark_guards(void *head, void *tail); diff --git a/include/jemalloc/internal/pai.h b/include/jemalloc/internal/pai.h index 557d30d1..1d924657 100644 --- a/include/jemalloc/internal/pai.h +++ b/include/jemalloc/internal/pai.h @@ -41,9 +41,8 @@ struct pai_s { */ static inline edata_t * -pai_alloc(tsdn_t *tsdn, pai_t *self, size_t size, size_t alignment, - bool zero, bool guarded, bool frequent_reuse, - bool *deferred_work_generated) { +pai_alloc(tsdn_t *tsdn, pai_t *self, size_t size, size_t alignment, bool zero, + bool guarded, bool frequent_reuse, bool *deferred_work_generated) { return self->alloc(tsdn, self, size, alignment, zero, guarded, frequent_reuse, deferred_work_generated); } @@ -66,13 +65,13 @@ pai_expand(tsdn_t *tsdn, pai_t *self, edata_t *edata, size_t old_size, static inline bool pai_shrink(tsdn_t *tsdn, pai_t *self, edata_t *edata, size_t old_size, size_t new_size, bool 
*deferred_work_generated) { - return self->shrink(tsdn, self, edata, old_size, new_size, - deferred_work_generated); + return self->shrink( + tsdn, self, edata, old_size, new_size, deferred_work_generated); } static inline void -pai_dalloc(tsdn_t *tsdn, pai_t *self, edata_t *edata, - bool *deferred_work_generated) { +pai_dalloc( + tsdn_t *tsdn, pai_t *self, edata_t *edata, bool *deferred_work_generated) { self->dalloc(tsdn, self, edata, deferred_work_generated); } diff --git a/include/jemalloc/internal/peak.h b/include/jemalloc/internal/peak.h index 2a973cb8..599f1a02 100644 --- a/include/jemalloc/internal/peak.h +++ b/include/jemalloc/internal/peak.h @@ -14,7 +14,8 @@ struct peak_s { uint64_t adjustment; }; -#define PEAK_INITIALIZER {0, 0} +#define PEAK_INITIALIZER \ + { 0, 0 } static inline uint64_t peak_max(peak_t *peak) { diff --git a/include/jemalloc/internal/peak_event.h b/include/jemalloc/internal/peak_event.h index 1e339ff8..0d1f1627 100644 --- a/include/jemalloc/internal/peak_event.h +++ b/include/jemalloc/internal/peak_event.h @@ -20,7 +20,7 @@ /* Update the peak with current tsd state. */ void peak_event_update(tsd_t *tsd); /* Set current state to zero. 
*/ -void peak_event_zero(tsd_t *tsd); +void peak_event_zero(tsd_t *tsd); uint64_t peak_event_max(tsd_t *tsd); extern te_base_cb_t peak_te_handler; diff --git a/include/jemalloc/internal/ph.h b/include/jemalloc/internal/ph.h index 05376004..803d2cbd 100644 --- a/include/jemalloc/internal/ph.h +++ b/include/jemalloc/internal/ph.h @@ -129,8 +129,7 @@ phn_prev_set(void *phn, void *prev, size_t offset) { } JEMALLOC_ALWAYS_INLINE void -phn_merge_ordered(void *phn0, void *phn1, size_t offset, - ph_cmp_t cmp) { +phn_merge_ordered(void *phn0, void *phn1, size_t offset, ph_cmp_t cmp) { void *phn0child; assert(phn0 != NULL); @@ -361,15 +360,14 @@ ph_insert(ph_t *ph, void *phn, size_t offset, ph_cmp_t cmp) { phn_next_set(phn, phn_next_get(ph->root, offset), offset); if (phn_next_get(ph->root, offset) != NULL) { - phn_prev_set(phn_next_get(ph->root, offset), phn, - offset); + phn_prev_set(phn_next_get(ph->root, offset), phn, offset); } phn_prev_set(phn, ph->root, offset); phn_next_set(ph->root, phn, offset); ph->auxcount++; unsigned nmerges = ffs_zu(ph->auxcount); - bool done = false; + bool done = false; for (unsigned i = 0; i < nmerges && !done; i++) { done = ph_try_aux_merge_pair(ph, offset, cmp); } @@ -387,7 +385,6 @@ ph_remove_first(ph_t *ph, size_t offset, ph_cmp_t cmp) { ph->root = ph_merge_children(ph->root, offset, cmp); return ret; - } JEMALLOC_ALWAYS_INLINE void @@ -398,11 +395,11 @@ ph_remove(ph_t *ph, void *phn, size_t offset, ph_cmp_t cmp) { return; } - void* prev = phn_prev_get(phn, offset); - void* next = phn_next_get(phn, offset); + void *prev = phn_prev_get(phn, offset); + void *next = phn_next_get(phn, offset); /* If we have children, then we integrate them back in the heap. 
*/ - void* replace = ph_merge_children(phn, offset, cmp); + void *replace = ph_merge_children(phn, offset, cmp); if (replace != NULL) { phn_next_set(replace, next, offset); if (next != NULL) { @@ -438,16 +435,16 @@ ph_enumerate_vars_init(ph_enumerate_vars_t *vars, uint16_t max_visit_num, * max_queue_size must be able to support max_visit_num, which means * the queue will not overflow before reaching max_visit_num. */ - assert(vars->max_queue_size >= (vars->max_visit_num + 1)/2); + assert(vars->max_queue_size >= (vars->max_visit_num + 1) / 2); } JEMALLOC_ALWAYS_INLINE void -ph_enumerate_queue_push(void *phn, void **bfs_queue, - ph_enumerate_vars_t *vars) { +ph_enumerate_queue_push( + void *phn, void **bfs_queue, ph_enumerate_vars_t *vars) { assert(vars->queue_size < vars->max_queue_size); bfs_queue[vars->rear] = phn; vars->rear = (vars->rear + 1) % vars->max_queue_size; - (vars->queue_size) ++; + (vars->queue_size)++; } JEMALLOC_ALWAYS_INLINE void * @@ -456,11 +453,10 @@ ph_enumerate_queue_pop(void **bfs_queue, ph_enumerate_vars_t *vars) { assert(vars->queue_size <= vars->max_queue_size); void *ret = bfs_queue[vars->front]; vars->front = (vars->front + 1) % vars->max_queue_size; - (vars->queue_size) --; + (vars->queue_size)--; return ret; } - /* * The two functions below offer a solution to enumerate the pairing heap. 
* Whe enumerating, always call ph_enumerate_prepare first to prepare the queue @@ -478,13 +474,13 @@ ph_enumerate_prepare(ph_t *ph, void **bfs_queue, ph_enumerate_vars_t *vars, } JEMALLOC_ALWAYS_INLINE void * -ph_enumerate_next(ph_t *ph, size_t offset, void **bfs_queue, - ph_enumerate_vars_t *vars) { +ph_enumerate_next( + ph_t *ph, size_t offset, void **bfs_queue, ph_enumerate_vars_t *vars) { if (vars->queue_size == 0) { return NULL; } - (vars->visited_num) ++; + (vars->visited_num)++; if (vars->visited_num > vars->max_visit_num) { return NULL; } @@ -502,109 +498,97 @@ ph_enumerate_next(ph_t *ph, size_t offset, void **bfs_queue, return ret; } -#define ph_structs(a_prefix, a_type, a_max_queue_size) \ -typedef struct { \ - phn_link_t link; \ -} a_prefix##_link_t; \ - \ -typedef struct { \ - ph_t ph; \ -} a_prefix##_t; \ - \ -typedef struct { \ - void *bfs_queue[a_max_queue_size]; \ - ph_enumerate_vars_t vars; \ -} a_prefix##_enumerate_helper_t; - +#define ph_structs(a_prefix, a_type, a_max_queue_size) \ + typedef struct { \ + phn_link_t link; \ + } a_prefix##_link_t; \ + \ + typedef struct { \ + ph_t ph; \ + } a_prefix##_t; \ + \ + typedef struct { \ + void *bfs_queue[a_max_queue_size]; \ + ph_enumerate_vars_t vars; \ + } a_prefix##_enumerate_helper_t; /* * The ph_proto() macro generates function prototypes that correspond to the * functions generated by an equivalently parameterized call to ph_gen(). 
*/ -#define ph_proto(a_attr, a_prefix, a_type) \ - \ -a_attr void a_prefix##_new(a_prefix##_t *ph); \ -a_attr bool a_prefix##_empty(a_prefix##_t *ph); \ -a_attr a_type *a_prefix##_first(a_prefix##_t *ph); \ -a_attr a_type *a_prefix##_any(a_prefix##_t *ph); \ -a_attr void a_prefix##_insert(a_prefix##_t *ph, a_type *phn); \ -a_attr a_type *a_prefix##_remove_first(a_prefix##_t *ph); \ -a_attr void a_prefix##_remove(a_prefix##_t *ph, a_type *phn); \ -a_attr a_type *a_prefix##_remove_any(a_prefix##_t *ph); \ -a_attr void a_prefix##_enumerate_prepare(a_prefix##_t *ph, \ - a_prefix##_enumerate_helper_t *helper, uint16_t max_visit_num, \ - uint16_t max_queue_size); \ -a_attr a_type *a_prefix##_enumerate_next(a_prefix##_t *ph, \ - a_prefix##_enumerate_helper_t *helper); +#define ph_proto(a_attr, a_prefix, a_type) \ + \ + a_attr void a_prefix##_new(a_prefix##_t *ph); \ + a_attr bool a_prefix##_empty(a_prefix##_t *ph); \ + a_attr a_type *a_prefix##_first(a_prefix##_t *ph); \ + a_attr a_type *a_prefix##_any(a_prefix##_t *ph); \ + a_attr void a_prefix##_insert(a_prefix##_t *ph, a_type *phn); \ + a_attr a_type *a_prefix##_remove_first(a_prefix##_t *ph); \ + a_attr void a_prefix##_remove(a_prefix##_t *ph, a_type *phn); \ + a_attr a_type *a_prefix##_remove_any(a_prefix##_t *ph); \ + a_attr void a_prefix##_enumerate_prepare(a_prefix##_t *ph, \ + a_prefix##_enumerate_helper_t *helper, uint16_t max_visit_num, \ + uint16_t max_queue_size); \ + a_attr a_type *a_prefix##_enumerate_next( \ + a_prefix##_t *ph, a_prefix##_enumerate_helper_t *helper); /* The ph_gen() macro generates a type-specific pairing heap implementation. 
*/ -#define ph_gen(a_attr, a_prefix, a_type, a_field, a_cmp) \ -JEMALLOC_ALWAYS_INLINE int \ -a_prefix##_ph_cmp(void *a, void *b) { \ - return a_cmp((a_type *)a, (a_type *)b); \ -} \ - \ -a_attr void \ -a_prefix##_new(a_prefix##_t *ph) { \ - ph_new(&ph->ph); \ -} \ - \ -a_attr bool \ -a_prefix##_empty(a_prefix##_t *ph) { \ - return ph_empty(&ph->ph); \ -} \ - \ -a_attr a_type * \ -a_prefix##_first(a_prefix##_t *ph) { \ - return ph_first(&ph->ph, offsetof(a_type, a_field), \ - &a_prefix##_ph_cmp); \ -} \ - \ -a_attr a_type * \ -a_prefix##_any(a_prefix##_t *ph) { \ - return ph_any(&ph->ph, offsetof(a_type, a_field)); \ -} \ - \ -a_attr void \ -a_prefix##_insert(a_prefix##_t *ph, a_type *phn) { \ - ph_insert(&ph->ph, phn, offsetof(a_type, a_field), \ - a_prefix##_ph_cmp); \ -} \ - \ -a_attr a_type * \ -a_prefix##_remove_first(a_prefix##_t *ph) { \ - return ph_remove_first(&ph->ph, offsetof(a_type, a_field), \ - a_prefix##_ph_cmp); \ -} \ - \ -a_attr void \ -a_prefix##_remove(a_prefix##_t *ph, a_type *phn) { \ - ph_remove(&ph->ph, phn, offsetof(a_type, a_field), \ - a_prefix##_ph_cmp); \ -} \ - \ -a_attr a_type * \ -a_prefix##_remove_any(a_prefix##_t *ph) { \ - a_type *ret = a_prefix##_any(ph); \ - if (ret != NULL) { \ - a_prefix##_remove(ph, ret); \ - } \ - return ret; \ -} \ - \ -a_attr void \ -a_prefix##_enumerate_prepare(a_prefix##_t *ph, \ - a_prefix##_enumerate_helper_t *helper, uint16_t max_visit_num, \ - uint16_t max_queue_size) { \ - ph_enumerate_prepare(&ph->ph, helper->bfs_queue, &helper->vars, \ - max_visit_num, max_queue_size); \ -} \ - \ -a_attr a_type * \ -a_prefix##_enumerate_next(a_prefix##_t *ph, \ - a_prefix##_enumerate_helper_t *helper) { \ - return ph_enumerate_next(&ph->ph, offsetof(a_type, a_field), \ - helper->bfs_queue, &helper->vars); \ -} +#define ph_gen(a_attr, a_prefix, a_type, a_field, a_cmp) \ + JEMALLOC_ALWAYS_INLINE int a_prefix##_ph_cmp(void *a, void *b) { \ + return a_cmp((a_type *)a, (a_type *)b); \ + } \ + \ + a_attr void 
a_prefix##_new(a_prefix##_t *ph) { \ + ph_new(&ph->ph); \ + } \ + \ + a_attr bool a_prefix##_empty(a_prefix##_t *ph) { \ + return ph_empty(&ph->ph); \ + } \ + \ + a_attr a_type *a_prefix##_first(a_prefix##_t *ph) { \ + return ph_first( \ + &ph->ph, offsetof(a_type, a_field), &a_prefix##_ph_cmp); \ + } \ + \ + a_attr a_type *a_prefix##_any(a_prefix##_t *ph) { \ + return ph_any(&ph->ph, offsetof(a_type, a_field)); \ + } \ + \ + a_attr void a_prefix##_insert(a_prefix##_t *ph, a_type *phn) { \ + ph_insert(&ph->ph, phn, offsetof(a_type, a_field), \ + a_prefix##_ph_cmp); \ + } \ + \ + a_attr a_type *a_prefix##_remove_first(a_prefix##_t *ph) { \ + return ph_remove_first( \ + &ph->ph, offsetof(a_type, a_field), a_prefix##_ph_cmp); \ + } \ + \ + a_attr void a_prefix##_remove(a_prefix##_t *ph, a_type *phn) { \ + ph_remove(&ph->ph, phn, offsetof(a_type, a_field), \ + a_prefix##_ph_cmp); \ + } \ + \ + a_attr a_type *a_prefix##_remove_any(a_prefix##_t *ph) { \ + a_type *ret = a_prefix##_any(ph); \ + if (ret != NULL) { \ + a_prefix##_remove(ph, ret); \ + } \ + return ret; \ + } \ + \ + a_attr void a_prefix##_enumerate_prepare(a_prefix##_t *ph, \ + a_prefix##_enumerate_helper_t *helper, uint16_t max_visit_num, \ + uint16_t max_queue_size) { \ + ph_enumerate_prepare(&ph->ph, helper->bfs_queue, \ + &helper->vars, max_visit_num, max_queue_size); \ + } \ + \ + a_attr a_type *a_prefix##_enumerate_next( \ + a_prefix##_t *ph, a_prefix##_enumerate_helper_t *helper) { \ + return ph_enumerate_next(&ph->ph, offsetof(a_type, a_field), \ + helper->bfs_queue, &helper->vars); \ + } #endif /* JEMALLOC_INTERNAL_PH_H */ diff --git a/include/jemalloc/internal/prng.h b/include/jemalloc/internal/prng.h index 81060d32..04049519 100644 --- a/include/jemalloc/internal/prng.h +++ b/include/jemalloc/internal/prng.h @@ -26,11 +26,11 @@ /******************************************************************************/ /* INTERNAL DEFINITIONS -- IGNORE */ 
/******************************************************************************/ -#define PRNG_A_32 UINT32_C(1103515241) -#define PRNG_C_32 UINT32_C(12347) +#define PRNG_A_32 UINT32_C(1103515241) +#define PRNG_C_32 UINT32_C(12347) -#define PRNG_A_64 UINT64_C(6364136223846793005) -#define PRNG_C_64 UINT64_C(1442695040888963407) +#define PRNG_A_64 UINT64_C(6364136223846793005) +#define PRNG_C_64 UINT64_C(1442695040888963407) JEMALLOC_ALWAYS_INLINE uint32_t prng_state_next_u32(uint32_t state) { @@ -49,7 +49,7 @@ prng_state_next_zu(size_t state) { #elif LG_SIZEOF_PTR == 3 return (state * PRNG_A_64) + PRNG_C_64; #else -#error Unsupported pointer size +# error Unsupported pointer size #endif } diff --git a/include/jemalloc/internal/prof_data.h b/include/jemalloc/internal/prof_data.h index 43e8d7e7..0af5835c 100644 --- a/include/jemalloc/internal/prof_data.h +++ b/include/jemalloc/internal/prof_data.h @@ -17,21 +17,21 @@ extern size_t prof_shifted_unbiased_cnt[PROF_SC_NSIZES]; void prof_bt_hash(const void *key, size_t r_hash[2]); bool prof_bt_keycomp(const void *k1, const void *k2); -bool prof_data_init(tsd_t *tsd); +bool prof_data_init(tsd_t *tsd); prof_tctx_t *prof_lookup(tsd_t *tsd, prof_bt_t *bt); -int prof_thread_name_set_impl(tsd_t *tsd, const char *thread_name); -void prof_unbias_map_init(void); +int prof_thread_name_set_impl(tsd_t *tsd, const char *thread_name); +void prof_unbias_map_init(void); void prof_dump_impl(tsd_t *tsd, write_cb_t *prof_dump_write, void *cbopaque, prof_tdata_t *tdata, bool leakcheck); -prof_tdata_t * prof_tdata_init_impl(tsd_t *tsd, uint64_t thr_uid, +prof_tdata_t *prof_tdata_init_impl(tsd_t *tsd, uint64_t thr_uid, uint64_t thr_discrim, char *thread_name, bool active); -void prof_tdata_detach(tsd_t *tsd, prof_tdata_t *tdata); -void prof_reset(tsd_t *tsd, size_t lg_sample); -void prof_tctx_try_destroy(tsd_t *tsd, prof_tctx_t *tctx); +void prof_tdata_detach(tsd_t *tsd, prof_tdata_t *tdata); +void prof_reset(tsd_t *tsd, size_t lg_sample); 
+void prof_tctx_try_destroy(tsd_t *tsd, prof_tctx_t *tctx); /* Used in unit tests. */ size_t prof_tdata_count(void); size_t prof_bt_count(void); -void prof_cnt_all(prof_cnt_t *cnt_all); +void prof_cnt_all(prof_cnt_t *cnt_all); #endif /* JEMALLOC_INTERNAL_PROF_DATA_H */ diff --git a/include/jemalloc/internal/prof_externs.h b/include/jemalloc/internal/prof_externs.h index 7d962522..e41e30a0 100644 --- a/include/jemalloc/internal/prof_externs.h +++ b/include/jemalloc/internal/prof_externs.h @@ -7,21 +7,22 @@ #include "jemalloc/internal/prof_hook.h" #include "jemalloc/internal/thread_event_registry.h" -extern bool opt_prof; -extern bool opt_prof_active; -extern bool opt_prof_thread_active_init; +extern bool opt_prof; +extern bool opt_prof_active; +extern bool opt_prof_thread_active_init; extern unsigned opt_prof_bt_max; -extern size_t opt_lg_prof_sample; /* Mean bytes between samples. */ -extern size_t opt_experimental_lg_prof_threshold; /* Mean bytes between thresholds. */ -extern ssize_t opt_lg_prof_interval; /* lg(prof_interval). */ -extern bool opt_prof_gdump; /* High-water memory dumping. */ -extern bool opt_prof_final; /* Final profile dumping. */ -extern bool opt_prof_leak; /* Dump leak summary at exit. */ -extern bool opt_prof_leak_error; /* Exit with error code if memory leaked */ -extern bool opt_prof_accum; /* Report cumulative bytes. */ -extern bool opt_prof_log; /* Turn logging on at boot. */ -extern char opt_prof_prefix[ - /* Minimize memory bloat for non-prof builds. */ +extern size_t opt_lg_prof_sample; /* Mean bytes between samples. */ +extern size_t + opt_experimental_lg_prof_threshold; /* Mean bytes between thresholds. */ +extern ssize_t opt_lg_prof_interval; /* lg(prof_interval). */ +extern bool opt_prof_gdump; /* High-water memory dumping. */ +extern bool opt_prof_final; /* Final profile dumping. */ +extern bool opt_prof_leak; /* Dump leak summary at exit. 
*/ +extern bool opt_prof_leak_error; /* Exit with error code if memory leaked */ +extern bool opt_prof_accum; /* Report cumulative bytes. */ +extern bool opt_prof_log; /* Turn logging on at boot. */ +extern char opt_prof_prefix[ +/* Minimize memory bloat for non-prof builds. */ #ifdef JEMALLOC_PROF PATH_MAX + #endif @@ -57,19 +58,19 @@ extern size_t lg_prof_sample; extern bool prof_booted; -void prof_backtrace_hook_set(prof_backtrace_hook_t hook); +void prof_backtrace_hook_set(prof_backtrace_hook_t hook); prof_backtrace_hook_t prof_backtrace_hook_get(void); -void prof_dump_hook_set(prof_dump_hook_t hook); +void prof_dump_hook_set(prof_dump_hook_t hook); prof_dump_hook_t prof_dump_hook_get(void); -void prof_sample_hook_set(prof_sample_hook_t hook); +void prof_sample_hook_set(prof_sample_hook_t hook); prof_sample_hook_t prof_sample_hook_get(void); -void prof_sample_free_hook_set(prof_sample_free_hook_t hook); +void prof_sample_free_hook_set(prof_sample_free_hook_t hook); prof_sample_free_hook_t prof_sample_free_hook_get(void); -void prof_threshold_hook_set(prof_threshold_hook_t hook); +void prof_threshold_hook_set(prof_threshold_hook_t hook); prof_threshold_hook_t prof_threshold_hook_get(void); /* Functions only accessed in prof_inlines.h */ @@ -77,33 +78,33 @@ prof_tdata_t *prof_tdata_init(tsd_t *tsd); prof_tdata_t *prof_tdata_reinit(tsd_t *tsd, prof_tdata_t *tdata); void prof_alloc_rollback(tsd_t *tsd, prof_tctx_t *tctx); -void prof_malloc_sample_object(tsd_t *tsd, const void *ptr, size_t size, - size_t usize, prof_tctx_t *tctx); -void prof_free_sampled_object(tsd_t *tsd, const void *ptr, size_t usize, - prof_info_t *prof_info); +void prof_malloc_sample_object( + tsd_t *tsd, const void *ptr, size_t size, size_t usize, prof_tctx_t *tctx); +void prof_free_sampled_object( + tsd_t *tsd, const void *ptr, size_t usize, prof_info_t *prof_info); prof_tctx_t *prof_tctx_create(tsd_t *tsd); -void prof_idump(tsdn_t *tsdn); -bool prof_mdump(tsd_t *tsd, const char *filename); 
-void prof_gdump(tsdn_t *tsdn); +void prof_idump(tsdn_t *tsdn); +bool prof_mdump(tsd_t *tsd, const char *filename); +void prof_gdump(tsdn_t *tsdn); -void prof_tdata_cleanup(tsd_t *tsd); -bool prof_active_get(tsdn_t *tsdn); -bool prof_active_set(tsdn_t *tsdn, bool active); +void prof_tdata_cleanup(tsd_t *tsd); +bool prof_active_get(tsdn_t *tsdn); +bool prof_active_set(tsdn_t *tsdn, bool active); const char *prof_thread_name_get(tsd_t *tsd); -int prof_thread_name_set(tsd_t *tsd, const char *thread_name); -bool prof_thread_active_get(tsd_t *tsd); -bool prof_thread_active_set(tsd_t *tsd, bool active); -bool prof_thread_active_init_get(tsdn_t *tsdn); -bool prof_thread_active_init_set(tsdn_t *tsdn, bool active_init); -bool prof_gdump_get(tsdn_t *tsdn); -bool prof_gdump_set(tsdn_t *tsdn, bool active); -void prof_boot0(void); -void prof_boot1(void); -bool prof_boot2(tsd_t *tsd, base_t *base); -void prof_prefork0(tsdn_t *tsdn); -void prof_prefork1(tsdn_t *tsdn); -void prof_postfork_parent(tsdn_t *tsdn); -void prof_postfork_child(tsdn_t *tsdn); +int prof_thread_name_set(tsd_t *tsd, const char *thread_name); +bool prof_thread_active_get(tsd_t *tsd); +bool prof_thread_active_set(tsd_t *tsd, bool active); +bool prof_thread_active_init_get(tsdn_t *tsdn); +bool prof_thread_active_init_set(tsdn_t *tsdn, bool active_init); +bool prof_gdump_get(tsdn_t *tsdn); +bool prof_gdump_set(tsdn_t *tsdn, bool active); +void prof_boot0(void); +void prof_boot1(void); +bool prof_boot2(tsd_t *tsd, base_t *base); +void prof_prefork0(tsdn_t *tsdn); +void prof_prefork1(tsdn_t *tsdn); +void prof_postfork_parent(tsdn_t *tsdn); +void prof_postfork_child(tsdn_t *tsdn); uint64_t prof_sample_new_event_wait(tsd_t *tsd); uint64_t tsd_prof_sample_event_wait_get(tsd_t *tsd); @@ -130,8 +131,8 @@ uint64_t tsd_prof_sample_event_wait_get(tsd_t *tsd); */ JEMALLOC_ALWAYS_INLINE bool -te_prof_sample_event_lookahead_surplus(tsd_t *tsd, size_t usize, - size_t *surplus) { +te_prof_sample_event_lookahead_surplus( + tsd_t 
*tsd, size_t usize, size_t *surplus) { if (surplus != NULL) { /* * This is a dead store: the surplus will be overwritten before @@ -146,8 +147,8 @@ te_prof_sample_event_lookahead_surplus(tsd_t *tsd, size_t usize, return false; } /* The subtraction is intentionally susceptible to underflow. */ - uint64_t accumbytes = tsd_thread_allocated_get(tsd) + usize - - tsd_thread_allocated_last_event_get(tsd); + uint64_t accumbytes = tsd_thread_allocated_get(tsd) + usize + - tsd_thread_allocated_last_event_get(tsd); uint64_t sample_wait = tsd_prof_sample_event_wait_get(tsd); if (accumbytes < sample_wait) { return false; diff --git a/include/jemalloc/internal/prof_hook.h b/include/jemalloc/internal/prof_hook.h index 2f3a81af..69dfaabf 100644 --- a/include/jemalloc/internal/prof_hook.h +++ b/include/jemalloc/internal/prof_hook.h @@ -21,7 +21,8 @@ typedef void (*prof_backtrace_hook_t)(void **, unsigned *, unsigned); typedef void (*prof_dump_hook_t)(const char *filename); /* ptr, size, backtrace vector, backtrace vector length, usize */ -typedef void (*prof_sample_hook_t)(const void *ptr, size_t size, void **backtrace, unsigned backtrace_length, size_t usize); +typedef void (*prof_sample_hook_t)(const void *ptr, size_t size, + void **backtrace, unsigned backtrace_length, size_t usize); /* ptr, size */ typedef void (*prof_sample_free_hook_t)(const void *, size_t); @@ -29,6 +30,7 @@ typedef void (*prof_sample_free_hook_t)(const void *, size_t); /* * A callback hook that notifies when an allocation threshold has been crossed. 
*/ -typedef void (*prof_threshold_hook_t)(uint64_t alloc, uint64_t dealloc, uint64_t peak); +typedef void (*prof_threshold_hook_t)( + uint64_t alloc, uint64_t dealloc, uint64_t peak); #endif /* JEMALLOC_INTERNAL_PROF_HOOK_H */ diff --git a/include/jemalloc/internal/prof_inlines.h b/include/jemalloc/internal/prof_inlines.h index 75300ee4..4a36bd7a 100644 --- a/include/jemalloc/internal/prof_inlines.h +++ b/include/jemalloc/internal/prof_inlines.h @@ -164,8 +164,8 @@ JEMALLOC_ALWAYS_INLINE prof_tctx_t * prof_alloc_prep(tsd_t *tsd, bool prof_active, bool sample_event) { prof_tctx_t *ret; - if (!prof_active || - likely(prof_sample_should_skip(tsd, sample_event))) { + if (!prof_active + || likely(prof_sample_should_skip(tsd, sample_event))) { ret = PROF_TCTX_SENTINEL; } else { ret = prof_tctx_create(tsd); @@ -242,8 +242,8 @@ prof_realloc(tsd_t *tsd, const void *ptr, size_t size, size_t usize, * counters. */ if (unlikely(old_sampled)) { - prof_free_sampled_object(tsd, old_ptr, old_usize, - old_prof_info); + prof_free_sampled_object( + tsd, old_ptr, old_usize, old_prof_info); } } @@ -254,9 +254,10 @@ prof_sample_align(size_t usize, size_t orig_align) { * w/o metadata lookup. */ assert(opt_prof); - return (orig_align < PROF_SAMPLE_ALIGNMENT && - (sz_can_use_slab(usize) || opt_cache_oblivious)) ? - PROF_SAMPLE_ALIGNMENT : orig_align; + return (orig_align < PROF_SAMPLE_ALIGNMENT + && (sz_can_use_slab(usize) || opt_cache_oblivious)) + ? 
PROF_SAMPLE_ALIGNMENT + : orig_align; } JEMALLOC_ALWAYS_INLINE bool @@ -271,8 +272,8 @@ prof_sampled(tsd_t *tsd, const void *ptr) { } JEMALLOC_ALWAYS_INLINE void -prof_free(tsd_t *tsd, const void *ptr, size_t usize, - emap_alloc_ctx_t *alloc_ctx) { +prof_free( + tsd_t *tsd, const void *ptr, size_t usize, emap_alloc_ctx_t *alloc_ctx) { prof_info_t prof_info; prof_info_get_and_reset_recent(tsd, ptr, alloc_ctx, &prof_info); diff --git a/include/jemalloc/internal/prof_log.h b/include/jemalloc/internal/prof_log.h index 0b1271c8..d9b97dc8 100644 --- a/include/jemalloc/internal/prof_log.h +++ b/include/jemalloc/internal/prof_log.h @@ -13,9 +13,9 @@ bool prof_log_init(tsd_t *tsdn); size_t prof_log_bt_count(void); size_t prof_log_alloc_count(void); size_t prof_log_thr_count(void); -bool prof_log_is_logging(void); -bool prof_log_rep_check(void); -void prof_log_dummy_set(bool new_value); +bool prof_log_is_logging(void); +bool prof_log_rep_check(void); +void prof_log_dummy_set(bool new_value); bool prof_log_start(tsdn_t *tsdn, const char *filename); bool prof_log_stop(tsdn_t *tsdn); diff --git a/include/jemalloc/internal/prof_structs.h b/include/jemalloc/internal/prof_structs.h index 084a549d..d38b15ea 100644 --- a/include/jemalloc/internal/prof_structs.h +++ b/include/jemalloc/internal/prof_structs.h @@ -10,29 +10,29 @@ struct prof_bt_s { /* Backtrace, stored as len program counters. */ - void **vec; - unsigned len; + void **vec; + unsigned len; }; #ifdef JEMALLOC_PROF_LIBGCC /* Data structure passed to libgcc _Unwind_Backtrace() callback functions. */ typedef struct { - void **vec; - unsigned *len; - unsigned max; + void **vec; + unsigned *len; + unsigned max; } prof_unwind_data_t; #endif struct prof_cnt_s { /* Profiling counters. 
*/ - uint64_t curobjs; - uint64_t curobjs_shifted_unbiased; - uint64_t curbytes; - uint64_t curbytes_unbiased; - uint64_t accumobjs; - uint64_t accumobjs_shifted_unbiased; - uint64_t accumbytes; - uint64_t accumbytes_unbiased; + uint64_t curobjs; + uint64_t curobjs_shifted_unbiased; + uint64_t curbytes; + uint64_t curbytes_unbiased; + uint64_t accumobjs; + uint64_t accumobjs_shifted_unbiased; + uint64_t accumbytes; + uint64_t accumbytes_unbiased; }; typedef enum { @@ -44,26 +44,26 @@ typedef enum { struct prof_tctx_s { /* Thread data for thread that performed the allocation. */ - prof_tdata_t *tdata; + prof_tdata_t *tdata; /* * Copy of tdata->thr_{uid,discrim}, necessary because tdata may be * defunct during teardown. */ - uint64_t thr_uid; - uint64_t thr_discrim; + uint64_t thr_uid; + uint64_t thr_discrim; /* * Reference count of how many times this tctx object is referenced in * recent allocation / deallocation records, protected by tdata->lock. */ - uint64_t recent_count; + uint64_t recent_count; /* Profiling counters, protected by tdata->lock. */ - prof_cnt_t cnts; + prof_cnt_t cnts; /* Associated global context. */ - prof_gctx_t *gctx; + prof_gctx_t *gctx; /* * UID that distinguishes multiple tctx's created by the same thread, @@ -78,40 +78,40 @@ struct prof_tctx_s { * threshold can be hit again before the first consumer finishes * executing prof_tctx_destroy(). */ - uint64_t tctx_uid; + uint64_t tctx_uid; /* Linkage into gctx's tctxs. */ - rb_node(prof_tctx_t) tctx_link; + rb_node(prof_tctx_t) tctx_link; /* * True during prof_alloc_prep()..prof_malloc_sample_object(), prevents * sample vs destroy race. */ - bool prepared; + bool prepared; /* Current dump-related state, protected by gctx->lock. */ - prof_tctx_state_t state; + prof_tctx_state_t state; /* * Copy of cnts snapshotted during early dump phase, protected by * dump_mtx. 
*/ - prof_cnt_t dump_cnts; + prof_cnt_t dump_cnts; }; typedef rb_tree(prof_tctx_t) prof_tctx_tree_t; struct prof_info_s { /* Time when the allocation was made. */ - nstime_t alloc_time; + nstime_t alloc_time; /* Points to the prof_tctx_t corresponding to the allocation. */ - prof_tctx_t *alloc_tctx; + prof_tctx_t *alloc_tctx; /* Allocation request size. */ - size_t alloc_size; + size_t alloc_size; }; struct prof_gctx_s { /* Protects nlimbo, cnt_summed, and tctxs. */ - malloc_mutex_t *lock; + malloc_mutex_t *lock; /* * Number of threads that currently cause this gctx to be in a state of @@ -123,48 +123,48 @@ struct prof_gctx_s { * nlimbo must be 1 (single destroyer) in order to safely destroy the * gctx. */ - unsigned nlimbo; + unsigned nlimbo; /* * Tree of profile counters, one for each thread that has allocated in * this context. */ - prof_tctx_tree_t tctxs; + prof_tctx_tree_t tctxs; /* Linkage for tree of contexts to be dumped. */ - rb_node(prof_gctx_t) dump_link; + rb_node(prof_gctx_t) dump_link; /* Temporary storage for summation during dump. */ - prof_cnt_t cnt_summed; + prof_cnt_t cnt_summed; /* Associated backtrace. */ - prof_bt_t bt; + prof_bt_t bt; /* Backtrace vector, variable size, referred to by bt. */ - void *vec[1]; + void *vec[1]; }; typedef rb_tree(prof_gctx_t) prof_gctx_tree_t; struct prof_tdata_s { - malloc_mutex_t *lock; + malloc_mutex_t *lock; /* Monotonically increasing unique thread identifier. */ - uint64_t thr_uid; + uint64_t thr_uid; /* * Monotonically increasing discriminator among tdata structures * associated with the same thr_uid. */ - uint64_t thr_discrim; + uint64_t thr_discrim; - rb_node(prof_tdata_t) tdata_link; + rb_node(prof_tdata_t) tdata_link; /* * Counter used to initialize prof_tctx_t's tctx_uid. No locking is * necessary when incrementing this field, because only one thread ever * does so. */ - uint64_t tctx_uid_next; + uint64_t tctx_uid_next; /* * Hash of (prof_bt_t *)-->(prof_tctx_t *). 
Each thread tracks @@ -172,15 +172,15 @@ struct prof_tdata_s { * associated with thread-specific prof_tctx_t objects. Other threads * may write to prof_tctx_t contents when freeing associated objects. */ - ckh_t bt2tctx; + ckh_t bt2tctx; /* Included in heap profile dumps if has content. */ - char thread_name[PROF_THREAD_NAME_MAX_LEN]; + char thread_name[PROF_THREAD_NAME_MAX_LEN]; /* State used to avoid dumping while operating on prof internals. */ - bool enq; - bool enq_idump; - bool enq_gdump; + bool enq; + bool enq_idump; + bool enq_gdump; /* * Set to true during an early dump phase for tdata's which are @@ -188,22 +188,22 @@ struct prof_tdata_s { * to false so that they aren't accidentally included in later dump * phases. */ - bool dumping; + bool dumping; /* * True if profiling is active for this tdata's thread * (thread.prof.active mallctl). */ - bool active; + bool active; - bool attached; - bool expired; + bool attached; + bool expired; /* Temporary storage for summation during dump. */ - prof_cnt_t cnt_summed; + prof_cnt_t cnt_summed; /* Backtrace vector, used for calls to prof_backtrace(). */ - void **vec; + void **vec; }; typedef rb_tree(prof_tdata_t) prof_tdata_tree_t; @@ -212,9 +212,9 @@ struct prof_recent_s { nstime_t dalloc_time; ql_elm(prof_recent_t) link; - size_t size; - size_t usize; - atomic_p_t alloc_edata; /* NULL means allocation has been freed. */ + size_t size; + size_t usize; + atomic_p_t alloc_edata; /* NULL means allocation has been freed. 
*/ prof_tctx_t *alloc_tctx; prof_tctx_t *dalloc_tctx; }; diff --git a/include/jemalloc/internal/prof_sys.h b/include/jemalloc/internal/prof_sys.h index 42284b38..0745b991 100644 --- a/include/jemalloc/internal/prof_sys.h +++ b/include/jemalloc/internal/prof_sys.h @@ -6,30 +6,30 @@ #include "jemalloc/internal/mutex.h" extern malloc_mutex_t prof_dump_filename_mtx; -extern base_t *prof_base; +extern base_t *prof_base; void bt_init(prof_bt_t *bt, void **vec); void prof_backtrace(tsd_t *tsd, prof_bt_t *bt); void prof_hooks_init(void); void prof_unwind_init(void); void prof_sys_thread_name_fetch(tsd_t *tsd); -int prof_getpid(void); +int prof_getpid(void); void prof_get_default_filename(tsdn_t *tsdn, char *filename, uint64_t ind); bool prof_prefix_set(tsdn_t *tsdn, const char *prefix); void prof_fdump_impl(tsd_t *tsd); void prof_idump_impl(tsd_t *tsd); bool prof_mdump_impl(tsd_t *tsd, const char *filename); void prof_gdump_impl(tsd_t *tsd); -int prof_thread_stack_range(uintptr_t fp, uintptr_t *low, uintptr_t *high); +int prof_thread_stack_range(uintptr_t fp, uintptr_t *low, uintptr_t *high); /* Used in unit tests. 
*/ -typedef int (prof_sys_thread_name_read_t)(char *buf, size_t limit); +typedef int(prof_sys_thread_name_read_t)(char *buf, size_t limit); extern prof_sys_thread_name_read_t *JET_MUTABLE prof_sys_thread_name_read; -typedef int (prof_dump_open_file_t)(const char *, int); +typedef int(prof_dump_open_file_t)(const char *, int); extern prof_dump_open_file_t *JET_MUTABLE prof_dump_open_file; -typedef ssize_t (prof_dump_write_file_t)(int, const void *, size_t); +typedef ssize_t(prof_dump_write_file_t)(int, const void *, size_t); extern prof_dump_write_file_t *JET_MUTABLE prof_dump_write_file; -typedef int (prof_dump_open_maps_t)(void); +typedef int(prof_dump_open_maps_t)(void); extern prof_dump_open_maps_t *JET_MUTABLE prof_dump_open_maps; #endif /* JEMALLOC_INTERNAL_PROF_SYS_H */ diff --git a/include/jemalloc/internal/prof_types.h b/include/jemalloc/internal/prof_types.h index a27f7fb3..7468885e 100644 --- a/include/jemalloc/internal/prof_types.h +++ b/include/jemalloc/internal/prof_types.h @@ -1,22 +1,22 @@ #ifndef JEMALLOC_INTERNAL_PROF_TYPES_H #define JEMALLOC_INTERNAL_PROF_TYPES_H -typedef struct prof_bt_s prof_bt_t; -typedef struct prof_cnt_s prof_cnt_t; -typedef struct prof_tctx_s prof_tctx_t; -typedef struct prof_info_s prof_info_t; -typedef struct prof_gctx_s prof_gctx_t; -typedef struct prof_tdata_s prof_tdata_t; +typedef struct prof_bt_s prof_bt_t; +typedef struct prof_cnt_s prof_cnt_t; +typedef struct prof_tctx_s prof_tctx_t; +typedef struct prof_info_s prof_info_t; +typedef struct prof_gctx_s prof_gctx_t; +typedef struct prof_tdata_s prof_tdata_t; typedef struct prof_recent_s prof_recent_t; /* Option defaults. 
*/ #ifdef JEMALLOC_PROF -# define PROF_PREFIX_DEFAULT "jeprof" +# define PROF_PREFIX_DEFAULT "jeprof" #else -# define PROF_PREFIX_DEFAULT "" +# define PROF_PREFIX_DEFAULT "" #endif -#define LG_PROF_SAMPLE_DEFAULT 19 -#define LG_PROF_INTERVAL_DEFAULT -1 +#define LG_PROF_SAMPLE_DEFAULT 19 +#define LG_PROF_INTERVAL_DEFAULT -1 /* * Hard limit on stack backtrace depth. The version of prof_backtrace() that @@ -24,54 +24,54 @@ typedef struct prof_recent_s prof_recent_t; * of backtrace frame handlers, and should be kept in sync with this setting. */ #ifdef JEMALLOC_PROF_GCC -# define PROF_BT_MAX_LIMIT 256 +# define PROF_BT_MAX_LIMIT 256 #else -# define PROF_BT_MAX_LIMIT UINT_MAX +# define PROF_BT_MAX_LIMIT UINT_MAX #endif -#define PROF_BT_MAX_DEFAULT 128 +#define PROF_BT_MAX_DEFAULT 128 /* Initial hash table size. */ -#define PROF_CKH_MINITEMS 64 +#define PROF_CKH_MINITEMS 64 /* Size of memory buffer to use when writing dump files. */ #ifndef JEMALLOC_PROF /* Minimize memory bloat for non-prof builds. */ -# define PROF_DUMP_BUFSIZE 1 +# define PROF_DUMP_BUFSIZE 1 #elif defined(JEMALLOC_DEBUG) /* Use a small buffer size in debug build, mainly to facilitate testing. */ -# define PROF_DUMP_BUFSIZE 16 +# define PROF_DUMP_BUFSIZE 16 #else -# define PROF_DUMP_BUFSIZE 65536 +# define PROF_DUMP_BUFSIZE 65536 #endif /* Size of size class related tables */ #ifdef JEMALLOC_PROF -# define PROF_SC_NSIZES SC_NSIZES +# define PROF_SC_NSIZES SC_NSIZES #else /* Minimize memory bloat for non-prof builds. */ -# define PROF_SC_NSIZES 1 +# define PROF_SC_NSIZES 1 #endif /* Size of stack-allocated buffer used by prof_printf(). */ -#define PROF_PRINTF_BUFSIZE 128 +#define PROF_PRINTF_BUFSIZE 128 /* * Number of mutexes shared among all gctx's. No space is allocated for these * unless profiling is enabled, so it's okay to over-provision. */ -#define PROF_NCTX_LOCKS 1024 +#define PROF_NCTX_LOCKS 1024 /* * Number of mutexes shared among all tdata's. 
No space is allocated for these * unless profiling is enabled, so it's okay to over-provision. */ -#define PROF_NTDATA_LOCKS 256 +#define PROF_NTDATA_LOCKS 256 /* Minimize memory bloat for non-prof builds. */ #ifdef JEMALLOC_PROF -#define PROF_DUMP_FILENAME_LEN (PATH_MAX + 1) +# define PROF_DUMP_FILENAME_LEN (PATH_MAX + 1) #else -#define PROF_DUMP_FILENAME_LEN 1 +# define PROF_DUMP_FILENAME_LEN 1 #endif /* Default number of recent allocations to record. */ diff --git a/include/jemalloc/internal/psset.h b/include/jemalloc/internal/psset.h index ea608213..3fdecaed 100644 --- a/include/jemalloc/internal/psset.h +++ b/include/jemalloc/internal/psset.h @@ -90,7 +90,7 @@ struct psset_s { */ hpdata_age_heap_t pageslabs[PSSET_NPSIZES]; /* Bitmap for which set bits correspond to non-empty heaps. */ - fb_group_t pageslab_bitmap[FB_NGROUPS(PSSET_NPSIZES)]; + fb_group_t pageslab_bitmap[FB_NGROUPS(PSSET_NPSIZES)]; psset_stats_t stats; /* * Slabs with no active allocations, but which are allowed to serve new diff --git a/include/jemalloc/internal/ql.h b/include/jemalloc/internal/ql.h index ebe69988..9c1776a4 100644 --- a/include/jemalloc/internal/ql.h +++ b/include/jemalloc/internal/ql.h @@ -28,33 +28,36 @@ */ /* List definitions. */ -#define ql_head(a_type) \ -struct { \ - a_type *qlh_first; \ -} +#define ql_head(a_type) \ + struct { \ + a_type *qlh_first; \ + } /* Static initializer for an empty list. */ -#define ql_head_initializer(a_head) {NULL} +#define ql_head_initializer(a_head) \ + { NULL } /* The field definition. */ -#define ql_elm(a_type) qr(a_type) +#define ql_elm(a_type) qr(a_type) /* A pointer to the first element in the list, or NULL if the list is empty. */ #define ql_first(a_head) ((a_head)->qlh_first) /* Dynamically initializes a list. 
*/ -#define ql_new(a_head) do { \ - ql_first(a_head) = NULL; \ -} while (0) +#define ql_new(a_head) \ + do { \ + ql_first(a_head) = NULL; \ + } while (0) /* * Sets dest to be the contents of src (overwriting any elements there), leaving * src empty. */ -#define ql_move(a_head_dest, a_head_src) do { \ - ql_first(a_head_dest) = ql_first(a_head_src); \ - ql_new(a_head_src); \ -} while (0) +#define ql_move(a_head_dest, a_head_src) \ + do { \ + ql_first(a_head_dest) = ql_first(a_head_src); \ + ql_new(a_head_src); \ + } while (0) /* True if the list is empty, otherwise false. */ #define ql_empty(a_head) (ql_first(a_head) == NULL) @@ -68,85 +71,91 @@ struct { \ /* * Obtains the last item in the list. */ -#define ql_last(a_head, a_field) \ +#define ql_last(a_head, a_field) \ (ql_empty(a_head) ? NULL : qr_prev(ql_first(a_head), a_field)) /* * Gets a pointer to the next/prev element in the list. Trying to advance past * the end or retreat before the beginning of the list returns NULL. */ -#define ql_next(a_head, a_elm, a_field) \ - ((ql_last(a_head, a_field) != (a_elm)) \ - ? qr_next((a_elm), a_field) : NULL) -#define ql_prev(a_head, a_elm, a_field) \ - ((ql_first(a_head) != (a_elm)) ? qr_prev((a_elm), a_field) \ - : NULL) +#define ql_next(a_head, a_elm, a_field) \ + ((ql_last(a_head, a_field) != (a_elm)) ? qr_next((a_elm), a_field) \ + : NULL) +#define ql_prev(a_head, a_elm, a_field) \ + ((ql_first(a_head) != (a_elm)) ? qr_prev((a_elm), a_field) : NULL) /* Inserts a_elm before a_qlelm in the list. */ -#define ql_before_insert(a_head, a_qlelm, a_elm, a_field) do { \ - qr_before_insert((a_qlelm), (a_elm), a_field); \ - if (ql_first(a_head) == (a_qlelm)) { \ - ql_first(a_head) = (a_elm); \ - } \ -} while (0) +#define ql_before_insert(a_head, a_qlelm, a_elm, a_field) \ + do { \ + qr_before_insert((a_qlelm), (a_elm), a_field); \ + if (ql_first(a_head) == (a_qlelm)) { \ + ql_first(a_head) = (a_elm); \ + } \ + } while (0) /* Inserts a_elm after a_qlelm in the list. 
*/ -#define ql_after_insert(a_qlelm, a_elm, a_field) \ +#define ql_after_insert(a_qlelm, a_elm, a_field) \ qr_after_insert((a_qlelm), (a_elm), a_field) /* Inserts a_elm as the first item in the list. */ -#define ql_head_insert(a_head, a_elm, a_field) do { \ - if (!ql_empty(a_head)) { \ - qr_before_insert(ql_first(a_head), (a_elm), a_field); \ - } \ - ql_first(a_head) = (a_elm); \ -} while (0) +#define ql_head_insert(a_head, a_elm, a_field) \ + do { \ + if (!ql_empty(a_head)) { \ + qr_before_insert(ql_first(a_head), (a_elm), a_field); \ + } \ + ql_first(a_head) = (a_elm); \ + } while (0) /* Inserts a_elm as the last item in the list. */ -#define ql_tail_insert(a_head, a_elm, a_field) do { \ - if (!ql_empty(a_head)) { \ - qr_before_insert(ql_first(a_head), (a_elm), a_field); \ - } \ - ql_first(a_head) = qr_next((a_elm), a_field); \ -} while (0) +#define ql_tail_insert(a_head, a_elm, a_field) \ + do { \ + if (!ql_empty(a_head)) { \ + qr_before_insert(ql_first(a_head), (a_elm), a_field); \ + } \ + ql_first(a_head) = qr_next((a_elm), a_field); \ + } while (0) /* * Given lists a = [a_1, ..., a_n] and [b_1, ..., b_n], results in: * a = [a1, ..., a_n, b_1, ..., b_n] and b = []. */ -#define ql_concat(a_head_a, a_head_b, a_field) do { \ - if (ql_empty(a_head_a)) { \ - ql_move(a_head_a, a_head_b); \ - } else if (!ql_empty(a_head_b)) { \ - qr_meld(ql_first(a_head_a), ql_first(a_head_b), \ - a_field); \ - ql_new(a_head_b); \ - } \ -} while (0) +#define ql_concat(a_head_a, a_head_b, a_field) \ + do { \ + if (ql_empty(a_head_a)) { \ + ql_move(a_head_a, a_head_b); \ + } else if (!ql_empty(a_head_b)) { \ + qr_meld( \ + ql_first(a_head_a), ql_first(a_head_b), a_field); \ + ql_new(a_head_b); \ + } \ + } while (0) /* Removes a_elm from the list. 
*/ -#define ql_remove(a_head, a_elm, a_field) do { \ - if (ql_first(a_head) == (a_elm)) { \ - ql_first(a_head) = qr_next(ql_first(a_head), a_field); \ - } \ - if (ql_first(a_head) != (a_elm)) { \ - qr_remove((a_elm), a_field); \ - } else { \ - ql_new(a_head); \ - } \ -} while (0) +#define ql_remove(a_head, a_elm, a_field) \ + do { \ + if (ql_first(a_head) == (a_elm)) { \ + ql_first(a_head) = qr_next(ql_first(a_head), a_field); \ + } \ + if (ql_first(a_head) != (a_elm)) { \ + qr_remove((a_elm), a_field); \ + } else { \ + ql_new(a_head); \ + } \ + } while (0) /* Removes the first item in the list. */ -#define ql_head_remove(a_head, a_type, a_field) do { \ - a_type *t = ql_first(a_head); \ - ql_remove((a_head), t, a_field); \ -} while (0) +#define ql_head_remove(a_head, a_type, a_field) \ + do { \ + a_type *t = ql_first(a_head); \ + ql_remove((a_head), t, a_field); \ + } while (0) /* Removes the last item in the list. */ -#define ql_tail_remove(a_head, a_type, a_field) do { \ - a_type *t = ql_last(a_head, a_field); \ - ql_remove((a_head), t, a_field); \ -} while (0) +#define ql_tail_remove(a_head, a_type, a_field) \ + do { \ + a_type *t = ql_last(a_head, a_field); \ + ql_remove((a_head), t, a_field); \ + } while (0) /* * Given a = [a_1, a_2, ..., a_n-1, a_n, a_n+1, ...], @@ -155,14 +164,15 @@ struct { \ * and replaces b's contents with: * b = [a_n, a_n+1, ...] 
*/ -#define ql_split(a_head_a, a_elm, a_head_b, a_field) do { \ - if (ql_first(a_head_a) == (a_elm)) { \ - ql_move(a_head_b, a_head_a); \ - } else { \ - qr_split(ql_first(a_head_a), (a_elm), a_field); \ - ql_first(a_head_b) = (a_elm); \ - } \ -} while (0) +#define ql_split(a_head_a, a_elm, a_head_b, a_field) \ + do { \ + if (ql_first(a_head_a) == (a_elm)) { \ + ql_move(a_head_b, a_head_a); \ + } else { \ + qr_split(ql_first(a_head_a), (a_elm), a_field); \ + ql_first(a_head_b) = (a_elm); \ + } \ + } while (0) /* * An optimized version of: @@ -170,9 +180,10 @@ struct { \ * ql_remove((a_head), t, a_field); * ql_tail_insert((a_head), t, a_field); */ -#define ql_rotate(a_head, a_field) do { \ - ql_first(a_head) = qr_next(ql_first(a_head), a_field); \ -} while (0) +#define ql_rotate(a_head, a_field) \ + do { \ + ql_first(a_head) = qr_next(ql_first(a_head), a_field); \ + } while (0) /* * Helper macro to iterate over each element in a list in order, starting from @@ -189,10 +200,10 @@ struct { \ * } */ -#define ql_foreach(a_var, a_head, a_field) \ - qr_foreach((a_var), ql_first(a_head), a_field) +#define ql_foreach(a_var, a_head, a_field) \ + qr_foreach ((a_var), ql_first(a_head), a_field) -#define ql_reverse_foreach(a_var, a_head, a_field) \ +#define ql_reverse_foreach(a_var, a_head, a_field) \ qr_reverse_foreach((a_var), ql_first(a_head), a_field) #endif /* JEMALLOC_INTERNAL_QL_H */ diff --git a/include/jemalloc/internal/qr.h b/include/jemalloc/internal/qr.h index ece4f556..1bd61f38 100644 --- a/include/jemalloc/internal/qr.h +++ b/include/jemalloc/internal/qr.h @@ -17,21 +17,22 @@ */ /* Ring definitions. */ -#define qr(a_type) \ -struct { \ - a_type *qre_next; \ - a_type *qre_prev; \ -} +#define qr(a_type) \ + struct { \ + a_type *qre_next; \ + a_type *qre_prev; \ + } /* * Initialize a qr link. Every link must be initialized before being used, even * if that initialization is going to be immediately overwritten (say, by being * passed into an insertion macro). 
*/ -#define qr_new(a_qr, a_field) do { \ - (a_qr)->a_field.qre_next = (a_qr); \ - (a_qr)->a_field.qre_prev = (a_qr); \ -} while (0) +#define qr_new(a_qr, a_field) \ + do { \ + (a_qr)->a_field.qre_next = (a_qr); \ + (a_qr)->a_field.qre_prev = (a_qr); \ + } while (0) /* * Go forwards or backwards in the ring. Note that (the ring being circular), this @@ -58,26 +59,27 @@ struct { \ * * a_qr_a can directly be a qr_next() macro, but a_qr_b cannot. */ -#define qr_meld(a_qr_a, a_qr_b, a_field) do { \ - (a_qr_b)->a_field.qre_prev->a_field.qre_next = \ - (a_qr_a)->a_field.qre_prev; \ - (a_qr_a)->a_field.qre_prev = (a_qr_b)->a_field.qre_prev; \ - (a_qr_b)->a_field.qre_prev = \ - (a_qr_b)->a_field.qre_prev->a_field.qre_next; \ - (a_qr_a)->a_field.qre_prev->a_field.qre_next = (a_qr_a); \ - (a_qr_b)->a_field.qre_prev->a_field.qre_next = (a_qr_b); \ -} while (0) +#define qr_meld(a_qr_a, a_qr_b, a_field) \ + do { \ + (a_qr_b)->a_field.qre_prev->a_field.qre_next = \ + (a_qr_a)->a_field.qre_prev; \ + (a_qr_a)->a_field.qre_prev = (a_qr_b)->a_field.qre_prev; \ + (a_qr_b)->a_field.qre_prev = \ + (a_qr_b)->a_field.qre_prev->a_field.qre_next; \ + (a_qr_a)->a_field.qre_prev->a_field.qre_next = (a_qr_a); \ + (a_qr_b)->a_field.qre_prev->a_field.qre_next = (a_qr_b); \ + } while (0) /* * Logically, this is just a meld. The intent, though, is that a_qrelm is a * single-element ring, so that "before" has a more obvious interpretation than * meld. */ -#define qr_before_insert(a_qrelm, a_qr, a_field) \ +#define qr_before_insert(a_qrelm, a_qr, a_field) \ qr_meld((a_qrelm), (a_qr), a_field) /* Ditto, but inserting after rather than before. */ -#define qr_after_insert(a_qrelm, a_qr, a_field) \ +#define qr_after_insert(a_qrelm, a_qr, a_field) \ qr_before_insert(qr_next(a_qrelm, a_field), (a_qr), a_field) /* @@ -98,14 +100,13 @@ struct { \ * qr_meld() and qr_split() are functionally equivalent, so there's no need to * have two copies of the code. 
*/ -#define qr_split(a_qr_a, a_qr_b, a_field) \ - qr_meld((a_qr_a), (a_qr_b), a_field) +#define qr_split(a_qr_a, a_qr_b, a_field) qr_meld((a_qr_a), (a_qr_b), a_field) /* * Splits off a_qr from the rest of its ring, so that it becomes a * single-element ring. */ -#define qr_remove(a_qr, a_field) \ +#define qr_remove(a_qr, a_field) \ qr_split(qr_next(a_qr, a_field), (a_qr), a_field) /* @@ -121,20 +122,19 @@ struct { \ * return sum; * } */ -#define qr_foreach(var, a_qr, a_field) \ - for ((var) = (a_qr); \ - (var) != NULL; \ - (var) = (((var)->a_field.qre_next != (a_qr)) \ - ? (var)->a_field.qre_next : NULL)) +#define qr_foreach(var, a_qr, a_field) \ + for ((var) = (a_qr); (var) != NULL; \ + (var) = (((var)->a_field.qre_next != (a_qr)) \ + ? (var)->a_field.qre_next \ + : NULL)) /* * The same (and with the same usage) as qr_foreach, but in the opposite order, * ending with a_qr. */ -#define qr_reverse_foreach(var, a_qr, a_field) \ - for ((var) = ((a_qr) != NULL) ? qr_prev(a_qr, a_field) : NULL; \ - (var) != NULL; \ - (var) = (((var) != (a_qr)) \ - ? (var)->a_field.qre_prev : NULL)) +#define qr_reverse_foreach(var, a_qr, a_field) \ + for ((var) = ((a_qr) != NULL) ? qr_prev(a_qr, a_field) : NULL; \ + (var) != NULL; \ + (var) = (((var) != (a_qr)) ? (var)->a_field.qre_prev : NULL)) #endif /* JEMALLOC_INTERNAL_QR_H */ diff --git a/include/jemalloc/internal/quantum.h b/include/jemalloc/internal/quantum.h index b4beb309..2f7c0466 100644 --- a/include/jemalloc/internal/quantum.h +++ b/include/jemalloc/internal/quantum.h @@ -6,82 +6,84 @@ * classes). 
*/ #ifndef LG_QUANTUM -# if (defined(__i386__) || defined(_M_IX86)) -# define LG_QUANTUM 4 -# endif -# ifdef __ia64__ -# define LG_QUANTUM 4 -# endif -# ifdef __alpha__ -# define LG_QUANTUM 4 -# endif -# if (defined(__sparc64__) || defined(__sparcv9) || defined(__sparc_v9__)) -# define LG_QUANTUM 4 -# endif -# if (defined(__amd64__) || defined(__x86_64__) || defined(_M_X64)) -# define LG_QUANTUM 4 -# endif -# ifdef __arm__ -# define LG_QUANTUM 3 -# endif -# if defined(__aarch64__) || defined(_M_ARM64) -# define LG_QUANTUM 4 -# endif -# ifdef __hppa__ -# define LG_QUANTUM 4 -# endif -# ifdef __loongarch__ -# define LG_QUANTUM 4 -# endif -# ifdef __m68k__ -# define LG_QUANTUM 3 -# endif -# ifdef __mips__ -# if defined(__mips_n32) || defined(__mips_n64) -# define LG_QUANTUM 4 -# else -# define LG_QUANTUM 3 -# endif -# endif -# ifdef __nios2__ -# define LG_QUANTUM 3 -# endif -# ifdef __or1k__ -# define LG_QUANTUM 3 -# endif -# if defined(__powerpc__) || defined(__ppc__) || defined(__powerpc64__) || defined(__ppc64__) -# define LG_QUANTUM 4 -# endif -# if defined(__riscv) || defined(__riscv__) -# define LG_QUANTUM 4 -# endif -# ifdef __s390__ -# define LG_QUANTUM 4 -# endif -# if (defined (__SH3E__) || defined(__SH4_SINGLE__) || defined(__SH4__) || \ - defined(__SH4_SINGLE_ONLY__)) -# define LG_QUANTUM 4 -# endif -# ifdef __tile__ -# define LG_QUANTUM 4 -# endif -# ifdef __le32__ -# define LG_QUANTUM 4 -# endif -# ifdef __arc__ -# define LG_QUANTUM 3 -# endif -# ifndef LG_QUANTUM -# error "Unknown minimum alignment for architecture; specify via " - "--with-lg-quantum" -# endif +# if (defined(__i386__) || defined(_M_IX86)) +# define LG_QUANTUM 4 +# endif +# ifdef __ia64__ +# define LG_QUANTUM 4 +# endif +# ifdef __alpha__ +# define LG_QUANTUM 4 +# endif +# if (defined(__sparc64__) || defined(__sparcv9) \ + || defined(__sparc_v9__)) +# define LG_QUANTUM 4 +# endif +# if (defined(__amd64__) || defined(__x86_64__) || defined(_M_X64)) +# define LG_QUANTUM 4 +# endif +# ifdef 
__arm__ +# define LG_QUANTUM 3 +# endif +# if defined(__aarch64__) || defined(_M_ARM64) +# define LG_QUANTUM 4 +# endif +# ifdef __hppa__ +# define LG_QUANTUM 4 +# endif +# ifdef __loongarch__ +# define LG_QUANTUM 4 +# endif +# ifdef __m68k__ +# define LG_QUANTUM 3 +# endif +# ifdef __mips__ +# if defined(__mips_n32) || defined(__mips_n64) +# define LG_QUANTUM 4 +# else +# define LG_QUANTUM 3 +# endif +# endif +# ifdef __nios2__ +# define LG_QUANTUM 3 +# endif +# ifdef __or1k__ +# define LG_QUANTUM 3 +# endif +# if defined(__powerpc__) || defined(__ppc__) || defined(__powerpc64__) \ + || defined(__ppc64__) +# define LG_QUANTUM 4 +# endif +# if defined(__riscv) || defined(__riscv__) +# define LG_QUANTUM 4 +# endif +# ifdef __s390__ +# define LG_QUANTUM 4 +# endif +# if (defined(__SH3E__) || defined(__SH4_SINGLE__) || defined(__SH4__) \ + || defined(__SH4_SINGLE_ONLY__)) +# define LG_QUANTUM 4 +# endif +# ifdef __tile__ +# define LG_QUANTUM 4 +# endif +# ifdef __le32__ +# define LG_QUANTUM 4 +# endif +# ifdef __arc__ +# define LG_QUANTUM 3 +# endif +# ifndef LG_QUANTUM +# error \ + "Unknown minimum alignment for architecture; specify via " +"--with-lg-quantum" +# endif #endif -#define QUANTUM ((size_t)(1U << LG_QUANTUM)) -#define QUANTUM_MASK (QUANTUM - 1) +#define QUANTUM ((size_t)(1U << LG_QUANTUM)) +#define QUANTUM_MASK (QUANTUM - 1) /* Return the smallest quantum multiple that is >= a. 
*/ -#define QUANTUM_CEILING(a) \ - (((a) + QUANTUM_MASK) & ~QUANTUM_MASK) +#define QUANTUM_CEILING(a) (((a) + QUANTUM_MASK) & ~QUANTUM_MASK) #endif /* JEMALLOC_INTERNAL_QUANTUM_H */ diff --git a/include/jemalloc/internal/rb.h b/include/jemalloc/internal/rb.h index 235d548e..58510e4d 100644 --- a/include/jemalloc/internal/rb.h +++ b/include/jemalloc/internal/rb.h @@ -26,7 +26,7 @@ */ #ifndef __PGI -#define RB_COMPACT +# define RB_COMPACT #endif /* diff --git a/include/jemalloc/internal/rtree.h b/include/jemalloc/internal/rtree.h index f35368ae..07205958 100644 --- a/include/jemalloc/internal/rtree.h +++ b/include/jemalloc/internal/rtree.h @@ -18,48 +18,49 @@ */ /* Number of high insignificant bits. */ -#define RTREE_NHIB ((1U << (LG_SIZEOF_PTR+3)) - LG_VADDR) +#define RTREE_NHIB ((1U << (LG_SIZEOF_PTR + 3)) - LG_VADDR) /* Number of low insigificant bits. */ #define RTREE_NLIB LG_PAGE /* Number of significant bits. */ #define RTREE_NSB (LG_VADDR - RTREE_NLIB) /* Number of levels in radix tree. */ #if RTREE_NSB <= 10 -# define RTREE_HEIGHT 1 +# define RTREE_HEIGHT 1 #elif RTREE_NSB <= 36 -# define RTREE_HEIGHT 2 +# define RTREE_HEIGHT 2 #elif RTREE_NSB <= 52 -# define RTREE_HEIGHT 3 +# define RTREE_HEIGHT 3 #else -# error Unsupported number of significant virtual address bits +# error Unsupported number of significant virtual address bits #endif /* Use compact leaf representation if virtual address encoding allows. */ #if RTREE_NHIB >= LG_CEIL(SC_NSIZES) -# define RTREE_LEAF_COMPACT +# define RTREE_LEAF_COMPACT #endif typedef struct rtree_node_elm_s rtree_node_elm_t; struct rtree_node_elm_s { - atomic_p_t child; /* (rtree_{node,leaf}_elm_t *) */ + atomic_p_t child; /* (rtree_{node,leaf}_elm_t *) */ }; typedef struct rtree_metadata_s rtree_metadata_t; struct rtree_metadata_s { - szind_t szind; - extent_state_t state; /* Mirrors edata->state. */ - bool is_head; /* Mirrors edata->is_head. */ - bool slab; + szind_t szind; + extent_state_t state; /* Mirrors edata->state. 
*/ + bool is_head; /* Mirrors edata->is_head. */ + bool slab; }; typedef struct rtree_contents_s rtree_contents_t; struct rtree_contents_s { - edata_t *edata; + edata_t *edata; rtree_metadata_t metadata; }; #define RTREE_LEAF_STATE_WIDTH EDATA_BITS_STATE_WIDTH #define RTREE_LEAF_STATE_SHIFT 2 -#define RTREE_LEAF_STATE_MASK MASK(RTREE_LEAF_STATE_WIDTH, RTREE_LEAF_STATE_SHIFT) +#define RTREE_LEAF_STATE_MASK \ + MASK(RTREE_LEAF_STATE_WIDTH, RTREE_LEAF_STATE_SHIFT) struct rtree_leaf_elm_s { #ifdef RTREE_LEAF_COMPACT @@ -77,36 +78,36 @@ struct rtree_leaf_elm_s { * * 00000000 xxxxxxxx eeeeeeee [...] eeeeeeee e00ssshb */ - atomic_p_t le_bits; + atomic_p_t le_bits; #else - atomic_p_t le_edata; /* (edata_t *) */ + atomic_p_t le_edata; /* (edata_t *) */ /* * From high to low bits: szind (8 bits), state (4 bits), is_head, slab */ - atomic_u_t le_metadata; + atomic_u_t le_metadata; #endif }; typedef struct rtree_level_s rtree_level_t; struct rtree_level_s { /* Number of key bits distinguished by this level. */ - unsigned bits; + unsigned bits; /* * Cumulative number of key bits distinguished by traversing to * corresponding tree level. */ - unsigned cumbits; + unsigned cumbits; }; typedef struct rtree_s rtree_t; struct rtree_s { - base_t *base; - malloc_mutex_t init_lock; + base_t *base; + malloc_mutex_t init_lock; /* Number of elements based on rtree_levels[0].bits. 
*/ #if RTREE_HEIGHT > 1 - rtree_node_elm_t root[1U << (RTREE_NSB/RTREE_HEIGHT)]; + rtree_node_elm_t root[1U << (RTREE_NSB / RTREE_HEIGHT)]; #else - rtree_leaf_elm_t root[1U << (RTREE_NSB/RTREE_HEIGHT)]; + rtree_leaf_elm_t root[1U << (RTREE_NSB / RTREE_HEIGHT)]; #endif }; @@ -118,17 +119,17 @@ struct rtree_s { */ static const rtree_level_t rtree_levels[] = { #if RTREE_HEIGHT == 1 - {RTREE_NSB, RTREE_NHIB + RTREE_NSB} + {RTREE_NSB, RTREE_NHIB + RTREE_NSB} #elif RTREE_HEIGHT == 2 - {RTREE_NSB/2, RTREE_NHIB + RTREE_NSB/2}, - {RTREE_NSB/2 + RTREE_NSB%2, RTREE_NHIB + RTREE_NSB} + {RTREE_NSB / 2, RTREE_NHIB + RTREE_NSB / 2}, + {RTREE_NSB / 2 + RTREE_NSB % 2, RTREE_NHIB + RTREE_NSB} #elif RTREE_HEIGHT == 3 - {RTREE_NSB/3, RTREE_NHIB + RTREE_NSB/3}, - {RTREE_NSB/3 + RTREE_NSB%3/2, - RTREE_NHIB + RTREE_NSB/3*2 + RTREE_NSB%3/2}, - {RTREE_NSB/3 + RTREE_NSB%3 - RTREE_NSB%3/2, RTREE_NHIB + RTREE_NSB} + {RTREE_NSB / 3, RTREE_NHIB + RTREE_NSB / 3}, + {RTREE_NSB / 3 + RTREE_NSB % 3 / 2, + RTREE_NHIB + RTREE_NSB / 3 * 2 + RTREE_NSB % 3 / 2}, + {RTREE_NSB / 3 + RTREE_NSB % 3 - RTREE_NSB % 3 / 2, RTREE_NHIB + RTREE_NSB} #else -# error Unsupported rtree height +# error Unsupported rtree height #endif }; @@ -139,9 +140,9 @@ rtree_leaf_elm_t *rtree_leaf_elm_lookup_hard(tsdn_t *tsdn, rtree_t *rtree, JEMALLOC_ALWAYS_INLINE unsigned rtree_leaf_maskbits(void) { - unsigned ptrbits = ZU(1) << (LG_SIZEOF_PTR+3); - unsigned cumbits = (rtree_levels[RTREE_HEIGHT-1].cumbits - - rtree_levels[RTREE_HEIGHT-1].bits); + unsigned ptrbits = ZU(1) << (LG_SIZEOF_PTR + 3); + unsigned cumbits = (rtree_levels[RTREE_HEIGHT - 1].cumbits + - rtree_levels[RTREE_HEIGHT - 1].bits); return ptrbits - cumbits; } @@ -153,16 +154,16 @@ rtree_leafkey(uintptr_t key) { JEMALLOC_ALWAYS_INLINE size_t rtree_cache_direct_map(uintptr_t key) { - return (size_t)((key >> rtree_leaf_maskbits()) & - (RTREE_CTX_NCACHE - 1)); + return ( + size_t)((key >> rtree_leaf_maskbits()) & (RTREE_CTX_NCACHE - 1)); } JEMALLOC_ALWAYS_INLINE 
uintptr_t rtree_subkey(uintptr_t key, unsigned level) { - unsigned ptrbits = ZU(1) << (LG_SIZEOF_PTR+3); - unsigned cumbits = rtree_levels[level].cumbits; - unsigned shiftbits = ptrbits - cumbits; - unsigned maskbits = rtree_levels[level].bits; + unsigned ptrbits = ZU(1) << (LG_SIZEOF_PTR + 3); + unsigned cumbits = rtree_levels[level].cumbits; + unsigned shiftbits = ptrbits - cumbits; + unsigned maskbits = rtree_levels[level].bits; uintptr_t mask = (ZU(1) << maskbits) - 1; return ((key >> shiftbits) & mask); } @@ -178,12 +179,12 @@ rtree_subkey(uintptr_t key, unsigned level) { * dependent on a previous rtree write, which means a stale read * could result if synchronization were omitted here. */ -# ifdef RTREE_LEAF_COMPACT +#ifdef RTREE_LEAF_COMPACT JEMALLOC_ALWAYS_INLINE uintptr_t -rtree_leaf_elm_bits_read(tsdn_t *tsdn, rtree_t *rtree, - rtree_leaf_elm_t *elm, bool dependent) { - return (uintptr_t)atomic_load_p(&elm->le_bits, dependent - ? ATOMIC_RELAXED : ATOMIC_ACQUIRE); +rtree_leaf_elm_bits_read( + tsdn_t *tsdn, rtree_t *rtree, rtree_leaf_elm_t *elm, bool dependent) { + return (uintptr_t)atomic_load_p( + &elm->le_bits, dependent ? 
ATOMIC_RELAXED : ATOMIC_ACQUIRE); } JEMALLOC_ALWAYS_INLINE uintptr_t @@ -195,10 +196,10 @@ rtree_leaf_elm_bits_encode(rtree_contents_t contents) { uintptr_t szind_bits = (uintptr_t)contents.metadata.szind << LG_VADDR; uintptr_t slab_bits = (uintptr_t)contents.metadata.slab; uintptr_t is_head_bits = (uintptr_t)contents.metadata.is_head << 1; - uintptr_t state_bits = (uintptr_t)contents.metadata.state << - RTREE_LEAF_STATE_SHIFT; - uintptr_t metadata_bits = szind_bits | state_bits | is_head_bits | - slab_bits; + uintptr_t state_bits = (uintptr_t)contents.metadata.state + << RTREE_LEAF_STATE_SHIFT; + uintptr_t metadata_bits = szind_bits | state_bits | is_head_bits + | slab_bits; assert((edata_bits & metadata_bits) == 0); return edata_bits | metadata_bits; @@ -212,13 +213,13 @@ rtree_leaf_elm_bits_decode(uintptr_t bits) { contents.metadata.slab = (bool)(bits & 1); contents.metadata.is_head = (bool)(bits & (1 << 1)); - uintptr_t state_bits = (bits & RTREE_LEAF_STATE_MASK) >> - RTREE_LEAF_STATE_SHIFT; + uintptr_t state_bits = (bits & RTREE_LEAF_STATE_MASK) + >> RTREE_LEAF_STATE_SHIFT; assert(state_bits <= extent_state_max); contents.metadata.state = (extent_state_t)state_bits; uintptr_t low_bit_mask = ~((uintptr_t)EDATA_ALIGNMENT - 1); -# ifdef __aarch64__ +# ifdef __aarch64__ /* * aarch64 doesn't sign extend the highest virtual address bit to set * the higher ones. Instead, the high bits get zeroed. @@ -228,49 +229,50 @@ rtree_leaf_elm_bits_decode(uintptr_t bits) { uintptr_t mask = high_bit_mask & low_bit_mask; /* NOLINTNEXTLINE(performance-no-int-to-ptr) */ contents.edata = (edata_t *)(bits & mask); -# else +# else /* Restore sign-extended high bits, mask metadata bits. 
*/ /* NOLINTNEXTLINE(performance-no-int-to-ptr) */ contents.edata = (edata_t *)((uintptr_t)((intptr_t)(bits << RTREE_NHIB) - >> RTREE_NHIB) & low_bit_mask); -# endif + >> RTREE_NHIB) + & low_bit_mask); +# endif assert((uintptr_t)contents.edata % (uintptr_t)EDATA_ALIGNMENT == 0); return contents; } -# endif /* RTREE_LEAF_COMPACT */ +#endif /* RTREE_LEAF_COMPACT */ JEMALLOC_ALWAYS_INLINE rtree_contents_t -rtree_leaf_elm_read(tsdn_t *tsdn, rtree_t *rtree, rtree_leaf_elm_t *elm, - bool dependent) { +rtree_leaf_elm_read( + tsdn_t *tsdn, rtree_t *rtree, rtree_leaf_elm_t *elm, bool dependent) { #ifdef RTREE_LEAF_COMPACT uintptr_t bits = rtree_leaf_elm_bits_read(tsdn, rtree, elm, dependent); rtree_contents_t contents = rtree_leaf_elm_bits_decode(bits); return contents; #else rtree_contents_t contents; - unsigned metadata_bits = atomic_load_u(&elm->le_metadata, dependent - ? ATOMIC_RELAXED : ATOMIC_ACQUIRE); + unsigned metadata_bits = atomic_load_u( + &elm->le_metadata, dependent ? ATOMIC_RELAXED : ATOMIC_ACQUIRE); contents.metadata.slab = (bool)(metadata_bits & 1); contents.metadata.is_head = (bool)(metadata_bits & (1 << 1)); - uintptr_t state_bits = (metadata_bits & RTREE_LEAF_STATE_MASK) >> - RTREE_LEAF_STATE_SHIFT; + uintptr_t state_bits = (metadata_bits & RTREE_LEAF_STATE_MASK) + >> RTREE_LEAF_STATE_SHIFT; assert(state_bits <= extent_state_max); contents.metadata.state = (extent_state_t)state_bits; - contents.metadata.szind = metadata_bits >> (RTREE_LEAF_STATE_SHIFT + - RTREE_LEAF_STATE_WIDTH); + contents.metadata.szind = metadata_bits + >> (RTREE_LEAF_STATE_SHIFT + RTREE_LEAF_STATE_WIDTH); - contents.edata = (edata_t *)atomic_load_p(&elm->le_edata, dependent - ? ATOMIC_RELAXED : ATOMIC_ACQUIRE); + contents.edata = (edata_t *)atomic_load_p( + &elm->le_edata, dependent ? 
ATOMIC_RELAXED : ATOMIC_ACQUIRE); return contents; #endif } JEMALLOC_ALWAYS_INLINE void -rtree_contents_encode(rtree_contents_t contents, void **bits, - unsigned *additional) { +rtree_contents_encode( + rtree_contents_t contents, void **bits, unsigned *additional) { #ifdef RTREE_LEAF_COMPACT /* NOLINTNEXTLINE(performance-no-int-to-ptr) */ *bits = (void *)rtree_leaf_elm_bits_encode(contents); @@ -282,15 +284,15 @@ rtree_contents_encode(rtree_contents_t contents, void **bits, *additional = (unsigned)contents.metadata.slab | ((unsigned)contents.metadata.is_head << 1) | ((unsigned)contents.metadata.state << RTREE_LEAF_STATE_SHIFT) - | ((unsigned)contents.metadata.szind << (RTREE_LEAF_STATE_SHIFT + - RTREE_LEAF_STATE_WIDTH)); + | ((unsigned)contents.metadata.szind + << (RTREE_LEAF_STATE_SHIFT + RTREE_LEAF_STATE_WIDTH)); *bits = contents.edata; #endif } JEMALLOC_ALWAYS_INLINE void -rtree_leaf_elm_write_commit(tsdn_t *tsdn, rtree_t *rtree, - rtree_leaf_elm_t *elm, void *bits, unsigned additional) { +rtree_leaf_elm_write_commit(tsdn_t *tsdn, rtree_t *rtree, rtree_leaf_elm_t *elm, + void *bits, unsigned additional) { #ifdef RTREE_LEAF_COMPACT atomic_store_p(&elm->le_bits, bits, ATOMIC_RELEASE); #else @@ -304,10 +306,10 @@ rtree_leaf_elm_write_commit(tsdn_t *tsdn, rtree_t *rtree, } JEMALLOC_ALWAYS_INLINE void -rtree_leaf_elm_write(tsdn_t *tsdn, rtree_t *rtree, - rtree_leaf_elm_t *elm, rtree_contents_t contents) { +rtree_leaf_elm_write(tsdn_t *tsdn, rtree_t *rtree, rtree_leaf_elm_t *elm, + rtree_contents_t contents) { assert((uintptr_t)contents.edata % EDATA_ALIGNMENT == 0); - void *bits; + void *bits; unsigned additional; rtree_contents_encode(contents, &bits, &additional); rtree_leaf_elm_write_commit(tsdn, rtree, elm, bits, additional); @@ -348,7 +350,7 @@ rtree_leaf_elm_state_update(tsdn_t *tsdn, rtree_t *rtree, JEMALLOC_ALWAYS_INLINE bool rtree_leaf_elm_lookup_fast(tsdn_t *tsdn, rtree_t *rtree, rtree_ctx_t *rtree_ctx, uintptr_t key, rtree_leaf_elm_t **elm) { - size_t slot 
= rtree_cache_direct_map(key); + size_t slot = rtree_cache_direct_map(key); uintptr_t leafkey = rtree_leafkey(key); assert(leafkey != RTREE_LEAFKEY_INVALID); @@ -358,7 +360,7 @@ rtree_leaf_elm_lookup_fast(tsdn_t *tsdn, rtree_t *rtree, rtree_ctx_t *rtree_ctx, rtree_leaf_elm_t *leaf = rtree_ctx->cache[slot].leaf; assert(leaf != NULL); - uintptr_t subkey = rtree_subkey(key, RTREE_HEIGHT-1); + uintptr_t subkey = rtree_subkey(key, RTREE_HEIGHT - 1); *elm = &leaf[subkey]; return false; @@ -370,7 +372,7 @@ rtree_leaf_elm_lookup(tsdn_t *tsdn, rtree_t *rtree, rtree_ctx_t *rtree_ctx, assert(key != 0); assert(!dependent || !init_missing); - size_t slot = rtree_cache_direct_map(key); + size_t slot = rtree_cache_direct_map(key); uintptr_t leafkey = rtree_leafkey(key); assert(leafkey != RTREE_LEAFKEY_INVALID); @@ -378,39 +380,41 @@ rtree_leaf_elm_lookup(tsdn_t *tsdn, rtree_t *rtree, rtree_ctx_t *rtree_ctx, if (likely(rtree_ctx->cache[slot].leafkey == leafkey)) { rtree_leaf_elm_t *leaf = rtree_ctx->cache[slot].leaf; assert(leaf != NULL); - uintptr_t subkey = rtree_subkey(key, RTREE_HEIGHT-1); + uintptr_t subkey = rtree_subkey(key, RTREE_HEIGHT - 1); return &leaf[subkey]; } /* * Search the L2 LRU cache. On hit, swap the matching element into the * slot in L1 cache, and move the position in L2 up by 1. */ -#define RTREE_CACHE_CHECK_L2(i) do { \ - if (likely(rtree_ctx->l2_cache[i].leafkey == leafkey)) { \ - rtree_leaf_elm_t *leaf = rtree_ctx->l2_cache[i].leaf; \ - assert(leaf != NULL); \ - if (i > 0) { \ - /* Bubble up by one. 
*/ \ - rtree_ctx->l2_cache[i].leafkey = \ - rtree_ctx->l2_cache[i - 1].leafkey; \ - rtree_ctx->l2_cache[i].leaf = \ - rtree_ctx->l2_cache[i - 1].leaf; \ - rtree_ctx->l2_cache[i - 1].leafkey = \ - rtree_ctx->cache[slot].leafkey; \ - rtree_ctx->l2_cache[i - 1].leaf = \ - rtree_ctx->cache[slot].leaf; \ - } else { \ - rtree_ctx->l2_cache[0].leafkey = \ - rtree_ctx->cache[slot].leafkey; \ - rtree_ctx->l2_cache[0].leaf = \ - rtree_ctx->cache[slot].leaf; \ - } \ - rtree_ctx->cache[slot].leafkey = leafkey; \ - rtree_ctx->cache[slot].leaf = leaf; \ - uintptr_t subkey = rtree_subkey(key, RTREE_HEIGHT-1); \ - return &leaf[subkey]; \ - } \ -} while (0) +#define RTREE_CACHE_CHECK_L2(i) \ + do { \ + if (likely(rtree_ctx->l2_cache[i].leafkey == leafkey)) { \ + rtree_leaf_elm_t *leaf = rtree_ctx->l2_cache[i].leaf; \ + assert(leaf != NULL); \ + if (i > 0) { \ + /* Bubble up by one. */ \ + rtree_ctx->l2_cache[i].leafkey = \ + rtree_ctx->l2_cache[i - 1].leafkey; \ + rtree_ctx->l2_cache[i].leaf = \ + rtree_ctx->l2_cache[i - 1].leaf; \ + rtree_ctx->l2_cache[i - 1].leafkey = \ + rtree_ctx->cache[slot].leafkey; \ + rtree_ctx->l2_cache[i - 1].leaf = \ + rtree_ctx->cache[slot].leaf; \ + } else { \ + rtree_ctx->l2_cache[0].leafkey = \ + rtree_ctx->cache[slot].leafkey; \ + rtree_ctx->l2_cache[0].leaf = \ + rtree_ctx->cache[slot].leaf; \ + } \ + rtree_ctx->cache[slot].leafkey = leafkey; \ + rtree_ctx->cache[slot].leaf = leaf; \ + uintptr_t subkey = rtree_subkey( \ + key, RTREE_HEIGHT - 1); \ + return &leaf[subkey]; \ + } \ + } while (0) /* Check the first cache entry. */ RTREE_CACHE_CHECK_L2(0); /* Search the remaining cache elements. 
*/ @@ -419,8 +423,8 @@ rtree_leaf_elm_lookup(tsdn_t *tsdn, rtree_t *rtree, rtree_ctx_t *rtree_ctx, } #undef RTREE_CACHE_CHECK_L2 - return rtree_leaf_elm_lookup_hard(tsdn, rtree, rtree_ctx, key, - dependent, init_missing); + return rtree_leaf_elm_lookup_hard( + tsdn, rtree, rtree_ctx, key, dependent, init_missing); } /* @@ -440,8 +444,8 @@ rtree_read_independent(tsdn_t *tsdn, rtree_t *rtree, rtree_ctx_t *rtree_ctx, } static inline rtree_contents_t -rtree_read(tsdn_t *tsdn, rtree_t *rtree, rtree_ctx_t *rtree_ctx, - uintptr_t key) { +rtree_read( + tsdn_t *tsdn, rtree_t *rtree, rtree_ctx_t *rtree_ctx, uintptr_t key) { rtree_leaf_elm_t *elm = rtree_leaf_elm_lookup(tsdn, rtree, rtree_ctx, key, /* dependent */ true, /* init_missing */ false); assert(elm != NULL); @@ -449,21 +453,22 @@ rtree_read(tsdn_t *tsdn, rtree_t *rtree, rtree_ctx_t *rtree_ctx, } static inline rtree_metadata_t -rtree_metadata_read(tsdn_t *tsdn, rtree_t *rtree, rtree_ctx_t *rtree_ctx, - uintptr_t key) { +rtree_metadata_read( + tsdn_t *tsdn, rtree_t *rtree, rtree_ctx_t *rtree_ctx, uintptr_t key) { rtree_leaf_elm_t *elm = rtree_leaf_elm_lookup(tsdn, rtree, rtree_ctx, key, /* dependent */ true, /* init_missing */ false); assert(elm != NULL); return rtree_leaf_elm_read(tsdn, rtree, elm, - /* dependent */ true).metadata; + /* dependent */ true) + .metadata; } /* * Returns true when the request cannot be fulfilled by fastpath. 
*/ static inline bool -rtree_metadata_try_read_fast(tsdn_t *tsdn, rtree_t *rtree, rtree_ctx_t *rtree_ctx, - uintptr_t key, rtree_metadata_t *r_rtree_metadata) { +rtree_metadata_try_read_fast(tsdn_t *tsdn, rtree_t *rtree, + rtree_ctx_t *rtree_ctx, uintptr_t key, rtree_metadata_t *r_rtree_metadata) { rtree_leaf_elm_t *elm; /* * Should check the bool return value (lookup success or not) instead of @@ -476,7 +481,8 @@ rtree_metadata_try_read_fast(tsdn_t *tsdn, rtree_t *rtree, rtree_ctx_t *rtree_ct } assert(elm != NULL); *r_rtree_metadata = rtree_leaf_elm_read(tsdn, rtree, elm, - /* dependent */ true).metadata; + /* dependent */ true) + .metadata; return false; } @@ -490,22 +496,27 @@ rtree_write_range_impl(tsdn_t *tsdn, rtree_t *rtree, rtree_ctx_t *rtree_ctx, * are dependent w/o init_missing, assuming the range spans across at * most 2 rtree leaf nodes (each covers 1 GiB of vaddr). */ - void *bits; + void *bits; unsigned additional; rtree_contents_encode(contents, &bits, &additional); rtree_leaf_elm_t *elm = NULL; /* Dead store. 
*/ for (uintptr_t addr = base; addr <= end; addr += PAGE) { - if (addr == base || - (addr & ((ZU(1) << rtree_leaf_maskbits()) - 1)) == 0) { - elm = rtree_leaf_elm_lookup(tsdn, rtree, rtree_ctx, addr, + if (addr == base + || (addr & ((ZU(1) << rtree_leaf_maskbits()) - 1)) == 0) { + elm = rtree_leaf_elm_lookup(tsdn, rtree, rtree_ctx, + addr, /* dependent */ true, /* init_missing */ false); assert(elm != NULL); } - assert(elm == rtree_leaf_elm_lookup(tsdn, rtree, rtree_ctx, addr, - /* dependent */ true, /* init_missing */ false)); - assert(!clearing || rtree_leaf_elm_read(tsdn, rtree, elm, - /* dependent */ true).edata != NULL); + assert(elm + == rtree_leaf_elm_lookup(tsdn, rtree, rtree_ctx, addr, + /* dependent */ true, /* init_missing */ false)); + assert(!clearing + || rtree_leaf_elm_read(tsdn, rtree, elm, + /* dependent */ true) + .edata + != NULL); rtree_leaf_elm_write_commit(tsdn, rtree, elm, bits, additional); elm++; } @@ -533,13 +544,15 @@ rtree_write(tsdn_t *tsdn, rtree_t *rtree, rtree_ctx_t *rtree_ctx, uintptr_t key, } static inline void -rtree_clear(tsdn_t *tsdn, rtree_t *rtree, rtree_ctx_t *rtree_ctx, - uintptr_t key) { +rtree_clear( + tsdn_t *tsdn, rtree_t *rtree, rtree_ctx_t *rtree_ctx, uintptr_t key) { rtree_leaf_elm_t *elm = rtree_leaf_elm_lookup(tsdn, rtree, rtree_ctx, key, /* dependent */ true, /* init_missing */ false); assert(elm != NULL); assert(rtree_leaf_elm_read(tsdn, rtree, elm, - /* dependent */ true).edata != NULL); + /* dependent */ true) + .edata + != NULL); rtree_contents_t contents; contents.edata = NULL; contents.metadata.szind = SC_NSIZES; diff --git a/include/jemalloc/internal/rtree_tsd.h b/include/jemalloc/internal/rtree_tsd.h index 59f18570..4014dde0 100644 --- a/include/jemalloc/internal/rtree_tsd.h +++ b/include/jemalloc/internal/rtree_tsd.h @@ -25,7 +25,8 @@ /* Needed for initialization only. 
*/ #define RTREE_LEAFKEY_INVALID ((uintptr_t)1) -#define RTREE_CTX_CACHE_ELM_INVALID {RTREE_LEAFKEY_INVALID, NULL} +#define RTREE_CTX_CACHE_ELM_INVALID \ + { RTREE_LEAFKEY_INVALID, NULL } #define RTREE_CTX_INIT_ELM_1 RTREE_CTX_CACHE_ELM_INVALID #define RTREE_CTX_INIT_ELM_2 RTREE_CTX_INIT_ELM_1, RTREE_CTX_INIT_ELM_1 @@ -40,23 +41,27 @@ * Static initializer (to invalidate the cache entries) is required because the * free fastpath may access the rtree cache before a full tsd initialization. */ -#define RTREE_CTX_INITIALIZER {{RTREE_CTX_INIT_ELM_DATA(RTREE_CTX_NCACHE)}, \ - {RTREE_CTX_INIT_ELM_DATA(RTREE_CTX_NCACHE_L2)}} +#define RTREE_CTX_INITIALIZER \ + { \ + {RTREE_CTX_INIT_ELM_DATA(RTREE_CTX_NCACHE)}, { \ + RTREE_CTX_INIT_ELM_DATA(RTREE_CTX_NCACHE_L2) \ + } \ + } typedef struct rtree_leaf_elm_s rtree_leaf_elm_t; typedef struct rtree_ctx_cache_elm_s rtree_ctx_cache_elm_t; struct rtree_ctx_cache_elm_s { - uintptr_t leafkey; - rtree_leaf_elm_t *leaf; + uintptr_t leafkey; + rtree_leaf_elm_t *leaf; }; typedef struct rtree_ctx_s rtree_ctx_t; struct rtree_ctx_s { /* Direct mapped cache. */ - rtree_ctx_cache_elm_t cache[RTREE_CTX_NCACHE]; + rtree_ctx_cache_elm_t cache[RTREE_CTX_NCACHE]; /* L2 LRU cache. 
*/ - rtree_ctx_cache_elm_t l2_cache[RTREE_CTX_NCACHE_L2]; + rtree_ctx_cache_elm_t l2_cache[RTREE_CTX_NCACHE_L2]; }; void rtree_ctx_data_init(rtree_ctx_t *ctx); diff --git a/include/jemalloc/internal/safety_check.h b/include/jemalloc/internal/safety_check.h index 194b7744..2b4b2d0e 100644 --- a/include/jemalloc/internal/safety_check.h +++ b/include/jemalloc/internal/safety_check.h @@ -7,8 +7,8 @@ #define SAFETY_CHECK_DOUBLE_FREE_MAX_SCAN_DEFAULT 32 -void safety_check_fail_sized_dealloc(bool current_dealloc, const void *ptr, - size_t true_size, size_t input_size); +void safety_check_fail_sized_dealloc( + bool current_dealloc, const void *ptr, size_t true_size, size_t input_size); void safety_check_fail(const char *format, ...); typedef void (*safety_check_abort_hook_t)(const char *message); @@ -16,7 +16,7 @@ typedef void (*safety_check_abort_hook_t)(const char *message); /* Can set to NULL for a default. */ void safety_check_set_abort(safety_check_abort_hook_t abort_fn); -#define REDZONE_SIZE ((size_t) 32) +#define REDZONE_SIZE ((size_t)32) #define REDZONE_FILL_VALUE 0xBC /* @@ -27,9 +27,10 @@ void safety_check_set_abort(safety_check_abort_hook_t abort_fn); */ JEMALLOC_ALWAYS_INLINE const unsigned char * compute_redzone_end(const void *_ptr, size_t usize, size_t bumped_usize) { - const unsigned char *ptr = (const unsigned char *) _ptr; - const unsigned char *redzone_end = usize + REDZONE_SIZE < bumped_usize ? - &ptr[usize + REDZONE_SIZE] : &ptr[bumped_usize]; + const unsigned char *ptr = (const unsigned char *)_ptr; + const unsigned char *redzone_end = usize + REDZONE_SIZE < bumped_usize + ? &ptr[usize + REDZONE_SIZE] + : &ptr[bumped_usize]; const unsigned char *page_end = (const unsigned char *) ALIGNMENT_ADDR2CEILING(&ptr[usize], os_page); return redzone_end < page_end ? 
redzone_end : page_end; @@ -38,8 +39,8 @@ compute_redzone_end(const void *_ptr, size_t usize, size_t bumped_usize) { JEMALLOC_ALWAYS_INLINE void safety_check_set_redzone(void *ptr, size_t usize, size_t bumped_usize) { assert(usize <= bumped_usize); - const unsigned char *redzone_end = - compute_redzone_end(ptr, usize, bumped_usize); + const unsigned char *redzone_end = compute_redzone_end( + ptr, usize, bumped_usize); for (unsigned char *curr = &((unsigned char *)ptr)[usize]; curr < redzone_end; curr++) { *curr = REDZONE_FILL_VALUE; @@ -47,11 +48,11 @@ safety_check_set_redzone(void *ptr, size_t usize, size_t bumped_usize) { } JEMALLOC_ALWAYS_INLINE void -safety_check_verify_redzone(const void *ptr, size_t usize, size_t bumped_usize) -{ - const unsigned char *redzone_end = - compute_redzone_end(ptr, usize, bumped_usize); - for (const unsigned char *curr= &((const unsigned char *)ptr)[usize]; +safety_check_verify_redzone( + const void *ptr, size_t usize, size_t bumped_usize) { + const unsigned char *redzone_end = compute_redzone_end( + ptr, usize, bumped_usize); + for (const unsigned char *curr = &((const unsigned char *)ptr)[usize]; curr < redzone_end; curr++) { if (unlikely(*curr != REDZONE_FILL_VALUE)) { safety_check_fail("Use after free error\n"); diff --git a/include/jemalloc/internal/san.h b/include/jemalloc/internal/san.h index 669f99dd..5dcae376 100644 --- a/include/jemalloc/internal/san.h +++ b/include/jemalloc/internal/san.h @@ -32,22 +32,22 @@ void san_unguard_pages(tsdn_t *tsdn, ehooks_t *ehooks, edata_t *edata, * Unguard the extent, but don't modify emap boundaries. Must be called on an * extent that has been erased from emap and shouldn't be placed back. 
*/ -void san_unguard_pages_pre_destroy(tsdn_t *tsdn, ehooks_t *ehooks, - edata_t *edata, emap_t *emap); +void san_unguard_pages_pre_destroy( + tsdn_t *tsdn, ehooks_t *ehooks, edata_t *edata, emap_t *emap); void san_check_stashed_ptrs(void **ptrs, size_t nstashed, size_t usize); void tsd_san_init(tsd_t *tsd); void san_init(ssize_t lg_san_uaf_align); static inline void -san_guard_pages_two_sided(tsdn_t *tsdn, ehooks_t *ehooks, edata_t *edata, - emap_t *emap, bool remap) { +san_guard_pages_two_sided( + tsdn_t *tsdn, ehooks_t *ehooks, edata_t *edata, emap_t *emap, bool remap) { san_guard_pages(tsdn, ehooks, edata, emap, true, true, remap); } static inline void -san_unguard_pages_two_sided(tsdn_t *tsdn, ehooks_t *ehooks, edata_t *edata, - emap_t *emap) { +san_unguard_pages_two_sided( + tsdn_t *tsdn, ehooks_t *ehooks, edata_t *edata, emap_t *emap) { san_unguard_pages(tsdn, ehooks, edata, emap, true, true); } @@ -83,14 +83,14 @@ san_guard_enabled(void) { } static inline bool -san_large_extent_decide_guard(tsdn_t *tsdn, ehooks_t *ehooks, size_t size, - size_t alignment) { - if (opt_san_guard_large == 0 || ehooks_guard_will_fail(ehooks) || - tsdn_null(tsdn)) { +san_large_extent_decide_guard( + tsdn_t *tsdn, ehooks_t *ehooks, size_t size, size_t alignment) { + if (opt_san_guard_large == 0 || ehooks_guard_will_fail(ehooks) + || tsdn_null(tsdn)) { return false; } - tsd_t *tsd = tsdn_tsd(tsdn); + tsd_t *tsd = tsdn_tsd(tsdn); uint64_t n = tsd_san_extents_until_guard_large_get(tsd); assert(n >= 1); if (n > 1) { @@ -101,10 +101,10 @@ san_large_extent_decide_guard(tsdn_t *tsdn, ehooks_t *ehooks, size_t size, *tsd_san_extents_until_guard_largep_get(tsd) = n - 1; } - if (n == 1 && (alignment <= PAGE) && - (san_two_side_guarded_sz(size) <= SC_LARGE_MAXCLASS)) { - *tsd_san_extents_until_guard_largep_get(tsd) = - opt_san_guard_large; + if (n == 1 && (alignment <= PAGE) + && (san_two_side_guarded_sz(size) <= SC_LARGE_MAXCLASS)) { + *tsd_san_extents_until_guard_largep_get( + tsd) = 
opt_san_guard_large; return true; } else { assert(tsd_san_extents_until_guard_large_get(tsd) >= 1); @@ -114,17 +114,17 @@ san_large_extent_decide_guard(tsdn_t *tsdn, ehooks_t *ehooks, size_t size, static inline bool san_slab_extent_decide_guard(tsdn_t *tsdn, ehooks_t *ehooks) { - if (opt_san_guard_small == 0 || ehooks_guard_will_fail(ehooks) || - tsdn_null(tsdn)) { + if (opt_san_guard_small == 0 || ehooks_guard_will_fail(ehooks) + || tsdn_null(tsdn)) { return false; } - tsd_t *tsd = tsdn_tsd(tsdn); + tsd_t *tsd = tsdn_tsd(tsdn); uint64_t n = tsd_san_extents_until_guard_small_get(tsd); assert(n >= 1); if (n == 1) { - *tsd_san_extents_until_guard_smallp_get(tsd) = - opt_san_guard_small; + *tsd_san_extents_until_guard_smallp_get( + tsd) = opt_san_guard_small; return true; } else { *tsd_san_extents_until_guard_smallp_get(tsd) = n - 1; @@ -134,8 +134,8 @@ san_slab_extent_decide_guard(tsdn_t *tsdn, ehooks_t *ehooks) { } static inline void -san_junk_ptr_locations(void *ptr, size_t usize, void **first, void **mid, - void **last) { +san_junk_ptr_locations( + void *ptr, size_t usize, void **first, void **mid, void **last) { size_t ptr_sz = sizeof(void *); *first = ptr; @@ -184,8 +184,8 @@ static inline bool san_uaf_detection_enabled(void) { bool ret = config_uaf_detection && (opt_lg_san_uaf_align != -1); if (config_uaf_detection && ret) { - assert(san_cache_bin_nonfast_mask == ((uintptr_t)1 << - opt_lg_san_uaf_align) - 1); + assert(san_cache_bin_nonfast_mask + == ((uintptr_t)1 << opt_lg_san_uaf_align) - 1); } return ret; diff --git a/include/jemalloc/internal/san_bump.h b/include/jemalloc/internal/san_bump.h index d6e9cfc5..9e42b69b 100644 --- a/include/jemalloc/internal/san_bump.h +++ b/include/jemalloc/internal/san_bump.h @@ -12,7 +12,7 @@ extern bool opt_retain; typedef struct ehooks_s ehooks_t; -typedef struct pac_s pac_t; +typedef struct pac_s pac_t; typedef struct san_bump_alloc_s san_bump_alloc_t; struct san_bump_alloc_s { @@ -36,7 +36,7 @@ san_bump_enabled(void) { } 
static inline bool -san_bump_alloc_init(san_bump_alloc_t* sba) { +san_bump_alloc_init(san_bump_alloc_t *sba) { bool err = malloc_mutex_init(&sba->mtx, "sanitizer_bump_allocator", WITNESS_RANK_SAN_BUMP_ALLOC, malloc_mutex_rank_exclusive); if (err) { @@ -47,8 +47,7 @@ san_bump_alloc_init(san_bump_alloc_t* sba) { return false; } -edata_t * -san_bump_alloc(tsdn_t *tsdn, san_bump_alloc_t* sba, pac_t *pac, ehooks_t *ehooks, - size_t size, bool zero); +edata_t *san_bump_alloc(tsdn_t *tsdn, san_bump_alloc_t *sba, pac_t *pac, + ehooks_t *ehooks, size_t size, bool zero); #endif /* JEMALLOC_INTERNAL_SAN_BUMP_H */ diff --git a/include/jemalloc/internal/sc.h b/include/jemalloc/internal/sc.h index 97956e7a..17a8278a 100644 --- a/include/jemalloc/internal/sc.h +++ b/include/jemalloc/internal/sc.h @@ -174,7 +174,7 @@ #if SC_LG_TINY_MIN == 0 /* The div module doesn't support division by 1, which this would require. */ -#error "Unsupported LG_TINY_MIN" +# error "Unsupported LG_TINY_MIN" #endif /* @@ -194,8 +194,8 @@ * We could probably save some space in arenas by capping this at LG_VADDR size. */ #define SC_LG_BASE_MAX (SC_PTR_BITS - 2) -#define SC_NREGULAR (SC_NGROUP * \ - (SC_LG_BASE_MAX - SC_LG_FIRST_REGULAR_BASE + 1) - 1) +#define SC_NREGULAR \ + (SC_NGROUP * (SC_LG_BASE_MAX - SC_LG_FIRST_REGULAR_BASE + 1) - 1) #define SC_NSIZES (SC_NTINY + SC_NPSEUDO + SC_NREGULAR) /* @@ -222,29 +222,29 @@ * * This gives us the quantity we seek. */ -#define SC_NPSIZES ( \ - SC_NGROUP \ - + (SC_LG_BASE_MAX - (LG_PAGE + SC_LG_NGROUP)) * SC_NGROUP \ - + SC_NGROUP - 1) +#define SC_NPSIZES \ + (SC_NGROUP + (SC_LG_BASE_MAX - (LG_PAGE + SC_LG_NGROUP)) * SC_NGROUP \ + + SC_NGROUP - 1) /* * We declare a size class is binnable if size < page size * group. Or, in other * words, lg(size) < lg(page size) + lg(group size). */ -#define SC_NBINS ( \ - /* Sub-regular size classes. 
*/ \ - SC_NTINY + SC_NPSEUDO \ - /* Groups with lg_regular_min_base <= lg_base <= lg_base_max */ \ - + SC_NGROUP * (LG_PAGE + SC_LG_NGROUP - SC_LG_FIRST_REGULAR_BASE) \ - /* Last SC of the last group hits the bound exactly; exclude it. */ \ - - 1) +#define SC_NBINS \ + (/* Sub-regular size classes. */ \ + SC_NTINY \ + + SC_NPSEUDO /* Groups with lg_regular_min_base <= lg_base <= lg_base_max */ \ + + SC_NGROUP \ + * (LG_PAGE + SC_LG_NGROUP \ + - SC_LG_FIRST_REGULAR_BASE) /* Last SC of the last group hits the bound exactly; exclude it. */ \ + - 1) /* * The size2index_tab lookup table uses uint8_t to encode each bin index, so we * cannot support more than 256 small size classes. */ #if (SC_NBINS > 256) -# error "Too many small size classes" +# error "Too many small size classes" #endif /* The largest size class in the lookup table, and its binary log. */ @@ -256,12 +256,12 @@ #define SC_SMALL_MAX_DELTA (1 << (LG_PAGE - 1)) /* The largest size class allocated out of a slab. */ -#define SC_SMALL_MAXCLASS (SC_SMALL_MAX_BASE \ - + (SC_NGROUP - 1) * SC_SMALL_MAX_DELTA) +#define SC_SMALL_MAXCLASS \ + (SC_SMALL_MAX_BASE + (SC_NGROUP - 1) * SC_SMALL_MAX_DELTA) /* The fastpath assumes all lookup-able sizes are small. */ #if (SC_SMALL_MAXCLASS < SC_LOOKUP_MAXCLASS) -# error "Lookup table sizes must be small" +# error "Lookup table sizes must be small" #endif /* The smallest size class not allocated out of a slab. */ @@ -277,13 +277,13 @@ /* Maximum number of regions in one slab. 
*/ #ifndef CONFIG_LG_SLAB_MAXREGS -# define SC_LG_SLAB_MAXREGS (LG_PAGE - SC_LG_TINY_MIN) +# define SC_LG_SLAB_MAXREGS (LG_PAGE - SC_LG_TINY_MIN) #else -# if CONFIG_LG_SLAB_MAXREGS < (LG_PAGE - SC_LG_TINY_MIN) -# error "Unsupported SC_LG_SLAB_MAXREGS" -# else -# define SC_LG_SLAB_MAXREGS CONFIG_LG_SLAB_MAXREGS -# endif +# if CONFIG_LG_SLAB_MAXREGS < (LG_PAGE - SC_LG_TINY_MIN) +# error "Unsupported SC_LG_SLAB_MAXREGS" +# else +# define SC_LG_SLAB_MAXREGS CONFIG_LG_SLAB_MAXREGS +# endif #endif /* @@ -364,13 +364,13 @@ struct sc_data_s { }; size_t reg_size_compute(int lg_base, int lg_delta, int ndelta); -void sc_data_init(sc_data_t *data); +void sc_data_init(sc_data_t *data); /* * Updates slab sizes in [begin, end] to be pgs pages in length, if possible. * Otherwise, does its best to accommodate the request. */ -void sc_data_update_slab_size(sc_data_t *data, size_t begin, size_t end, - int pgs); +void sc_data_update_slab_size( + sc_data_t *data, size_t begin, size_t end, int pgs); void sc_boot(sc_data_t *data); #endif /* JEMALLOC_INTERNAL_SC_H */ diff --git a/include/jemalloc/internal/sec.h b/include/jemalloc/internal/sec.h index 8ef1e9fb..50daf066 100644 --- a/include/jemalloc/internal/sec.h +++ b/include/jemalloc/internal/sec.h @@ -59,7 +59,7 @@ struct sec_bin_s { * stats; rather, it allows us to quickly determine the change in the * centralized counter when flushing. */ - size_t bytes_cur; + size_t bytes_cur; edata_list_active_t freelist; }; @@ -80,7 +80,7 @@ struct sec_shard_s { * that we won't go down these pathways very often after custom extent * hooks are installed. */ - bool enabled; + bool enabled; sec_bin_t *bins; /* Number of bytes in all bins in the shard. 
*/ size_t bytes_cur; @@ -90,12 +90,12 @@ struct sec_shard_s { typedef struct sec_s sec_t; struct sec_s { - pai_t pai; + pai_t pai; pai_t *fallback; - sec_opts_t opts; + sec_opts_t opts; sec_shard_t *shards; - pszind_t npsizes; + pszind_t npsizes; }; bool sec_init(tsdn_t *tsdn, sec_t *sec, base_t *base, pai_t *fallback, @@ -110,8 +110,8 @@ void sec_disable(tsdn_t *tsdn, sec_t *sec); * split), which simplifies the stats management. */ void sec_stats_merge(tsdn_t *tsdn, sec_t *sec, sec_stats_t *stats); -void sec_mutex_stats_read(tsdn_t *tsdn, sec_t *sec, - mutex_prof_data_t *mutex_prof_data); +void sec_mutex_stats_read( + tsdn_t *tsdn, sec_t *sec, mutex_prof_data_t *mutex_prof_data); /* * We use the arena lock ordering; these are acquired in phase 2 of forking, but diff --git a/include/jemalloc/internal/sec_opts.h b/include/jemalloc/internal/sec_opts.h index 19ed1492..e0699d7a 100644 --- a/include/jemalloc/internal/sec_opts.h +++ b/include/jemalloc/internal/sec_opts.h @@ -44,18 +44,14 @@ struct sec_opts_s { size_t batch_fill_extra; }; -#define SEC_OPTS_DEFAULT { \ - /* nshards */ \ - 4, \ - /* max_alloc */ \ - (32 * 1024) < PAGE ? PAGE : (32 * 1024), \ - /* max_bytes */ \ - 256 * 1024, \ - /* bytes_after_flush */ \ - 128 * 1024, \ - /* batch_fill_extra */ \ - 0 \ -} - +#define SEC_OPTS_DEFAULT \ + { \ + /* nshards */ \ + 4, /* max_alloc */ \ + (32 * 1024) < PAGE ? 
PAGE : (32 * 1024), /* max_bytes */ \ + 256 * 1024, /* bytes_after_flush */ \ + 128 * 1024, /* batch_fill_extra */ \ + 0 \ + } #endif /* JEMALLOC_INTERNAL_SEC_OPTS_H */ diff --git a/include/jemalloc/internal/smoothstep.h b/include/jemalloc/internal/smoothstep.h index 2e14430f..135c4905 100644 --- a/include/jemalloc/internal/smoothstep.h +++ b/include/jemalloc/internal/smoothstep.h @@ -23,210 +23,210 @@ * smootheststep(x) = -20x + 70x - 84x + 35x */ -#define SMOOTHSTEP_VARIANT "smoother" -#define SMOOTHSTEP_NSTEPS 200 -#define SMOOTHSTEP_BFP 24 -#define SMOOTHSTEP \ - /* STEP(step, h, x, y) */ \ - STEP( 1, UINT64_C(0x0000000000000014), 0.005, 0.000001240643750) \ - STEP( 2, UINT64_C(0x00000000000000a5), 0.010, 0.000009850600000) \ - STEP( 3, UINT64_C(0x0000000000000229), 0.015, 0.000032995181250) \ - STEP( 4, UINT64_C(0x0000000000000516), 0.020, 0.000077619200000) \ - STEP( 5, UINT64_C(0x00000000000009dc), 0.025, 0.000150449218750) \ - STEP( 6, UINT64_C(0x00000000000010e8), 0.030, 0.000257995800000) \ - STEP( 7, UINT64_C(0x0000000000001aa4), 0.035, 0.000406555756250) \ - STEP( 8, UINT64_C(0x0000000000002777), 0.040, 0.000602214400000) \ - STEP( 9, UINT64_C(0x00000000000037c2), 0.045, 0.000850847793750) \ - STEP( 10, UINT64_C(0x0000000000004be6), 0.050, 0.001158125000000) \ - STEP( 11, UINT64_C(0x000000000000643c), 0.055, 0.001529510331250) \ - STEP( 12, UINT64_C(0x000000000000811f), 0.060, 0.001970265600000) \ - STEP( 13, UINT64_C(0x000000000000a2e2), 0.065, 0.002485452368750) \ - STEP( 14, UINT64_C(0x000000000000c9d8), 0.070, 0.003079934200000) \ - STEP( 15, UINT64_C(0x000000000000f64f), 0.075, 0.003758378906250) \ - STEP( 16, UINT64_C(0x0000000000012891), 0.080, 0.004525260800000) \ - STEP( 17, UINT64_C(0x00000000000160e7), 0.085, 0.005384862943750) \ - STEP( 18, UINT64_C(0x0000000000019f95), 0.090, 0.006341279400000) \ - STEP( 19, UINT64_C(0x000000000001e4dc), 0.095, 0.007398417481250) \ - STEP( 20, UINT64_C(0x00000000000230fc), 0.100, 0.008560000000000) \ - 
STEP( 21, UINT64_C(0x0000000000028430), 0.105, 0.009829567518750) \ - STEP( 22, UINT64_C(0x000000000002deb0), 0.110, 0.011210480600000) \ - STEP( 23, UINT64_C(0x00000000000340b1), 0.115, 0.012705922056250) \ - STEP( 24, UINT64_C(0x000000000003aa67), 0.120, 0.014318899200000) \ - STEP( 25, UINT64_C(0x0000000000041c00), 0.125, 0.016052246093750) \ - STEP( 26, UINT64_C(0x00000000000495a8), 0.130, 0.017908625800000) \ - STEP( 27, UINT64_C(0x000000000005178b), 0.135, 0.019890532631250) \ - STEP( 28, UINT64_C(0x000000000005a1cf), 0.140, 0.022000294400000) \ - STEP( 29, UINT64_C(0x0000000000063498), 0.145, 0.024240074668750) \ - STEP( 30, UINT64_C(0x000000000006d009), 0.150, 0.026611875000000) \ - STEP( 31, UINT64_C(0x000000000007743f), 0.155, 0.029117537206250) \ - STEP( 32, UINT64_C(0x0000000000082157), 0.160, 0.031758745600000) \ - STEP( 33, UINT64_C(0x000000000008d76b), 0.165, 0.034537029243750) \ - STEP( 34, UINT64_C(0x0000000000099691), 0.170, 0.037453764200000) \ - STEP( 35, UINT64_C(0x00000000000a5edf), 0.175, 0.040510175781250) \ - STEP( 36, UINT64_C(0x00000000000b3067), 0.180, 0.043707340800000) \ - STEP( 37, UINT64_C(0x00000000000c0b38), 0.185, 0.047046189818750) \ - STEP( 38, UINT64_C(0x00000000000cef5e), 0.190, 0.050527509400000) \ - STEP( 39, UINT64_C(0x00000000000ddce6), 0.195, 0.054151944356250) \ - STEP( 40, UINT64_C(0x00000000000ed3d8), 0.200, 0.057920000000000) \ - STEP( 41, UINT64_C(0x00000000000fd439), 0.205, 0.061832044393750) \ - STEP( 42, UINT64_C(0x000000000010de0e), 0.210, 0.065888310600000) \ - STEP( 43, UINT64_C(0x000000000011f158), 0.215, 0.070088898931250) \ - STEP( 44, UINT64_C(0x0000000000130e17), 0.220, 0.074433779200000) \ - STEP( 45, UINT64_C(0x0000000000143448), 0.225, 0.078922792968750) \ - STEP( 46, UINT64_C(0x00000000001563e7), 0.230, 0.083555655800000) \ - STEP( 47, UINT64_C(0x0000000000169cec), 0.235, 0.088331959506250) \ - STEP( 48, UINT64_C(0x000000000017df4f), 0.240, 0.093251174400000) \ - STEP( 49, UINT64_C(0x0000000000192b04), 
0.245, 0.098312651543750) \ - STEP( 50, UINT64_C(0x00000000001a8000), 0.250, 0.103515625000000) \ - STEP( 51, UINT64_C(0x00000000001bde32), 0.255, 0.108859214081250) \ - STEP( 52, UINT64_C(0x00000000001d458b), 0.260, 0.114342425600000) \ - STEP( 53, UINT64_C(0x00000000001eb5f8), 0.265, 0.119964156118750) \ - STEP( 54, UINT64_C(0x0000000000202f65), 0.270, 0.125723194200000) \ - STEP( 55, UINT64_C(0x000000000021b1bb), 0.275, 0.131618222656250) \ - STEP( 56, UINT64_C(0x0000000000233ce3), 0.280, 0.137647820800000) \ - STEP( 57, UINT64_C(0x000000000024d0c3), 0.285, 0.143810466693750) \ - STEP( 58, UINT64_C(0x0000000000266d40), 0.290, 0.150104539400000) \ - STEP( 59, UINT64_C(0x000000000028123d), 0.295, 0.156528321231250) \ - STEP( 60, UINT64_C(0x000000000029bf9c), 0.300, 0.163080000000000) \ - STEP( 61, UINT64_C(0x00000000002b753d), 0.305, 0.169757671268750) \ - STEP( 62, UINT64_C(0x00000000002d32fe), 0.310, 0.176559340600000) \ - STEP( 63, UINT64_C(0x00000000002ef8bc), 0.315, 0.183482925806250) \ - STEP( 64, UINT64_C(0x000000000030c654), 0.320, 0.190526259200000) \ - STEP( 65, UINT64_C(0x0000000000329b9f), 0.325, 0.197687089843750) \ - STEP( 66, UINT64_C(0x0000000000347875), 0.330, 0.204963085800000) \ - STEP( 67, UINT64_C(0x0000000000365cb0), 0.335, 0.212351836381250) \ - STEP( 68, UINT64_C(0x0000000000384825), 0.340, 0.219850854400000) \ - STEP( 69, UINT64_C(0x00000000003a3aa8), 0.345, 0.227457578418750) \ - STEP( 70, UINT64_C(0x00000000003c340f), 0.350, 0.235169375000000) \ - STEP( 71, UINT64_C(0x00000000003e342b), 0.355, 0.242983540956250) \ - STEP( 72, UINT64_C(0x0000000000403ace), 0.360, 0.250897305600000) \ - STEP( 73, UINT64_C(0x00000000004247c8), 0.365, 0.258907832993750) \ - STEP( 74, UINT64_C(0x0000000000445ae9), 0.370, 0.267012224200000) \ - STEP( 75, UINT64_C(0x0000000000467400), 0.375, 0.275207519531250) \ - STEP( 76, UINT64_C(0x00000000004892d8), 0.380, 0.283490700800000) \ - STEP( 77, UINT64_C(0x00000000004ab740), 0.385, 0.291858693568750) \ - STEP( 78, 
UINT64_C(0x00000000004ce102), 0.390, 0.300308369400000) \ - STEP( 79, UINT64_C(0x00000000004f0fe9), 0.395, 0.308836548106250) \ - STEP( 80, UINT64_C(0x00000000005143bf), 0.400, 0.317440000000000) \ - STEP( 81, UINT64_C(0x0000000000537c4d), 0.405, 0.326115448143750) \ - STEP( 82, UINT64_C(0x000000000055b95b), 0.410, 0.334859570600000) \ - STEP( 83, UINT64_C(0x000000000057fab1), 0.415, 0.343669002681250) \ - STEP( 84, UINT64_C(0x00000000005a4015), 0.420, 0.352540339200000) \ - STEP( 85, UINT64_C(0x00000000005c894e), 0.425, 0.361470136718750) \ - STEP( 86, UINT64_C(0x00000000005ed622), 0.430, 0.370454915800000) \ - STEP( 87, UINT64_C(0x0000000000612655), 0.435, 0.379491163256250) \ - STEP( 88, UINT64_C(0x00000000006379ac), 0.440, 0.388575334400000) \ - STEP( 89, UINT64_C(0x000000000065cfeb), 0.445, 0.397703855293750) \ - STEP( 90, UINT64_C(0x00000000006828d6), 0.450, 0.406873125000000) \ - STEP( 91, UINT64_C(0x00000000006a842f), 0.455, 0.416079517831250) \ - STEP( 92, UINT64_C(0x00000000006ce1bb), 0.460, 0.425319385600000) \ - STEP( 93, UINT64_C(0x00000000006f413a), 0.465, 0.434589059868750) \ - STEP( 94, UINT64_C(0x000000000071a270), 0.470, 0.443884854200000) \ - STEP( 95, UINT64_C(0x000000000074051d), 0.475, 0.453203066406250) \ - STEP( 96, UINT64_C(0x0000000000766905), 0.480, 0.462539980800000) \ - STEP( 97, UINT64_C(0x000000000078cde7), 0.485, 0.471891870443750) \ - STEP( 98, UINT64_C(0x00000000007b3387), 0.490, 0.481254999400000) \ - STEP( 99, UINT64_C(0x00000000007d99a4), 0.495, 0.490625624981250) \ - STEP( 100, UINT64_C(0x0000000000800000), 0.500, 0.500000000000000) \ - STEP( 101, UINT64_C(0x000000000082665b), 0.505, 0.509374375018750) \ - STEP( 102, UINT64_C(0x000000000084cc78), 0.510, 0.518745000600000) \ - STEP( 103, UINT64_C(0x0000000000873218), 0.515, 0.528108129556250) \ - STEP( 104, UINT64_C(0x00000000008996fa), 0.520, 0.537460019200000) \ - STEP( 105, UINT64_C(0x00000000008bfae2), 0.525, 0.546796933593750) \ - STEP( 106, UINT64_C(0x00000000008e5d8f), 
0.530, 0.556115145800000) \ - STEP( 107, UINT64_C(0x000000000090bec5), 0.535, 0.565410940131250) \ - STEP( 108, UINT64_C(0x0000000000931e44), 0.540, 0.574680614400000) \ - STEP( 109, UINT64_C(0x0000000000957bd0), 0.545, 0.583920482168750) \ - STEP( 110, UINT64_C(0x000000000097d729), 0.550, 0.593126875000000) \ - STEP( 111, UINT64_C(0x00000000009a3014), 0.555, 0.602296144706250) \ - STEP( 112, UINT64_C(0x00000000009c8653), 0.560, 0.611424665600000) \ - STEP( 113, UINT64_C(0x00000000009ed9aa), 0.565, 0.620508836743750) \ - STEP( 114, UINT64_C(0x0000000000a129dd), 0.570, 0.629545084200000) \ - STEP( 115, UINT64_C(0x0000000000a376b1), 0.575, 0.638529863281250) \ - STEP( 116, UINT64_C(0x0000000000a5bfea), 0.580, 0.647459660800000) \ - STEP( 117, UINT64_C(0x0000000000a8054e), 0.585, 0.656330997318750) \ - STEP( 118, UINT64_C(0x0000000000aa46a4), 0.590, 0.665140429400000) \ - STEP( 119, UINT64_C(0x0000000000ac83b2), 0.595, 0.673884551856250) \ - STEP( 120, UINT64_C(0x0000000000aebc40), 0.600, 0.682560000000000) \ - STEP( 121, UINT64_C(0x0000000000b0f016), 0.605, 0.691163451893750) \ - STEP( 122, UINT64_C(0x0000000000b31efd), 0.610, 0.699691630600000) \ - STEP( 123, UINT64_C(0x0000000000b548bf), 0.615, 0.708141306431250) \ - STEP( 124, UINT64_C(0x0000000000b76d27), 0.620, 0.716509299200000) \ - STEP( 125, UINT64_C(0x0000000000b98c00), 0.625, 0.724792480468750) \ - STEP( 126, UINT64_C(0x0000000000bba516), 0.630, 0.732987775800000) \ - STEP( 127, UINT64_C(0x0000000000bdb837), 0.635, 0.741092167006250) \ - STEP( 128, UINT64_C(0x0000000000bfc531), 0.640, 0.749102694400000) \ - STEP( 129, UINT64_C(0x0000000000c1cbd4), 0.645, 0.757016459043750) \ - STEP( 130, UINT64_C(0x0000000000c3cbf0), 0.650, 0.764830625000000) \ - STEP( 131, UINT64_C(0x0000000000c5c557), 0.655, 0.772542421581250) \ - STEP( 132, UINT64_C(0x0000000000c7b7da), 0.660, 0.780149145600000) \ - STEP( 133, UINT64_C(0x0000000000c9a34f), 0.665, 0.787648163618750) \ - STEP( 134, UINT64_C(0x0000000000cb878a), 0.670, 
0.795036914200000) \ - STEP( 135, UINT64_C(0x0000000000cd6460), 0.675, 0.802312910156250) \ - STEP( 136, UINT64_C(0x0000000000cf39ab), 0.680, 0.809473740800000) \ - STEP( 137, UINT64_C(0x0000000000d10743), 0.685, 0.816517074193750) \ - STEP( 138, UINT64_C(0x0000000000d2cd01), 0.690, 0.823440659400000) \ - STEP( 139, UINT64_C(0x0000000000d48ac2), 0.695, 0.830242328731250) \ - STEP( 140, UINT64_C(0x0000000000d64063), 0.700, 0.836920000000000) \ - STEP( 141, UINT64_C(0x0000000000d7edc2), 0.705, 0.843471678768750) \ - STEP( 142, UINT64_C(0x0000000000d992bf), 0.710, 0.849895460600000) \ - STEP( 143, UINT64_C(0x0000000000db2f3c), 0.715, 0.856189533306250) \ - STEP( 144, UINT64_C(0x0000000000dcc31c), 0.720, 0.862352179200000) \ - STEP( 145, UINT64_C(0x0000000000de4e44), 0.725, 0.868381777343750) \ - STEP( 146, UINT64_C(0x0000000000dfd09a), 0.730, 0.874276805800000) \ - STEP( 147, UINT64_C(0x0000000000e14a07), 0.735, 0.880035843881250) \ - STEP( 148, UINT64_C(0x0000000000e2ba74), 0.740, 0.885657574400000) \ - STEP( 149, UINT64_C(0x0000000000e421cd), 0.745, 0.891140785918750) \ - STEP( 150, UINT64_C(0x0000000000e58000), 0.750, 0.896484375000000) \ - STEP( 151, UINT64_C(0x0000000000e6d4fb), 0.755, 0.901687348456250) \ - STEP( 152, UINT64_C(0x0000000000e820b0), 0.760, 0.906748825600000) \ - STEP( 153, UINT64_C(0x0000000000e96313), 0.765, 0.911668040493750) \ - STEP( 154, UINT64_C(0x0000000000ea9c18), 0.770, 0.916444344200000) \ - STEP( 155, UINT64_C(0x0000000000ebcbb7), 0.775, 0.921077207031250) \ - STEP( 156, UINT64_C(0x0000000000ecf1e8), 0.780, 0.925566220800000) \ - STEP( 157, UINT64_C(0x0000000000ee0ea7), 0.785, 0.929911101068750) \ - STEP( 158, UINT64_C(0x0000000000ef21f1), 0.790, 0.934111689400000) \ - STEP( 159, UINT64_C(0x0000000000f02bc6), 0.795, 0.938167955606250) \ - STEP( 160, UINT64_C(0x0000000000f12c27), 0.800, 0.942080000000000) \ - STEP( 161, UINT64_C(0x0000000000f22319), 0.805, 0.945848055643750) \ - STEP( 162, UINT64_C(0x0000000000f310a1), 0.810, 
0.949472490600000) \ - STEP( 163, UINT64_C(0x0000000000f3f4c7), 0.815, 0.952953810181250) \ - STEP( 164, UINT64_C(0x0000000000f4cf98), 0.820, 0.956292659200000) \ - STEP( 165, UINT64_C(0x0000000000f5a120), 0.825, 0.959489824218750) \ - STEP( 166, UINT64_C(0x0000000000f6696e), 0.830, 0.962546235800000) \ - STEP( 167, UINT64_C(0x0000000000f72894), 0.835, 0.965462970756250) \ - STEP( 168, UINT64_C(0x0000000000f7dea8), 0.840, 0.968241254400000) \ - STEP( 169, UINT64_C(0x0000000000f88bc0), 0.845, 0.970882462793750) \ - STEP( 170, UINT64_C(0x0000000000f92ff6), 0.850, 0.973388125000000) \ - STEP( 171, UINT64_C(0x0000000000f9cb67), 0.855, 0.975759925331250) \ - STEP( 172, UINT64_C(0x0000000000fa5e30), 0.860, 0.977999705600000) \ - STEP( 173, UINT64_C(0x0000000000fae874), 0.865, 0.980109467368750) \ - STEP( 174, UINT64_C(0x0000000000fb6a57), 0.870, 0.982091374200000) \ - STEP( 175, UINT64_C(0x0000000000fbe400), 0.875, 0.983947753906250) \ - STEP( 176, UINT64_C(0x0000000000fc5598), 0.880, 0.985681100800000) \ - STEP( 177, UINT64_C(0x0000000000fcbf4e), 0.885, 0.987294077943750) \ - STEP( 178, UINT64_C(0x0000000000fd214f), 0.890, 0.988789519400000) \ - STEP( 179, UINT64_C(0x0000000000fd7bcf), 0.895, 0.990170432481250) \ - STEP( 180, UINT64_C(0x0000000000fdcf03), 0.900, 0.991440000000000) \ - STEP( 181, UINT64_C(0x0000000000fe1b23), 0.905, 0.992601582518750) \ - STEP( 182, UINT64_C(0x0000000000fe606a), 0.910, 0.993658720600000) \ - STEP( 183, UINT64_C(0x0000000000fe9f18), 0.915, 0.994615137056250) \ - STEP( 184, UINT64_C(0x0000000000fed76e), 0.920, 0.995474739200000) \ - STEP( 185, UINT64_C(0x0000000000ff09b0), 0.925, 0.996241621093750) \ - STEP( 186, UINT64_C(0x0000000000ff3627), 0.930, 0.996920065800000) \ - STEP( 187, UINT64_C(0x0000000000ff5d1d), 0.935, 0.997514547631250) \ - STEP( 188, UINT64_C(0x0000000000ff7ee0), 0.940, 0.998029734400000) \ - STEP( 189, UINT64_C(0x0000000000ff9bc3), 0.945, 0.998470489668750) \ - STEP( 190, UINT64_C(0x0000000000ffb419), 0.950, 
0.998841875000000) \ - STEP( 191, UINT64_C(0x0000000000ffc83d), 0.955, 0.999149152206250) \ - STEP( 192, UINT64_C(0x0000000000ffd888), 0.960, 0.999397785600000) \ - STEP( 193, UINT64_C(0x0000000000ffe55b), 0.965, 0.999593444243750) \ - STEP( 194, UINT64_C(0x0000000000ffef17), 0.970, 0.999742004200000) \ - STEP( 195, UINT64_C(0x0000000000fff623), 0.975, 0.999849550781250) \ - STEP( 196, UINT64_C(0x0000000000fffae9), 0.980, 0.999922380800000) \ - STEP( 197, UINT64_C(0x0000000000fffdd6), 0.985, 0.999967004818750) \ - STEP( 198, UINT64_C(0x0000000000ffff5a), 0.990, 0.999990149400000) \ - STEP( 199, UINT64_C(0x0000000000ffffeb), 0.995, 0.999998759356250) \ - STEP( 200, UINT64_C(0x0000000001000000), 1.000, 1.000000000000000) \ +#define SMOOTHSTEP_VARIANT "smoother" +#define SMOOTHSTEP_NSTEPS 200 +#define SMOOTHSTEP_BFP 24 +#define SMOOTHSTEP \ + /* STEP(step, h, x, y) */ \ + STEP(1, UINT64_C(0x0000000000000014), 0.005, 0.000001240643750) \ + STEP(2, UINT64_C(0x00000000000000a5), 0.010, 0.000009850600000) \ + STEP(3, UINT64_C(0x0000000000000229), 0.015, 0.000032995181250) \ + STEP(4, UINT64_C(0x0000000000000516), 0.020, 0.000077619200000) \ + STEP(5, UINT64_C(0x00000000000009dc), 0.025, 0.000150449218750) \ + STEP(6, UINT64_C(0x00000000000010e8), 0.030, 0.000257995800000) \ + STEP(7, UINT64_C(0x0000000000001aa4), 0.035, 0.000406555756250) \ + STEP(8, UINT64_C(0x0000000000002777), 0.040, 0.000602214400000) \ + STEP(9, UINT64_C(0x00000000000037c2), 0.045, 0.000850847793750) \ + STEP(10, UINT64_C(0x0000000000004be6), 0.050, 0.001158125000000) \ + STEP(11, UINT64_C(0x000000000000643c), 0.055, 0.001529510331250) \ + STEP(12, UINT64_C(0x000000000000811f), 0.060, 0.001970265600000) \ + STEP(13, UINT64_C(0x000000000000a2e2), 0.065, 0.002485452368750) \ + STEP(14, UINT64_C(0x000000000000c9d8), 0.070, 0.003079934200000) \ + STEP(15, UINT64_C(0x000000000000f64f), 0.075, 0.003758378906250) \ + STEP(16, UINT64_C(0x0000000000012891), 0.080, 0.004525260800000) \ + STEP(17, 
UINT64_C(0x00000000000160e7), 0.085, 0.005384862943750) \ + STEP(18, UINT64_C(0x0000000000019f95), 0.090, 0.006341279400000) \ + STEP(19, UINT64_C(0x000000000001e4dc), 0.095, 0.007398417481250) \ + STEP(20, UINT64_C(0x00000000000230fc), 0.100, 0.008560000000000) \ + STEP(21, UINT64_C(0x0000000000028430), 0.105, 0.009829567518750) \ + STEP(22, UINT64_C(0x000000000002deb0), 0.110, 0.011210480600000) \ + STEP(23, UINT64_C(0x00000000000340b1), 0.115, 0.012705922056250) \ + STEP(24, UINT64_C(0x000000000003aa67), 0.120, 0.014318899200000) \ + STEP(25, UINT64_C(0x0000000000041c00), 0.125, 0.016052246093750) \ + STEP(26, UINT64_C(0x00000000000495a8), 0.130, 0.017908625800000) \ + STEP(27, UINT64_C(0x000000000005178b), 0.135, 0.019890532631250) \ + STEP(28, UINT64_C(0x000000000005a1cf), 0.140, 0.022000294400000) \ + STEP(29, UINT64_C(0x0000000000063498), 0.145, 0.024240074668750) \ + STEP(30, UINT64_C(0x000000000006d009), 0.150, 0.026611875000000) \ + STEP(31, UINT64_C(0x000000000007743f), 0.155, 0.029117537206250) \ + STEP(32, UINT64_C(0x0000000000082157), 0.160, 0.031758745600000) \ + STEP(33, UINT64_C(0x000000000008d76b), 0.165, 0.034537029243750) \ + STEP(34, UINT64_C(0x0000000000099691), 0.170, 0.037453764200000) \ + STEP(35, UINT64_C(0x00000000000a5edf), 0.175, 0.040510175781250) \ + STEP(36, UINT64_C(0x00000000000b3067), 0.180, 0.043707340800000) \ + STEP(37, UINT64_C(0x00000000000c0b38), 0.185, 0.047046189818750) \ + STEP(38, UINT64_C(0x00000000000cef5e), 0.190, 0.050527509400000) \ + STEP(39, UINT64_C(0x00000000000ddce6), 0.195, 0.054151944356250) \ + STEP(40, UINT64_C(0x00000000000ed3d8), 0.200, 0.057920000000000) \ + STEP(41, UINT64_C(0x00000000000fd439), 0.205, 0.061832044393750) \ + STEP(42, UINT64_C(0x000000000010de0e), 0.210, 0.065888310600000) \ + STEP(43, UINT64_C(0x000000000011f158), 0.215, 0.070088898931250) \ + STEP(44, UINT64_C(0x0000000000130e17), 0.220, 0.074433779200000) \ + STEP(45, UINT64_C(0x0000000000143448), 0.225, 0.078922792968750) \ + 
STEP(46, UINT64_C(0x00000000001563e7), 0.230, 0.083555655800000) \ + STEP(47, UINT64_C(0x0000000000169cec), 0.235, 0.088331959506250) \ + STEP(48, UINT64_C(0x000000000017df4f), 0.240, 0.093251174400000) \ + STEP(49, UINT64_C(0x0000000000192b04), 0.245, 0.098312651543750) \ + STEP(50, UINT64_C(0x00000000001a8000), 0.250, 0.103515625000000) \ + STEP(51, UINT64_C(0x00000000001bde32), 0.255, 0.108859214081250) \ + STEP(52, UINT64_C(0x00000000001d458b), 0.260, 0.114342425600000) \ + STEP(53, UINT64_C(0x00000000001eb5f8), 0.265, 0.119964156118750) \ + STEP(54, UINT64_C(0x0000000000202f65), 0.270, 0.125723194200000) \ + STEP(55, UINT64_C(0x000000000021b1bb), 0.275, 0.131618222656250) \ + STEP(56, UINT64_C(0x0000000000233ce3), 0.280, 0.137647820800000) \ + STEP(57, UINT64_C(0x000000000024d0c3), 0.285, 0.143810466693750) \ + STEP(58, UINT64_C(0x0000000000266d40), 0.290, 0.150104539400000) \ + STEP(59, UINT64_C(0x000000000028123d), 0.295, 0.156528321231250) \ + STEP(60, UINT64_C(0x000000000029bf9c), 0.300, 0.163080000000000) \ + STEP(61, UINT64_C(0x00000000002b753d), 0.305, 0.169757671268750) \ + STEP(62, UINT64_C(0x00000000002d32fe), 0.310, 0.176559340600000) \ + STEP(63, UINT64_C(0x00000000002ef8bc), 0.315, 0.183482925806250) \ + STEP(64, UINT64_C(0x000000000030c654), 0.320, 0.190526259200000) \ + STEP(65, UINT64_C(0x0000000000329b9f), 0.325, 0.197687089843750) \ + STEP(66, UINT64_C(0x0000000000347875), 0.330, 0.204963085800000) \ + STEP(67, UINT64_C(0x0000000000365cb0), 0.335, 0.212351836381250) \ + STEP(68, UINT64_C(0x0000000000384825), 0.340, 0.219850854400000) \ + STEP(69, UINT64_C(0x00000000003a3aa8), 0.345, 0.227457578418750) \ + STEP(70, UINT64_C(0x00000000003c340f), 0.350, 0.235169375000000) \ + STEP(71, UINT64_C(0x00000000003e342b), 0.355, 0.242983540956250) \ + STEP(72, UINT64_C(0x0000000000403ace), 0.360, 0.250897305600000) \ + STEP(73, UINT64_C(0x00000000004247c8), 0.365, 0.258907832993750) \ + STEP(74, UINT64_C(0x0000000000445ae9), 0.370, 0.267012224200000) \ 
+ STEP(75, UINT64_C(0x0000000000467400), 0.375, 0.275207519531250) \ + STEP(76, UINT64_C(0x00000000004892d8), 0.380, 0.283490700800000) \ + STEP(77, UINT64_C(0x00000000004ab740), 0.385, 0.291858693568750) \ + STEP(78, UINT64_C(0x00000000004ce102), 0.390, 0.300308369400000) \ + STEP(79, UINT64_C(0x00000000004f0fe9), 0.395, 0.308836548106250) \ + STEP(80, UINT64_C(0x00000000005143bf), 0.400, 0.317440000000000) \ + STEP(81, UINT64_C(0x0000000000537c4d), 0.405, 0.326115448143750) \ + STEP(82, UINT64_C(0x000000000055b95b), 0.410, 0.334859570600000) \ + STEP(83, UINT64_C(0x000000000057fab1), 0.415, 0.343669002681250) \ + STEP(84, UINT64_C(0x00000000005a4015), 0.420, 0.352540339200000) \ + STEP(85, UINT64_C(0x00000000005c894e), 0.425, 0.361470136718750) \ + STEP(86, UINT64_C(0x00000000005ed622), 0.430, 0.370454915800000) \ + STEP(87, UINT64_C(0x0000000000612655), 0.435, 0.379491163256250) \ + STEP(88, UINT64_C(0x00000000006379ac), 0.440, 0.388575334400000) \ + STEP(89, UINT64_C(0x000000000065cfeb), 0.445, 0.397703855293750) \ + STEP(90, UINT64_C(0x00000000006828d6), 0.450, 0.406873125000000) \ + STEP(91, UINT64_C(0x00000000006a842f), 0.455, 0.416079517831250) \ + STEP(92, UINT64_C(0x00000000006ce1bb), 0.460, 0.425319385600000) \ + STEP(93, UINT64_C(0x00000000006f413a), 0.465, 0.434589059868750) \ + STEP(94, UINT64_C(0x000000000071a270), 0.470, 0.443884854200000) \ + STEP(95, UINT64_C(0x000000000074051d), 0.475, 0.453203066406250) \ + STEP(96, UINT64_C(0x0000000000766905), 0.480, 0.462539980800000) \ + STEP(97, UINT64_C(0x000000000078cde7), 0.485, 0.471891870443750) \ + STEP(98, UINT64_C(0x00000000007b3387), 0.490, 0.481254999400000) \ + STEP(99, UINT64_C(0x00000000007d99a4), 0.495, 0.490625624981250) \ + STEP(100, UINT64_C(0x0000000000800000), 0.500, 0.500000000000000) \ + STEP(101, UINT64_C(0x000000000082665b), 0.505, 0.509374375018750) \ + STEP(102, UINT64_C(0x000000000084cc78), 0.510, 0.518745000600000) \ + STEP(103, UINT64_C(0x0000000000873218), 0.515, 
0.528108129556250) \ + STEP(104, UINT64_C(0x00000000008996fa), 0.520, 0.537460019200000) \ + STEP(105, UINT64_C(0x00000000008bfae2), 0.525, 0.546796933593750) \ + STEP(106, UINT64_C(0x00000000008e5d8f), 0.530, 0.556115145800000) \ + STEP(107, UINT64_C(0x000000000090bec5), 0.535, 0.565410940131250) \ + STEP(108, UINT64_C(0x0000000000931e44), 0.540, 0.574680614400000) \ + STEP(109, UINT64_C(0x0000000000957bd0), 0.545, 0.583920482168750) \ + STEP(110, UINT64_C(0x000000000097d729), 0.550, 0.593126875000000) \ + STEP(111, UINT64_C(0x00000000009a3014), 0.555, 0.602296144706250) \ + STEP(112, UINT64_C(0x00000000009c8653), 0.560, 0.611424665600000) \ + STEP(113, UINT64_C(0x00000000009ed9aa), 0.565, 0.620508836743750) \ + STEP(114, UINT64_C(0x0000000000a129dd), 0.570, 0.629545084200000) \ + STEP(115, UINT64_C(0x0000000000a376b1), 0.575, 0.638529863281250) \ + STEP(116, UINT64_C(0x0000000000a5bfea), 0.580, 0.647459660800000) \ + STEP(117, UINT64_C(0x0000000000a8054e), 0.585, 0.656330997318750) \ + STEP(118, UINT64_C(0x0000000000aa46a4), 0.590, 0.665140429400000) \ + STEP(119, UINT64_C(0x0000000000ac83b2), 0.595, 0.673884551856250) \ + STEP(120, UINT64_C(0x0000000000aebc40), 0.600, 0.682560000000000) \ + STEP(121, UINT64_C(0x0000000000b0f016), 0.605, 0.691163451893750) \ + STEP(122, UINT64_C(0x0000000000b31efd), 0.610, 0.699691630600000) \ + STEP(123, UINT64_C(0x0000000000b548bf), 0.615, 0.708141306431250) \ + STEP(124, UINT64_C(0x0000000000b76d27), 0.620, 0.716509299200000) \ + STEP(125, UINT64_C(0x0000000000b98c00), 0.625, 0.724792480468750) \ + STEP(126, UINT64_C(0x0000000000bba516), 0.630, 0.732987775800000) \ + STEP(127, UINT64_C(0x0000000000bdb837), 0.635, 0.741092167006250) \ + STEP(128, UINT64_C(0x0000000000bfc531), 0.640, 0.749102694400000) \ + STEP(129, UINT64_C(0x0000000000c1cbd4), 0.645, 0.757016459043750) \ + STEP(130, UINT64_C(0x0000000000c3cbf0), 0.650, 0.764830625000000) \ + STEP(131, UINT64_C(0x0000000000c5c557), 0.655, 0.772542421581250) \ + STEP(132, 
UINT64_C(0x0000000000c7b7da), 0.660, 0.780149145600000) \ + STEP(133, UINT64_C(0x0000000000c9a34f), 0.665, 0.787648163618750) \ + STEP(134, UINT64_C(0x0000000000cb878a), 0.670, 0.795036914200000) \ + STEP(135, UINT64_C(0x0000000000cd6460), 0.675, 0.802312910156250) \ + STEP(136, UINT64_C(0x0000000000cf39ab), 0.680, 0.809473740800000) \ + STEP(137, UINT64_C(0x0000000000d10743), 0.685, 0.816517074193750) \ + STEP(138, UINT64_C(0x0000000000d2cd01), 0.690, 0.823440659400000) \ + STEP(139, UINT64_C(0x0000000000d48ac2), 0.695, 0.830242328731250) \ + STEP(140, UINT64_C(0x0000000000d64063), 0.700, 0.836920000000000) \ + STEP(141, UINT64_C(0x0000000000d7edc2), 0.705, 0.843471678768750) \ + STEP(142, UINT64_C(0x0000000000d992bf), 0.710, 0.849895460600000) \ + STEP(143, UINT64_C(0x0000000000db2f3c), 0.715, 0.856189533306250) \ + STEP(144, UINT64_C(0x0000000000dcc31c), 0.720, 0.862352179200000) \ + STEP(145, UINT64_C(0x0000000000de4e44), 0.725, 0.868381777343750) \ + STEP(146, UINT64_C(0x0000000000dfd09a), 0.730, 0.874276805800000) \ + STEP(147, UINT64_C(0x0000000000e14a07), 0.735, 0.880035843881250) \ + STEP(148, UINT64_C(0x0000000000e2ba74), 0.740, 0.885657574400000) \ + STEP(149, UINT64_C(0x0000000000e421cd), 0.745, 0.891140785918750) \ + STEP(150, UINT64_C(0x0000000000e58000), 0.750, 0.896484375000000) \ + STEP(151, UINT64_C(0x0000000000e6d4fb), 0.755, 0.901687348456250) \ + STEP(152, UINT64_C(0x0000000000e820b0), 0.760, 0.906748825600000) \ + STEP(153, UINT64_C(0x0000000000e96313), 0.765, 0.911668040493750) \ + STEP(154, UINT64_C(0x0000000000ea9c18), 0.770, 0.916444344200000) \ + STEP(155, UINT64_C(0x0000000000ebcbb7), 0.775, 0.921077207031250) \ + STEP(156, UINT64_C(0x0000000000ecf1e8), 0.780, 0.925566220800000) \ + STEP(157, UINT64_C(0x0000000000ee0ea7), 0.785, 0.929911101068750) \ + STEP(158, UINT64_C(0x0000000000ef21f1), 0.790, 0.934111689400000) \ + STEP(159, UINT64_C(0x0000000000f02bc6), 0.795, 0.938167955606250) \ + STEP(160, UINT64_C(0x0000000000f12c27), 0.800, 
0.942080000000000) \ + STEP(161, UINT64_C(0x0000000000f22319), 0.805, 0.945848055643750) \ + STEP(162, UINT64_C(0x0000000000f310a1), 0.810, 0.949472490600000) \ + STEP(163, UINT64_C(0x0000000000f3f4c7), 0.815, 0.952953810181250) \ + STEP(164, UINT64_C(0x0000000000f4cf98), 0.820, 0.956292659200000) \ + STEP(165, UINT64_C(0x0000000000f5a120), 0.825, 0.959489824218750) \ + STEP(166, UINT64_C(0x0000000000f6696e), 0.830, 0.962546235800000) \ + STEP(167, UINT64_C(0x0000000000f72894), 0.835, 0.965462970756250) \ + STEP(168, UINT64_C(0x0000000000f7dea8), 0.840, 0.968241254400000) \ + STEP(169, UINT64_C(0x0000000000f88bc0), 0.845, 0.970882462793750) \ + STEP(170, UINT64_C(0x0000000000f92ff6), 0.850, 0.973388125000000) \ + STEP(171, UINT64_C(0x0000000000f9cb67), 0.855, 0.975759925331250) \ + STEP(172, UINT64_C(0x0000000000fa5e30), 0.860, 0.977999705600000) \ + STEP(173, UINT64_C(0x0000000000fae874), 0.865, 0.980109467368750) \ + STEP(174, UINT64_C(0x0000000000fb6a57), 0.870, 0.982091374200000) \ + STEP(175, UINT64_C(0x0000000000fbe400), 0.875, 0.983947753906250) \ + STEP(176, UINT64_C(0x0000000000fc5598), 0.880, 0.985681100800000) \ + STEP(177, UINT64_C(0x0000000000fcbf4e), 0.885, 0.987294077943750) \ + STEP(178, UINT64_C(0x0000000000fd214f), 0.890, 0.988789519400000) \ + STEP(179, UINT64_C(0x0000000000fd7bcf), 0.895, 0.990170432481250) \ + STEP(180, UINT64_C(0x0000000000fdcf03), 0.900, 0.991440000000000) \ + STEP(181, UINT64_C(0x0000000000fe1b23), 0.905, 0.992601582518750) \ + STEP(182, UINT64_C(0x0000000000fe606a), 0.910, 0.993658720600000) \ + STEP(183, UINT64_C(0x0000000000fe9f18), 0.915, 0.994615137056250) \ + STEP(184, UINT64_C(0x0000000000fed76e), 0.920, 0.995474739200000) \ + STEP(185, UINT64_C(0x0000000000ff09b0), 0.925, 0.996241621093750) \ + STEP(186, UINT64_C(0x0000000000ff3627), 0.930, 0.996920065800000) \ + STEP(187, UINT64_C(0x0000000000ff5d1d), 0.935, 0.997514547631250) \ + STEP(188, UINT64_C(0x0000000000ff7ee0), 0.940, 0.998029734400000) \ + STEP(189, 
UINT64_C(0x0000000000ff9bc3), 0.945, 0.998470489668750) \ + STEP(190, UINT64_C(0x0000000000ffb419), 0.950, 0.998841875000000) \ + STEP(191, UINT64_C(0x0000000000ffc83d), 0.955, 0.999149152206250) \ + STEP(192, UINT64_C(0x0000000000ffd888), 0.960, 0.999397785600000) \ + STEP(193, UINT64_C(0x0000000000ffe55b), 0.965, 0.999593444243750) \ + STEP(194, UINT64_C(0x0000000000ffef17), 0.970, 0.999742004200000) \ + STEP(195, UINT64_C(0x0000000000fff623), 0.975, 0.999849550781250) \ + STEP(196, UINT64_C(0x0000000000fffae9), 0.980, 0.999922380800000) \ + STEP(197, UINT64_C(0x0000000000fffdd6), 0.985, 0.999967004818750) \ + STEP(198, UINT64_C(0x0000000000ffff5a), 0.990, 0.999990149400000) \ + STEP(199, UINT64_C(0x0000000000ffffeb), 0.995, 0.999998759356250) \ + STEP(200, UINT64_C(0x0000000001000000), 1.000, 1.000000000000000) #endif /* JEMALLOC_INTERNAL_SMOOTHSTEP_H */ diff --git a/include/jemalloc/internal/spin.h b/include/jemalloc/internal/spin.h index 87c400d5..4cd5e1db 100644 --- a/include/jemalloc/internal/spin.h +++ b/include/jemalloc/internal/spin.h @@ -3,7 +3,8 @@ #include "jemalloc/internal/jemalloc_preamble.h" -#define SPIN_INITIALIZER {0U} +#define SPIN_INITIALIZER \ + { 0U } typedef struct { unsigned iteration; @@ -11,12 +12,12 @@ typedef struct { static inline void spin_cpu_spinwait(void) { -# if HAVE_CPU_SPINWAIT +#if HAVE_CPU_SPINWAIT CPU_SPINWAIT; -# else +#else volatile int x = 0; x = x; -# endif +#endif } static inline void diff --git a/include/jemalloc/internal/stats.h b/include/jemalloc/internal/stats.h index a5f1be32..1c7b23e0 100644 --- a/include/jemalloc/internal/stats.h +++ b/include/jemalloc/internal/stats.h @@ -7,32 +7,32 @@ #include "jemalloc/internal/tsd_types.h" /* OPTION(opt, var_name, default, set_value_to) */ -#define STATS_PRINT_OPTIONS \ - OPTION('J', json, false, true) \ - OPTION('g', general, true, false) \ - OPTION('m', merged, config_stats, false) \ - OPTION('d', destroyed, config_stats, false) \ - OPTION('a', unmerged, config_stats, 
false) \ - OPTION('b', bins, true, false) \ - OPTION('l', large, true, false) \ - OPTION('x', mutex, true, false) \ - OPTION('e', extents, true, false) \ - OPTION('h', hpa, config_stats, false) +#define STATS_PRINT_OPTIONS \ + OPTION('J', json, false, true) \ + OPTION('g', general, true, false) \ + OPTION('m', merged, config_stats, false) \ + OPTION('d', destroyed, config_stats, false) \ + OPTION('a', unmerged, config_stats, false) \ + OPTION('b', bins, true, false) \ + OPTION('l', large, true, false) \ + OPTION('x', mutex, true, false) \ + OPTION('e', extents, true, false) \ + OPTION('h', hpa, config_stats, false) enum { #define OPTION(o, v, d, s) stats_print_option_num_##v, - STATS_PRINT_OPTIONS + STATS_PRINT_OPTIONS #undef OPTION - stats_print_tot_num_options + stats_print_tot_num_options }; /* Options for stats_print. */ extern bool opt_stats_print; -extern char opt_stats_print_opts[stats_print_tot_num_options+1]; +extern char opt_stats_print_opts[stats_print_tot_num_options + 1]; /* Utilities for stats_interval. */ extern int64_t opt_stats_interval; -extern char opt_stats_interval_opts[stats_print_tot_num_options+1]; +extern char opt_stats_interval_opts[stats_print_tot_num_options + 1]; #define STATS_INTERVAL_DEFAULT -1 /* diff --git a/include/jemalloc/internal/sz.h b/include/jemalloc/internal/sz.h index 3a32e232..d75a3034 100644 --- a/include/jemalloc/internal/sz.h +++ b/include/jemalloc/internal/sz.h @@ -76,8 +76,9 @@ sz_psz2ind(size_t psz) { * SC_NGROUP. off_to_first_ps_rg begins from 1, instead of 0. e.g. * off_to_first_ps_rg is 1 when psz is (PAGE * SC_NGROUP + 1). */ - pszind_t off_to_first_ps_rg = (x < SC_LG_NGROUP + LG_PAGE) ? - 0 : x - (SC_LG_NGROUP + LG_PAGE); + pszind_t off_to_first_ps_rg = (x < SC_LG_NGROUP + LG_PAGE) + ? 0 + : x - (SC_LG_NGROUP + LG_PAGE); /* * Same as sc_s::lg_delta. @@ -85,8 +86,9 @@ sz_psz2ind(size_t psz) { * for each increase in offset, it's multiplied by two. * Therefore, lg_delta = LG_PAGE + (off_to_first_ps_rg - 1). 
*/ - pszind_t lg_delta = (off_to_first_ps_rg == 0) ? - LG_PAGE : LG_PAGE + (off_to_first_ps_rg - 1); + pszind_t lg_delta = (off_to_first_ps_rg == 0) + ? LG_PAGE + : LG_PAGE + (off_to_first_ps_rg - 1); /* * Let's write psz in binary, e.g. 0011 for 0x3, 0111 for 0x7. @@ -118,13 +120,13 @@ sz_pind2sz_compute(pszind_t pind) { size_t grp = pind >> SC_LG_NGROUP; size_t mod = pind & ((ZU(1) << SC_LG_NGROUP) - 1); - size_t grp_size_mask = ~((!!grp)-1); - size_t grp_size = ((ZU(1) << (LG_PAGE + (SC_LG_NGROUP-1))) << grp) + size_t grp_size_mask = ~((!!grp) - 1); + size_t grp_size = ((ZU(1) << (LG_PAGE + (SC_LG_NGROUP - 1))) << grp) & grp_size_mask; size_t shift = (grp == 0) ? 1 : grp; - size_t lg_delta = shift + (LG_PAGE-1); - size_t mod_size = (mod+1) << lg_delta; + size_t lg_delta = shift + (LG_PAGE - 1); + size_t mod_size = (mod + 1) << lg_delta; size_t sz = grp_size + mod_size; return sz; @@ -148,9 +150,10 @@ sz_psz2u(size_t psz) { if (unlikely(psz > SC_LARGE_MAXCLASS)) { return SC_LARGE_MAXCLASS + PAGE; } - size_t x = lg_floor((psz<<1)-1); - size_t lg_delta = (x < SC_LG_NGROUP + LG_PAGE + 1) ? - LG_PAGE : x - SC_LG_NGROUP - 1; + size_t x = lg_floor((psz << 1) - 1); + size_t lg_delta = (x < SC_LG_NGROUP + LG_PAGE + 1) + ? LG_PAGE + : x - SC_LG_NGROUP - 1; size_t delta = ZU(1) << lg_delta; size_t delta_mask = delta - 1; size_t usize = (psz + delta_mask) & ~delta_mask; @@ -174,17 +177,19 @@ sz_size2index_compute_inline(size_t size) { } #endif { - szind_t x = lg_floor((size<<1)-1); - szind_t shift = (x < SC_LG_NGROUP + LG_QUANTUM) ? 0 : - x - (SC_LG_NGROUP + LG_QUANTUM); + szind_t x = lg_floor((size << 1) - 1); + szind_t shift = (x < SC_LG_NGROUP + LG_QUANTUM) + ? 0 + : x - (SC_LG_NGROUP + LG_QUANTUM); szind_t grp = shift << SC_LG_NGROUP; szind_t lg_delta = (x < SC_LG_NGROUP + LG_QUANTUM + 1) - ? LG_QUANTUM : x - SC_LG_NGROUP - 1; + ? 
LG_QUANTUM + : x - SC_LG_NGROUP - 1; - size_t delta_inverse_mask = ZU(-1) << lg_delta; - szind_t mod = ((((size-1) & delta_inverse_mask) >> lg_delta)) & - ((ZU(1) << SC_LG_NGROUP) - 1); + size_t delta_inverse_mask = ZU(-1) << lg_delta; + szind_t mod = ((((size - 1) & delta_inverse_mask) >> lg_delta)) + & ((ZU(1) << SC_LG_NGROUP) - 1); szind_t index = SC_NTINY + grp + mod; return index; @@ -228,16 +233,16 @@ sz_index2size_compute_inline(szind_t index) { { size_t reduced_index = index - SC_NTINY; size_t grp = reduced_index >> SC_LG_NGROUP; - size_t mod = reduced_index & ((ZU(1) << SC_LG_NGROUP) - - 1); + size_t mod = reduced_index & ((ZU(1) << SC_LG_NGROUP) - 1); - size_t grp_size_mask = ~((!!grp)-1); - size_t grp_size = ((ZU(1) << (LG_QUANTUM + - (SC_LG_NGROUP-1))) << grp) & grp_size_mask; + size_t grp_size_mask = ~((!!grp) - 1); + size_t grp_size = ((ZU(1) << (LG_QUANTUM + (SC_LG_NGROUP - 1))) + << grp) + & grp_size_mask; size_t shift = (grp == 0) ? 1 : grp; - size_t lg_delta = shift + (LG_QUANTUM-1); - size_t mod_size = (mod+1) << lg_delta; + size_t lg_delta = shift + (LG_QUANTUM - 1); + size_t mod_size = (mod + 1) << lg_delta; size_t usize = grp_size + mod_size; return usize; @@ -269,8 +274,8 @@ sz_index2size_unsafe(szind_t index) { JEMALLOC_ALWAYS_INLINE size_t sz_index2size(szind_t index) { - assert(!sz_large_size_classes_disabled() || - index <= sz_size2index(USIZE_GROW_SLOW_THRESHOLD)); + assert(!sz_large_size_classes_disabled() + || index <= sz_size2index(USIZE_GROW_SLOW_THRESHOLD)); size_t size = sz_index2size_unsafe(index); /* * With large size classes disabled, the usize above @@ -285,8 +290,8 @@ sz_index2size(szind_t index) { * the size is no larger than USIZE_GROW_SLOW_THRESHOLD here * instead of SC_LARGE_MINCLASS. 
*/ - assert(!sz_large_size_classes_disabled() || - size <= USIZE_GROW_SLOW_THRESHOLD); + assert(!sz_large_size_classes_disabled() + || size <= USIZE_GROW_SLOW_THRESHOLD); return size; } @@ -309,9 +314,10 @@ sz_size2index_usize_fastpath(size_t size, szind_t *ind, size_t *usize) { JEMALLOC_ALWAYS_INLINE size_t sz_s2u_compute_using_delta(size_t size) { - size_t x = lg_floor((size<<1)-1); + size_t x = lg_floor((size << 1) - 1); size_t lg_delta = (x < SC_LG_NGROUP + LG_QUANTUM + 1) - ? LG_QUANTUM : x - SC_LG_NGROUP - 1; + ? LG_QUANTUM + : x - SC_LG_NGROUP - 1; size_t delta = ZU(1) << lg_delta; size_t delta_mask = delta - 1; size_t usize = (size + delta_mask) & ~delta_mask; @@ -331,8 +337,8 @@ sz_s2u_compute(size_t size) { if (size <= (ZU(1) << SC_LG_TINY_MAXCLASS)) { size_t lg_tmin = SC_LG_TINY_MAXCLASS - SC_NTINY + 1; size_t lg_ceil = lg_floor(pow2_ceil_zu(size)); - return (lg_ceil < lg_tmin ? (ZU(1) << lg_tmin) : - (ZU(1) << lg_ceil)); + return (lg_ceil < lg_tmin ? (ZU(1) << lg_tmin) + : (ZU(1) << lg_ceil)); } #endif if (size <= SC_SMALL_MAXCLASS || !sz_large_size_classes_disabled()) { diff --git a/include/jemalloc/internal/tcache_externs.h b/include/jemalloc/internal/tcache_externs.h index 76d601c3..73126db7 100644 --- a/include/jemalloc/internal/tcache_externs.h +++ b/include/jemalloc/internal/tcache_externs.h @@ -8,15 +8,15 @@ #include "jemalloc/internal/tcache_types.h" #include "jemalloc/internal/thread_event_registry.h" -extern bool opt_tcache; -extern size_t opt_tcache_max; -extern ssize_t opt_lg_tcache_nslots_mul; +extern bool opt_tcache; +extern size_t opt_tcache_max; +extern ssize_t opt_lg_tcache_nslots_mul; extern unsigned opt_tcache_nslots_small_min; extern unsigned opt_tcache_nslots_small_max; extern unsigned opt_tcache_nslots_large; -extern ssize_t opt_lg_tcache_shift; -extern size_t opt_tcache_gc_incr_bytes; -extern size_t opt_tcache_gc_delay_bytes; +extern ssize_t opt_lg_tcache_shift; +extern size_t opt_tcache_gc_incr_bytes; +extern size_t 
opt_tcache_gc_delay_bytes; extern unsigned opt_lg_tcache_flush_small_div; extern unsigned opt_lg_tcache_flush_large_div; @@ -27,14 +27,14 @@ extern unsigned opt_lg_tcache_flush_large_div; * it should not be changed on the fly. To change the number of tcache bins * in use, refer to tcache_nbins of each tcache. */ -extern unsigned global_do_not_change_tcache_nbins; +extern unsigned global_do_not_change_tcache_nbins; /* * Maximum cached size class. Same as above, this is only used during threads * initialization and should not be changed. To change the maximum cached size * class, refer to tcache_max of each tcache. */ -extern size_t global_do_not_change_tcache_maxclass; +extern size_t global_do_not_change_tcache_maxclass; /* * Explicit tcaches, managed via the tcache.{create,flush,destroy} mallctls and @@ -44,11 +44,11 @@ extern size_t global_do_not_change_tcache_maxclass; * touched. This allows the entire array to be allocated the first time an * explicit tcache is created without a disproportionate impact on memory usage. 
*/ -extern tcaches_t *tcaches; +extern tcaches_t *tcaches; size_t tcache_salloc(tsdn_t *tsdn, const void *ptr); -void *tcache_alloc_small_hard(tsdn_t *tsdn, arena_t *arena, tcache_t *tcache, - cache_bin_t *cache_bin, szind_t binind, bool *tcache_success); +void *tcache_alloc_small_hard(tsdn_t *tsdn, arena_t *arena, tcache_t *tcache, + cache_bin_t *cache_bin, szind_t binind, bool *tcache_success); void tcache_bin_flush_small(tsd_t *tsd, tcache_t *tcache, cache_bin_t *cache_bin, szind_t binind, unsigned rem); @@ -56,23 +56,23 @@ void tcache_bin_flush_large(tsd_t *tsd, tcache_t *tcache, cache_bin_t *cache_bin, szind_t binind, unsigned rem); void tcache_bin_flush_stashed(tsd_t *tsd, tcache_t *tcache, cache_bin_t *cache_bin, szind_t binind, bool is_small); -bool tcache_bin_info_default_init(const char *bin_settings_segment_cur, - size_t len_left); +bool tcache_bin_info_default_init( + const char *bin_settings_segment_cur, size_t len_left); bool tcache_bins_ncached_max_write(tsd_t *tsd, char *settings, size_t len); -bool tcache_bin_ncached_max_read(tsd_t *tsd, size_t bin_size, - cache_bin_sz_t *ncached_max); -void tcache_arena_reassociate(tsdn_t *tsdn, tcache_slow_t *tcache_slow, - tcache_t *tcache, arena_t *arena); +bool tcache_bin_ncached_max_read( + tsd_t *tsd, size_t bin_size, cache_bin_sz_t *ncached_max); +void tcache_arena_reassociate( + tsdn_t *tsdn, tcache_slow_t *tcache_slow, tcache_t *tcache, arena_t *arena); tcache_t *tcache_create_explicit(tsd_t *tsd); -void thread_tcache_max_set(tsd_t *tsd, size_t tcache_max); -void tcache_cleanup(tsd_t *tsd); -void tcache_stats_merge(tsdn_t *tsdn, tcache_t *tcache, arena_t *arena); -bool tcaches_create(tsd_t *tsd, base_t *base, unsigned *r_ind); -void tcaches_flush(tsd_t *tsd, unsigned ind); -void tcaches_destroy(tsd_t *tsd, unsigned ind); -bool tcache_boot(tsdn_t *tsdn, base_t *base); -void tcache_arena_associate(tsdn_t *tsdn, tcache_slow_t *tcache_slow, - tcache_t *tcache, arena_t *arena); +void 
thread_tcache_max_set(tsd_t *tsd, size_t tcache_max); +void tcache_cleanup(tsd_t *tsd); +void tcache_stats_merge(tsdn_t *tsdn, tcache_t *tcache, arena_t *arena); +bool tcaches_create(tsd_t *tsd, base_t *base, unsigned *r_ind); +void tcaches_flush(tsd_t *tsd, unsigned ind); +void tcaches_destroy(tsd_t *tsd, unsigned ind); +bool tcache_boot(tsdn_t *tsdn, base_t *base); +void tcache_arena_associate( + tsdn_t *tsdn, tcache_slow_t *tcache_slow, tcache_t *tcache, arena_t *arena); void tcache_prefork(tsdn_t *tsdn); void tcache_postfork_parent(tsdn_t *tsdn); void tcache_postfork_child(tsdn_t *tsdn); diff --git a/include/jemalloc/internal/tcache_inlines.h b/include/jemalloc/internal/tcache_inlines.h index e8e3b41f..6bd1b339 100644 --- a/include/jemalloc/internal/tcache_inlines.h +++ b/include/jemalloc/internal/tcache_inlines.h @@ -42,8 +42,8 @@ tcache_max_set(tcache_slow_t *tcache_slow, size_t tcache_max) { } static inline void -tcache_bin_settings_backup(tcache_t *tcache, - cache_bin_info_t tcache_bin_info[TCACHE_NBINS_MAX]) { +tcache_bin_settings_backup( + tcache_t *tcache, cache_bin_info_t tcache_bin_info[TCACHE_NBINS_MAX]) { for (unsigned i = 0; i < TCACHE_NBINS_MAX; i++) { cache_bin_info_init(&tcache_bin_info[i], cache_bin_ncached_max_get_unsafe(&tcache->bins[i])); @@ -51,8 +51,7 @@ tcache_bin_settings_backup(tcache_t *tcache, } JEMALLOC_ALWAYS_INLINE bool -tcache_bin_disabled(szind_t ind, cache_bin_t *bin, - tcache_slow_t *tcache_slow) { +tcache_bin_disabled(szind_t ind, cache_bin_t *bin, tcache_slow_t *tcache_slow) { assert(bin != NULL); assert(ind < TCACHE_NBINS_MAX); bool disabled = cache_bin_disabled(bin); @@ -66,7 +65,7 @@ tcache_bin_disabled(szind_t ind, cache_bin_t *bin, * ind >= nbins or ncached_max == 0. If a bin is enabled, it has * ind < nbins and ncached_max > 0. 
*/ - unsigned nbins = tcache_nbins_get(tcache_slow); + unsigned nbins = tcache_nbins_get(tcache_slow); cache_bin_sz_t ncached_max = cache_bin_ncached_max_get_unsafe(bin); if (ind >= nbins) { assert(disabled); @@ -88,10 +87,10 @@ tcache_bin_disabled(szind_t ind, cache_bin_t *bin, } JEMALLOC_ALWAYS_INLINE void * -tcache_alloc_small(tsd_t *tsd, arena_t *arena, tcache_t *tcache, - size_t size, szind_t binind, bool zero, bool slow_path) { +tcache_alloc_small(tsd_t *tsd, arena_t *arena, tcache_t *tcache, size_t size, + szind_t binind, bool zero, bool slow_path) { void *ret; - bool tcache_success; + bool tcache_success; assert(binind < SC_NBINS); cache_bin_t *bin = &tcache->bins[binind]; @@ -103,8 +102,8 @@ tcache_alloc_small(tsd_t *tsd, arena_t *arena, tcache_t *tcache, if (unlikely(arena == NULL)) { return NULL; } - if (unlikely(tcache_bin_disabled(binind, bin, - tcache->tcache_slow))) { + if (unlikely(tcache_bin_disabled( + binind, bin, tcache->tcache_slow))) { /* stats and zero are handled directly by the arena. 
*/ return arena_malloc_hard(tsd_tsdn(tsd), arena, size, binind, zero, /* slab */ true); @@ -112,8 +111,8 @@ tcache_alloc_small(tsd_t *tsd, arena_t *arena, tcache_t *tcache, tcache_bin_flush_stashed(tsd, tcache, bin, binind, /* is_small */ true); - ret = tcache_alloc_small_hard(tsd_tsdn(tsd), arena, tcache, - bin, binind, &tcache_hard_success); + ret = tcache_alloc_small_hard(tsd_tsdn(tsd), arena, tcache, bin, + binind, &tcache_hard_success); if (tcache_hard_success == false) { return NULL; } @@ -135,11 +134,11 @@ JEMALLOC_ALWAYS_INLINE void * tcache_alloc_large(tsd_t *tsd, arena_t *arena, tcache_t *tcache, size_t size, szind_t binind, bool zero, bool slow_path) { void *ret; - bool tcache_success; + bool tcache_success; cache_bin_t *bin = &tcache->bins[binind]; - assert(binind >= SC_NBINS && - !tcache_bin_disabled(binind, bin, tcache->tcache_slow)); + assert(binind >= SC_NBINS + && !tcache_bin_disabled(binind, bin, tcache->tcache_slow)); ret = cache_bin_alloc(bin, &tcache_success); assert(tcache_success == (ret != NULL)); if (unlikely(!tcache_success)) { @@ -174,8 +173,8 @@ tcache_alloc_large(tsd_t *tsd, arena_t *arena, tcache_t *tcache, size_t size, } JEMALLOC_ALWAYS_INLINE void -tcache_dalloc_small(tsd_t *tsd, tcache_t *tcache, void *ptr, szind_t binind, - bool slow_path) { +tcache_dalloc_small( + tsd_t *tsd, tcache_t *tcache, void *ptr, szind_t binind, bool slow_path) { assert(tcache_salloc(tsd_tsdn(tsd), ptr) <= SC_SMALL_MAXCLASS); cache_bin_t *bin = &tcache->bins[binind]; @@ -195,13 +194,13 @@ tcache_dalloc_small(tsd_t *tsd, tcache_t *tcache, void *ptr, szind_t binind, } if (unlikely(!cache_bin_dalloc_easy(bin, ptr))) { - if (unlikely(tcache_bin_disabled(binind, bin, - tcache->tcache_slow))) { + if (unlikely(tcache_bin_disabled( + binind, bin, tcache->tcache_slow))) { arena_dalloc_small(tsd_tsdn(tsd), ptr); return; } cache_bin_sz_t max = cache_bin_ncached_max_get(bin); - unsigned remain = max >> opt_lg_tcache_flush_small_div; + unsigned remain = max >> 
opt_lg_tcache_flush_small_div; tcache_bin_flush_small(tsd, tcache, bin, binind, remain); bool ret = cache_bin_dalloc_easy(bin, ptr); assert(ret); @@ -209,19 +208,18 @@ tcache_dalloc_small(tsd_t *tsd, tcache_t *tcache, void *ptr, szind_t binind, } JEMALLOC_ALWAYS_INLINE void -tcache_dalloc_large(tsd_t *tsd, tcache_t *tcache, void *ptr, szind_t binind, - bool slow_path) { - +tcache_dalloc_large( + tsd_t *tsd, tcache_t *tcache, void *ptr, szind_t binind, bool slow_path) { assert(tcache_salloc(tsd_tsdn(tsd), ptr) > SC_SMALL_MAXCLASS); - assert(tcache_salloc(tsd_tsdn(tsd), ptr) <= - tcache_max_get(tcache->tcache_slow)); - assert(!tcache_bin_disabled(binind, &tcache->bins[binind], - tcache->tcache_slow)); + assert(tcache_salloc(tsd_tsdn(tsd), ptr) + <= tcache_max_get(tcache->tcache_slow)); + assert(!tcache_bin_disabled( + binind, &tcache->bins[binind], tcache->tcache_slow)); cache_bin_t *bin = &tcache->bins[binind]; if (unlikely(!cache_bin_dalloc_easy(bin, ptr))) { - unsigned remain = cache_bin_ncached_max_get(bin) >> - opt_lg_tcache_flush_large_div; + unsigned remain = cache_bin_ncached_max_get(bin) + >> opt_lg_tcache_flush_large_div; tcache_bin_flush_large(tsd, tcache, bin, binind, remain); bool ret = cache_bin_dalloc_easy(bin, ptr); assert(ret); diff --git a/include/jemalloc/internal/tcache_structs.h b/include/jemalloc/internal/tcache_structs.h index e9a68152..2c000de3 100644 --- a/include/jemalloc/internal/tcache_structs.h +++ b/include/jemalloc/internal/tcache_structs.h @@ -30,45 +30,45 @@ struct tcache_slow_s { cache_bin_array_descriptor_t cache_bin_array_descriptor; /* The arena this tcache is associated with. */ - arena_t *arena; + arena_t *arena; /* The number of bins activated in the tcache. */ - unsigned tcache_nbins; + unsigned tcache_nbins; /* Last time GC has been performed. */ - nstime_t last_gc_time; + nstime_t last_gc_time; /* Next bin to GC. 
*/ - szind_t next_gc_bin; - szind_t next_gc_bin_small; - szind_t next_gc_bin_large; + szind_t next_gc_bin; + szind_t next_gc_bin_small; + szind_t next_gc_bin_large; /* For small bins, help determine how many items to fill at a time. */ - cache_bin_fill_ctl_t bin_fill_ctl_do_not_access_directly[SC_NBINS]; + cache_bin_fill_ctl_t bin_fill_ctl_do_not_access_directly[SC_NBINS]; /* For small bins, whether has been refilled since last GC. */ - bool bin_refilled[SC_NBINS]; + bool bin_refilled[SC_NBINS]; /* * For small bins, the number of items we can pretend to flush before * actually flushing. */ - uint8_t bin_flush_delay_items[SC_NBINS]; + uint8_t bin_flush_delay_items[SC_NBINS]; /* * The start of the allocation containing the dynamic allocation for * either the cache bins alone, or the cache bin memory as well as this * tcache_slow_t and its associated tcache_t. */ - void *dyn_alloc; + void *dyn_alloc; /* The associated bins. */ - tcache_t *tcache; + tcache_t *tcache; }; struct tcache_s { - tcache_slow_t *tcache_slow; - cache_bin_t bins[TCACHE_NBINS_MAX]; + tcache_slow_t *tcache_slow; + cache_bin_t bins[TCACHE_NBINS_MAX]; }; /* Linkage for list of available (previously used) explicit tcache IDs. */ struct tcaches_s { union { - tcache_t *tcache; - tcaches_t *next; + tcache_t *tcache; + tcaches_t *next; }; }; diff --git a/include/jemalloc/internal/tcache_types.h b/include/jemalloc/internal/tcache_types.h index b3828ecf..27d80d3c 100644 --- a/include/jemalloc/internal/tcache_types.h +++ b/include/jemalloc/internal/tcache_types.h @@ -5,12 +5,16 @@ #include "jemalloc/internal/sc.h" typedef struct tcache_slow_s tcache_slow_t; -typedef struct tcache_s tcache_t; -typedef struct tcaches_s tcaches_t; +typedef struct tcache_s tcache_t; +typedef struct tcaches_s tcaches_t; /* Used in TSD static initializer only. Real init in tsd_tcache_data_init(). 
*/ -#define TCACHE_ZERO_INITIALIZER {0} -#define TCACHE_SLOW_ZERO_INITIALIZER {{0}} +#define TCACHE_ZERO_INITIALIZER \ + { 0 } +#define TCACHE_SLOW_ZERO_INITIALIZER \ + { \ + { 0 } \ + } /* Used in TSD static initializer only. Will be initialized to opt_tcache. */ #define TCACHE_ENABLED_ZERO_INITIALIZER false @@ -21,9 +25,11 @@ typedef struct tcaches_s tcaches_t; #define TCACHE_LG_MAXCLASS_LIMIT LG_USIZE_GROW_SLOW_THRESHOLD #define TCACHE_MAXCLASS_LIMIT ((size_t)1 << TCACHE_LG_MAXCLASS_LIMIT) -#define TCACHE_NBINS_MAX (SC_NBINS + SC_NGROUP * \ - (TCACHE_LG_MAXCLASS_LIMIT - SC_LG_LARGE_MINCLASS) + 1) -#define TCACHE_GC_NEIGHBOR_LIMIT ((uintptr_t)1 << 21) /* 2M */ +#define TCACHE_NBINS_MAX \ + (SC_NBINS \ + + SC_NGROUP * (TCACHE_LG_MAXCLASS_LIMIT - SC_LG_LARGE_MINCLASS) \ + + 1) +#define TCACHE_GC_NEIGHBOR_LIMIT ((uintptr_t)1 << 21) /* 2M */ #define TCACHE_GC_INTERVAL_NS ((uint64_t)10 * KQU(1000000)) /* 10ms */ #define TCACHE_GC_SMALL_NBINS_MAX ((SC_NBINS > 8) ? (SC_NBINS >> 3) : 1) #define TCACHE_GC_LARGE_NBINS_MAX 1 diff --git a/include/jemalloc/internal/test_hooks.h b/include/jemalloc/internal/test_hooks.h index af3f2755..35f3a211 100644 --- a/include/jemalloc/internal/test_hooks.h +++ b/include/jemalloc/internal/test_hooks.h @@ -7,20 +7,22 @@ extern JEMALLOC_EXPORT void (*test_hooks_arena_new_hook)(void); extern JEMALLOC_EXPORT void (*test_hooks_libc_hook)(void); #if defined(JEMALLOC_JET) || defined(JEMALLOC_UNIT_TEST) -# define JEMALLOC_TEST_HOOK(fn, hook) ((void)(hook != NULL && (hook(), 0)), fn) +# define JEMALLOC_TEST_HOOK(fn, hook) \ + ((void)(hook != NULL && (hook(), 0)), fn) -# define open JEMALLOC_TEST_HOOK(open, test_hooks_libc_hook) -# define read JEMALLOC_TEST_HOOK(read, test_hooks_libc_hook) -# define write JEMALLOC_TEST_HOOK(write, test_hooks_libc_hook) -# define readlink JEMALLOC_TEST_HOOK(readlink, test_hooks_libc_hook) -# define close JEMALLOC_TEST_HOOK(close, test_hooks_libc_hook) -# define creat JEMALLOC_TEST_HOOK(creat, test_hooks_libc_hook) -# 
define secure_getenv JEMALLOC_TEST_HOOK(secure_getenv, test_hooks_libc_hook) +# define open JEMALLOC_TEST_HOOK(open, test_hooks_libc_hook) +# define read JEMALLOC_TEST_HOOK(read, test_hooks_libc_hook) +# define write JEMALLOC_TEST_HOOK(write, test_hooks_libc_hook) +# define readlink JEMALLOC_TEST_HOOK(readlink, test_hooks_libc_hook) +# define close JEMALLOC_TEST_HOOK(close, test_hooks_libc_hook) +# define creat JEMALLOC_TEST_HOOK(creat, test_hooks_libc_hook) +# define secure_getenv \ + JEMALLOC_TEST_HOOK(secure_getenv, test_hooks_libc_hook) /* Note that this is undef'd and re-define'd in src/prof.c. */ -# define _Unwind_Backtrace JEMALLOC_TEST_HOOK(_Unwind_Backtrace, test_hooks_libc_hook) +# define _Unwind_Backtrace \ + JEMALLOC_TEST_HOOK(_Unwind_Backtrace, test_hooks_libc_hook) #else -# define JEMALLOC_TEST_HOOK(fn, hook) fn +# define JEMALLOC_TEST_HOOK(fn, hook) fn #endif - #endif /* JEMALLOC_INTERNAL_TEST_HOOKS_H */ diff --git a/include/jemalloc/internal/thread_event.h b/include/jemalloc/internal/thread_event.h index bf9ca3cc..e9e2b6cd 100644 --- a/include/jemalloc/internal/thread_event.h +++ b/include/jemalloc/internal/thread_event.h @@ -37,7 +37,7 @@ #define TE_INVALID_ELAPSED UINT64_MAX typedef struct te_ctx_s { - bool is_alloc; + bool is_alloc; uint64_t *current; uint64_t *last_event; uint64_t *next_event; @@ -48,22 +48,20 @@ void te_assert_invariants_debug(tsd_t *tsd); void te_event_trigger(tsd_t *tsd, te_ctx_t *ctx); void te_recompute_fast_threshold(tsd_t *tsd); void tsd_te_init(tsd_t *tsd); -void te_adjust_thresholds_helper(tsd_t *tsd, te_ctx_t *ctx, - uint64_t wait); +void te_adjust_thresholds_helper(tsd_t *tsd, te_ctx_t *ctx, uint64_t wait); /* List of all thread event counters. 
*/ -#define ITERATE_OVER_ALL_COUNTERS \ - C(thread_allocated) \ - C(thread_allocated_last_event) \ - C(prof_sample_last_event) \ +#define ITERATE_OVER_ALL_COUNTERS \ + C(thread_allocated) \ + C(thread_allocated_last_event) \ + C(prof_sample_last_event) \ C(stats_interval_last_event) /* Getters directly wrap TSD getters. */ -#define C(counter) \ -JEMALLOC_ALWAYS_INLINE uint64_t \ -counter##_get(tsd_t *tsd) { \ - return tsd_##counter##_get(tsd); \ -} +#define C(counter) \ + JEMALLOC_ALWAYS_INLINE uint64_t counter##_get(tsd_t *tsd) { \ + return tsd_##counter##_get(tsd); \ + } ITERATE_OVER_ALL_COUNTERS #undef C @@ -75,11 +73,10 @@ ITERATE_OVER_ALL_COUNTERS * temporarily delay the event and let it be immediately triggered at the next * allocation call. */ -#define C(counter) \ -JEMALLOC_ALWAYS_INLINE void \ -counter##_set(tsd_t *tsd, uint64_t v) { \ - *tsd_##counter##p_get(tsd) = v; \ -} +#define C(counter) \ + JEMALLOC_ALWAYS_INLINE void counter##_set(tsd_t *tsd, uint64_t v) { \ + *tsd_##counter##p_get(tsd) = v; \ + } ITERATE_OVER_ALL_COUNTERS #undef C diff --git a/include/jemalloc/internal/thread_event_registry.h b/include/jemalloc/internal/thread_event_registry.h index 1957e727..7ded440d 100644 --- a/include/jemalloc/internal/thread_event_registry.h +++ b/include/jemalloc/internal/thread_event_registry.h @@ -87,8 +87,8 @@ typedef void (*user_event_cb_t)( typedef struct user_hook_object_s user_hook_object_t; struct user_hook_object_s { user_event_cb_t callback; - uint64_t interval; - bool is_alloc_only; + uint64_t interval; + bool is_alloc_only; }; /* diff --git a/include/jemalloc/internal/ticker.h b/include/jemalloc/internal/ticker.h index dca9bd10..a1eec628 100644 --- a/include/jemalloc/internal/ticker.h +++ b/include/jemalloc/internal/ticker.h @@ -53,7 +53,7 @@ ticker_read(const ticker_t *ticker) { * worth the hassle, but this is on the fast path of both malloc and free (via * tcache_event). 
*/ -#if defined(__GNUC__) && !defined(__clang__) \ +#if defined(__GNUC__) && !defined(__clang__) \ && (defined(__x86_64__) || defined(__i386__)) JEMALLOC_NOINLINE #endif @@ -129,7 +129,8 @@ struct ticker_geom_s { * the behavior over long periods of time rather than the exact timing of the * initial ticks. */ -#define TICKER_GEOM_INIT(nticks) {nticks, nticks} +#define TICKER_GEOM_INIT(nticks) \ + { nticks, nticks } static inline void ticker_geom_init(ticker_geom_t *ticker, int32_t nticks) { @@ -150,22 +151,21 @@ ticker_geom_read(const ticker_geom_t *ticker) { } /* Same deal as above. */ -#if defined(__GNUC__) && !defined(__clang__) \ +#if defined(__GNUC__) && !defined(__clang__) \ && (defined(__x86_64__) || defined(__i386__)) JEMALLOC_NOINLINE #endif static bool -ticker_geom_fixup(ticker_geom_t *ticker, uint64_t *prng_state, - bool delay_trigger) { +ticker_geom_fixup( + ticker_geom_t *ticker, uint64_t *prng_state, bool delay_trigger) { if (delay_trigger) { ticker->tick = 0; return false; } uint64_t idx = prng_lg_range_u64(prng_state, TICKER_GEOM_NBITS); - ticker->tick = (uint32_t)( - (uint64_t)ticker->nticks * (uint64_t)ticker_geom_table[idx] - / (uint64_t)TICKER_GEOM_MUL); + ticker->tick = (uint32_t)((uint64_t)ticker->nticks + * (uint64_t)ticker_geom_table[idx] / (uint64_t)TICKER_GEOM_MUL); return true; } @@ -181,8 +181,8 @@ ticker_geom_ticks(ticker_geom_t *ticker, uint64_t *prng_state, int32_t nticks, } static inline bool -ticker_geom_tick(ticker_geom_t *ticker, uint64_t *prng_state, - bool delay_trigger) { +ticker_geom_tick( + ticker_geom_t *ticker, uint64_t *prng_state, bool delay_trigger) { return ticker_geom_ticks(ticker, prng_state, 1, delay_trigger); } diff --git a/include/jemalloc/internal/tsd.h b/include/jemalloc/internal/tsd.h index c06605df..84101c65 100644 --- a/include/jemalloc/internal/tsd.h +++ b/include/jemalloc/internal/tsd.h @@ -7,14 +7,14 @@ * tsd_boot1, tsd_boot, tsd_booted_get, tsd_get_allocates, tsd_get, and tsd_set. 
*/ #ifdef JEMALLOC_MALLOC_THREAD_CLEANUP -#include "jemalloc/internal/jemalloc_preamble.h" -#include "jemalloc/internal/tsd_malloc_thread_cleanup.h" +# include "jemalloc/internal/jemalloc_preamble.h" +# include "jemalloc/internal/tsd_malloc_thread_cleanup.h" #elif (defined(JEMALLOC_TLS)) -#include "jemalloc/internal/tsd_tls.h" +# include "jemalloc/internal/tsd_tls.h" #elif (defined(_WIN32)) -#include "jemalloc/internal/tsd_win.h" +# include "jemalloc/internal/tsd_win.h" #else -#include "jemalloc/internal/tsd_generic.h" +# include "jemalloc/internal/tsd_generic.h" #endif /* @@ -22,11 +22,10 @@ * foo. This omits some safety checks, and so can be used during tsd * initialization and cleanup. */ -#define O(n, t, nt) \ -JEMALLOC_ALWAYS_INLINE t * \ -tsd_##n##p_get_unsafe(tsd_t *tsd) { \ - return &tsd->TSD_MANGLE(n); \ -} +#define O(n, t, nt) \ + JEMALLOC_ALWAYS_INLINE t *tsd_##n##p_get_unsafe(tsd_t *tsd) { \ + return &tsd->TSD_MANGLE(n); \ + } TSD_DATA_SLOW TSD_DATA_FAST TSD_DATA_SLOWER @@ -59,39 +58,36 @@ TSD_DATA_SLOWER * tsdn_foop_get(tsdn) returns either the thread-local instance of foo (if tsdn * isn't NULL), or NULL (if tsdn is NULL), cast to the nullable pointer type. */ -#define O(n, t, nt) \ -JEMALLOC_ALWAYS_INLINE nt * \ -tsdn_##n##p_get(tsdn_t *tsdn) { \ - if (tsdn_null(tsdn)) { \ - return NULL; \ - } \ - tsd_t *tsd = tsdn_tsd(tsdn); \ - return (nt *)tsd_##n##p_get(tsd); \ -} +#define O(n, t, nt) \ + JEMALLOC_ALWAYS_INLINE nt *tsdn_##n##p_get(tsdn_t *tsdn) { \ + if (tsdn_null(tsdn)) { \ + return NULL; \ + } \ + tsd_t *tsd = tsdn_tsd(tsdn); \ + return (nt *)tsd_##n##p_get(tsd); \ + } TSD_DATA_SLOW TSD_DATA_FAST TSD_DATA_SLOWER #undef O /* tsd_foo_get(tsd) returns the value of the thread-local instance of foo. 
*/ -#define O(n, t, nt) \ -JEMALLOC_ALWAYS_INLINE t \ -tsd_##n##_get(tsd_t *tsd) { \ - return *tsd_##n##p_get(tsd); \ -} +#define O(n, t, nt) \ + JEMALLOC_ALWAYS_INLINE t tsd_##n##_get(tsd_t *tsd) { \ + return *tsd_##n##p_get(tsd); \ + } TSD_DATA_SLOW TSD_DATA_FAST TSD_DATA_SLOWER #undef O /* tsd_foo_set(tsd, val) updates the thread-local instance of foo to be val. */ -#define O(n, t, nt) \ -JEMALLOC_ALWAYS_INLINE void \ -tsd_##n##_set(tsd_t *tsd, t val) { \ - assert(tsd_state_get(tsd) != tsd_state_reincarnated && \ - tsd_state_get(tsd) != tsd_state_minimal_initialized); \ - *tsd_##n##p_get(tsd) = val; \ -} +#define O(n, t, nt) \ + JEMALLOC_ALWAYS_INLINE void tsd_##n##_set(tsd_t *tsd, t val) { \ + assert(tsd_state_get(tsd) != tsd_state_reincarnated \ + && tsd_state_get(tsd) != tsd_state_minimal_initialized); \ + *tsd_##n##p_get(tsd) = val; \ + } TSD_DATA_SLOW TSD_DATA_FAST TSD_DATA_SLOWER @@ -104,8 +100,8 @@ tsd_assert_fast(tsd_t *tsd) { * counters; it's not in general possible to ensure that they won't * change asynchronously from underneath us. */ - assert(!malloc_slow && tsd_tcache_enabled_get(tsd) && - tsd_reentrancy_level_get(tsd) == 0); + assert(!malloc_slow && tsd_tcache_enabled_get(tsd) + && tsd_reentrancy_level_get(tsd) == 0); } JEMALLOC_ALWAYS_INLINE bool @@ -194,8 +190,8 @@ tsdn_rtree_ctx(tsdn_t *tsdn, rtree_ctx_t *fallback) { static inline bool tsd_state_nocleanup(tsd_t *tsd) { - return tsd_state_get(tsd) == tsd_state_reincarnated || - tsd_state_get(tsd) == tsd_state_minimal_initialized; + return tsd_state_get(tsd) == tsd_state_reincarnated + || tsd_state_get(tsd) == tsd_state_minimal_initialized; } /* diff --git a/include/jemalloc/internal/tsd_generic.h b/include/jemalloc/internal/tsd_generic.h index aa8042a4..e049766f 100644 --- a/include/jemalloc/internal/tsd_generic.h +++ b/include/jemalloc/internal/tsd_generic.h @@ -1,5 +1,5 @@ #ifdef JEMALLOC_INTERNAL_TSD_GENERIC_H -#error This file should be included only once, by tsd.h. 
+# error This file should be included only once, by tsd.h. #endif #define JEMALLOC_INTERNAL_TSD_GENERIC_H @@ -12,25 +12,24 @@ typedef struct tsd_init_block_s tsd_init_block_t; struct tsd_init_block_s { ql_elm(tsd_init_block_t) link; pthread_t thread; - void *data; + void *data; }; /* Defined in tsd.c, to allow the mutex headers to have tsd dependencies. */ typedef struct tsd_init_head_s tsd_init_head_t; typedef struct { - bool initialized; + bool initialized; tsd_t val; } tsd_wrapper_t; -void *tsd_init_check_recursion(tsd_init_head_t *head, - tsd_init_block_t *block); -void tsd_init_finish(tsd_init_head_t *head, tsd_init_block_t *block); +void *tsd_init_check_recursion(tsd_init_head_t *head, tsd_init_block_t *block); +void tsd_init_finish(tsd_init_head_t *head, tsd_init_block_t *block); -extern pthread_key_t tsd_tsd; +extern pthread_key_t tsd_tsd; extern tsd_init_head_t tsd_init_head; -extern tsd_wrapper_t tsd_boot_wrapper; -extern bool tsd_booted; +extern tsd_wrapper_t tsd_boot_wrapper; +extern bool tsd_booted; /* Initialization/cleanup. */ JEMALLOC_ALWAYS_INLINE void @@ -42,8 +41,8 @@ tsd_cleanup_wrapper(void *arg) { tsd_cleanup(&wrapper->val); if (wrapper->initialized) { /* Trigger another cleanup round. 
*/ - if (pthread_setspecific(tsd_tsd, (void *)wrapper) != 0) - { + if (pthread_setspecific(tsd_tsd, (void *)wrapper) + != 0) { malloc_write(": Error setting TSD\n"); if (opt_abort) { abort(); @@ -78,23 +77,23 @@ tsd_wrapper_get(bool init) { if (init && unlikely(wrapper == NULL)) { tsd_init_block_t block; - wrapper = (tsd_wrapper_t *) - tsd_init_check_recursion(&tsd_init_head, &block); + wrapper = (tsd_wrapper_t *)tsd_init_check_recursion( + &tsd_init_head, &block); if (wrapper) { return wrapper; } - wrapper = (tsd_wrapper_t *) - malloc_tsd_malloc(sizeof(tsd_wrapper_t)); + wrapper = (tsd_wrapper_t *)malloc_tsd_malloc( + sizeof(tsd_wrapper_t)); block.data = (void *)wrapper; if (wrapper == NULL) { malloc_write(": Error allocating TSD\n"); abort(); } else { wrapper->initialized = false; - JEMALLOC_DIAGNOSTIC_PUSH - JEMALLOC_DIAGNOSTIC_IGNORE_MISSING_STRUCT_FIELD_INITIALIZERS + JEMALLOC_DIAGNOSTIC_PUSH + JEMALLOC_DIAGNOSTIC_IGNORE_MISSING_STRUCT_FIELD_INITIALIZERS tsd_t initializer = TSD_INITIALIZER; - JEMALLOC_DIAGNOSTIC_POP + JEMALLOC_DIAGNOSTIC_POP wrapper->val = initializer; } tsd_wrapper_set(wrapper); @@ -105,11 +104,11 @@ tsd_wrapper_get(bool init) { JEMALLOC_ALWAYS_INLINE bool tsd_boot0(void) { - tsd_wrapper_t *wrapper; + tsd_wrapper_t *wrapper; tsd_init_block_t block; - wrapper = (tsd_wrapper_t *) - tsd_init_check_recursion(&tsd_init_head, &block); + wrapper = (tsd_wrapper_t *)tsd_init_check_recursion( + &tsd_init_head, &block); if (wrapper) { return false; } @@ -134,10 +133,10 @@ tsd_boot1(void) { tsd_boot_wrapper.initialized = false; tsd_cleanup(&tsd_boot_wrapper.val); wrapper->initialized = false; - JEMALLOC_DIAGNOSTIC_PUSH - JEMALLOC_DIAGNOSTIC_IGNORE_MISSING_STRUCT_FIELD_INITIALIZERS + JEMALLOC_DIAGNOSTIC_PUSH + JEMALLOC_DIAGNOSTIC_IGNORE_MISSING_STRUCT_FIELD_INITIALIZERS tsd_t initializer = TSD_INITIALIZER; - JEMALLOC_DIAGNOSTIC_POP + JEMALLOC_DIAGNOSTIC_POP wrapper->val = initializer; tsd_wrapper_set(wrapper); } diff --git 
a/include/jemalloc/internal/tsd_internals.h b/include/jemalloc/internal/tsd_internals.h index 69b60519..f675587d 100644 --- a/include/jemalloc/internal/tsd_internals.h +++ b/include/jemalloc/internal/tsd_internals.h @@ -48,123 +48,113 @@ #ifdef JEMALLOC_JET typedef void (*test_callback_t)(int *); -# define MALLOC_TSD_TEST_DATA_INIT 0x72b65c10 -# define MALLOC_TEST_TSD \ - O(test_data, int, int) \ - O(test_callback, test_callback_t, int) -# define MALLOC_TEST_TSD_INITIALIZER , MALLOC_TSD_TEST_DATA_INIT, NULL +# define MALLOC_TSD_TEST_DATA_INIT 0x72b65c10 +# define MALLOC_TEST_TSD \ + O(test_data, int, int) \ + O(test_callback, test_callback_t, int) +# define MALLOC_TEST_TSD_INITIALIZER , MALLOC_TSD_TEST_DATA_INIT, NULL #else -# define MALLOC_TEST_TSD -# define MALLOC_TEST_TSD_INITIALIZER +# define MALLOC_TEST_TSD +# define MALLOC_TEST_TSD_INITIALIZER #endif typedef ql_elm(tsd_t) tsd_link_t; /* O(name, type, nullable type) */ -#define TSD_DATA_SLOW \ - O(tcache_enabled, bool, bool) \ - O(reentrancy_level, int8_t, int8_t) \ - O(min_init_state_nfetched, uint8_t, uint8_t) \ - O(thread_allocated_last_event, uint64_t, uint64_t) \ - O(thread_allocated_next_event, uint64_t, uint64_t) \ - O(thread_deallocated_last_event, uint64_t, uint64_t) \ - O(thread_deallocated_next_event, uint64_t, uint64_t) \ - O(te_data, te_data_t, te_data_t) \ - O(prof_sample_last_event, uint64_t, uint64_t) \ - O(stats_interval_last_event, uint64_t, uint64_t) \ - O(prof_tdata, prof_tdata_t *, prof_tdata_t *) \ - O(prng_state, uint64_t, uint64_t) \ - O(san_extents_until_guard_small, uint64_t, uint64_t) \ - O(san_extents_until_guard_large, uint64_t, uint64_t) \ - O(iarena, arena_t *, arena_t *) \ - O(arena, arena_t *, arena_t *) \ - O(arena_decay_ticker, ticker_geom_t, ticker_geom_t) \ - O(sec_shard, uint8_t, uint8_t) \ - O(binshards, tsd_binshards_t, tsd_binshards_t)\ - O(tsd_link, tsd_link_t, tsd_link_t) \ - O(in_hook, bool, bool) \ - O(peak, peak_t, peak_t) \ - O(activity_callback_thunk, 
activity_callback_thunk_t, \ - activity_callback_thunk_t) \ - O(tcache_slow, tcache_slow_t, tcache_slow_t) \ - O(rtree_ctx, rtree_ctx_t, rtree_ctx_t) +#define TSD_DATA_SLOW \ + O(tcache_enabled, bool, bool) \ + O(reentrancy_level, int8_t, int8_t) \ + O(min_init_state_nfetched, uint8_t, uint8_t) \ + O(thread_allocated_last_event, uint64_t, uint64_t) \ + O(thread_allocated_next_event, uint64_t, uint64_t) \ + O(thread_deallocated_last_event, uint64_t, uint64_t) \ + O(thread_deallocated_next_event, uint64_t, uint64_t) \ + O(te_data, te_data_t, te_data_t) \ + O(prof_sample_last_event, uint64_t, uint64_t) \ + O(stats_interval_last_event, uint64_t, uint64_t) \ + O(prof_tdata, prof_tdata_t *, prof_tdata_t *) \ + O(prng_state, uint64_t, uint64_t) \ + O(san_extents_until_guard_small, uint64_t, uint64_t) \ + O(san_extents_until_guard_large, uint64_t, uint64_t) \ + O(iarena, arena_t *, arena_t *) \ + O(arena, arena_t *, arena_t *) \ + O(arena_decay_ticker, ticker_geom_t, ticker_geom_t) \ + O(sec_shard, uint8_t, uint8_t) \ + O(binshards, tsd_binshards_t, tsd_binshards_t) \ + O(tsd_link, tsd_link_t, tsd_link_t) \ + O(in_hook, bool, bool) \ + O(peak, peak_t, peak_t) \ + O(activity_callback_thunk, activity_callback_thunk_t, \ + activity_callback_thunk_t) \ + O(tcache_slow, tcache_slow_t, tcache_slow_t) \ + O(rtree_ctx, rtree_ctx_t, rtree_ctx_t) -#define TSD_DATA_SLOW_INITIALIZER \ - /* tcache_enabled */ TCACHE_ENABLED_ZERO_INITIALIZER, \ - /* reentrancy_level */ 0, \ - /* min_init_state_nfetched */ 0, \ - /* thread_allocated_last_event */ 0, \ - /* thread_allocated_next_event */ 0, \ - /* thread_deallocated_last_event */ 0, \ - /* thread_deallocated_next_event */ 0, \ - /* te_data */ TE_DATA_INITIALIZER, \ - /* prof_sample_last_event */ 0, \ - /* stats_interval_last_event */ 0, \ - /* prof_tdata */ NULL, \ - /* prng_state */ 0, \ - /* san_extents_until_guard_small */ 0, \ - /* san_extents_until_guard_large */ 0, \ - /* iarena */ NULL, \ - /* arena */ NULL, \ - /* 
arena_decay_ticker */ \ - TICKER_GEOM_INIT(ARENA_DECAY_NTICKS_PER_UPDATE), \ - /* sec_shard */ (uint8_t)-1, \ - /* binshards */ TSD_BINSHARDS_ZERO_INITIALIZER, \ - /* tsd_link */ {NULL}, \ - /* in_hook */ false, \ - /* peak */ PEAK_INITIALIZER, \ - /* activity_callback_thunk */ \ - ACTIVITY_CALLBACK_THUNK_INITIALIZER, \ - /* tcache_slow */ TCACHE_SLOW_ZERO_INITIALIZER, \ - /* rtree_ctx */ RTREE_CTX_INITIALIZER, +#define TSD_DATA_SLOW_INITIALIZER \ + /* tcache_enabled */ TCACHE_ENABLED_ZERO_INITIALIZER, \ + /* reentrancy_level */ 0, /* min_init_state_nfetched */ 0, \ + /* thread_allocated_last_event */ 0, \ + /* thread_allocated_next_event */ 0, \ + /* thread_deallocated_last_event */ 0, \ + /* thread_deallocated_next_event */ 0, \ + /* te_data */ TE_DATA_INITIALIZER, /* prof_sample_last_event */ 0, \ + /* stats_interval_last_event */ 0, /* prof_tdata */ NULL, \ + /* prng_state */ 0, /* san_extents_until_guard_small */ 0, \ + /* san_extents_until_guard_large */ 0, /* iarena */ NULL, \ + /* arena */ NULL, /* arena_decay_ticker */ \ + TICKER_GEOM_INIT(ARENA_DECAY_NTICKS_PER_UPDATE), \ + /* sec_shard */ (uint8_t) - 1, \ + /* binshards */ TSD_BINSHARDS_ZERO_INITIALIZER, \ + /* tsd_link */ {NULL}, /* in_hook */ false, \ + /* peak */ PEAK_INITIALIZER, /* activity_callback_thunk */ \ + ACTIVITY_CALLBACK_THUNK_INITIALIZER, \ + /* tcache_slow */ TCACHE_SLOW_ZERO_INITIALIZER, \ + /* rtree_ctx */ RTREE_CTX_INITIALIZER, /* O(name, type, nullable type) */ -#define TSD_DATA_FAST \ - O(thread_allocated, uint64_t, uint64_t) \ - O(thread_allocated_next_event_fast, uint64_t, uint64_t) \ - O(thread_deallocated, uint64_t, uint64_t) \ - O(thread_deallocated_next_event_fast, uint64_t, uint64_t) \ - O(tcache, tcache_t, tcache_t) +#define TSD_DATA_FAST \ + O(thread_allocated, uint64_t, uint64_t) \ + O(thread_allocated_next_event_fast, uint64_t, uint64_t) \ + O(thread_deallocated, uint64_t, uint64_t) \ + O(thread_deallocated_next_event_fast, uint64_t, uint64_t) \ + O(tcache, tcache_t, 
tcache_t) -#define TSD_DATA_FAST_INITIALIZER \ - /* thread_allocated */ 0, \ - /* thread_allocated_next_event_fast */ 0, \ - /* thread_deallocated */ 0, \ - /* thread_deallocated_next_event_fast */ 0, \ - /* tcache */ TCACHE_ZERO_INITIALIZER, +#define TSD_DATA_FAST_INITIALIZER \ + /* thread_allocated */ 0, /* thread_allocated_next_event_fast */ 0, \ + /* thread_deallocated */ 0, \ + /* thread_deallocated_next_event_fast */ 0, \ + /* tcache */ TCACHE_ZERO_INITIALIZER, /* O(name, type, nullable type) */ -#define TSD_DATA_SLOWER \ - O(witness_tsd, witness_tsd_t, witness_tsdn_t) \ - MALLOC_TEST_TSD +#define TSD_DATA_SLOWER \ + O(witness_tsd, witness_tsd_t, witness_tsdn_t) \ + MALLOC_TEST_TSD -#define TSD_DATA_SLOWER_INITIALIZER \ - /* witness */ WITNESS_TSD_INITIALIZER \ - /* test data */ MALLOC_TEST_TSD_INITIALIZER +#define TSD_DATA_SLOWER_INITIALIZER \ + /* witness */ WITNESS_TSD_INITIALIZER \ + /* test data */ MALLOC_TEST_TSD_INITIALIZER - -#define TSD_INITIALIZER { \ - TSD_DATA_SLOW_INITIALIZER \ - /* state */ ATOMIC_INIT(tsd_state_uninitialized), \ - TSD_DATA_FAST_INITIALIZER \ - TSD_DATA_SLOWER_INITIALIZER \ -} +#define TSD_INITIALIZER \ + { \ + TSD_DATA_SLOW_INITIALIZER \ + /* state */ ATOMIC_INIT(tsd_state_uninitialized), \ + TSD_DATA_FAST_INITIALIZER TSD_DATA_SLOWER_INITIALIZER \ + } #if defined(JEMALLOC_MALLOC_THREAD_CLEANUP) || defined(_WIN32) void _malloc_tsd_cleanup_register(bool (*f)(void)); #endif -void *malloc_tsd_malloc(size_t size); -void malloc_tsd_dalloc(void *wrapper); +void *malloc_tsd_malloc(size_t size); +void malloc_tsd_dalloc(void *wrapper); tsd_t *malloc_tsd_boot0(void); -void malloc_tsd_boot1(void); -void tsd_cleanup(void *arg); +void malloc_tsd_boot1(void); +void tsd_cleanup(void *arg); tsd_t *tsd_fetch_slow(tsd_t *tsd, bool minimal); -void tsd_state_set(tsd_t *tsd, uint8_t new_state); -void tsd_slow_update(tsd_t *tsd); -void tsd_prefork(tsd_t *tsd); -void tsd_postfork_parent(tsd_t *tsd); -void tsd_postfork_child(tsd_t *tsd); +void 
tsd_state_set(tsd_t *tsd, uint8_t new_state); +void tsd_slow_update(tsd_t *tsd); +void tsd_prefork(tsd_t *tsd); +void tsd_postfork_parent(tsd_t *tsd); +void tsd_postfork_child(tsd_t *tsd); /* * Call ..._inc when your module wants to take all threads down the slow paths, @@ -224,15 +214,15 @@ enum { #define TSD_MANGLE(n) cant_access_tsd_items_directly_use_a_getter_or_setter_##n #ifdef JEMALLOC_U8_ATOMICS -# define tsd_state_t atomic_u8_t -# define tsd_atomic_load atomic_load_u8 -# define tsd_atomic_store atomic_store_u8 -# define tsd_atomic_exchange atomic_exchange_u8 +# define tsd_state_t atomic_u8_t +# define tsd_atomic_load atomic_load_u8 +# define tsd_atomic_store atomic_store_u8 +# define tsd_atomic_exchange atomic_exchange_u8 #else -# define tsd_state_t atomic_u32_t -# define tsd_atomic_load atomic_load_u32 -# define tsd_atomic_store atomic_store_u32 -# define tsd_atomic_exchange atomic_exchange_u32 +# define tsd_state_t atomic_u32_t +# define tsd_atomic_load atomic_load_u32 +# define tsd_atomic_store atomic_store_u32 +# define tsd_atomic_exchange atomic_exchange_u32 #endif /* The actual tsd. */ @@ -243,8 +233,7 @@ struct tsd_s { * setters below. */ -#define O(n, t, nt) \ - t TSD_MANGLE(n); +#define O(n, t, nt) t TSD_MANGLE(n); TSD_DATA_SLOW /* diff --git a/include/jemalloc/internal/tsd_malloc_thread_cleanup.h b/include/jemalloc/internal/tsd_malloc_thread_cleanup.h index fb9ea1b4..00756df1 100644 --- a/include/jemalloc/internal/tsd_malloc_thread_cleanup.h +++ b/include/jemalloc/internal/tsd_malloc_thread_cleanup.h @@ -1,5 +1,5 @@ #ifdef JEMALLOC_INTERNAL_TSD_MALLOC_THREAD_CLEANUP_H -#error This file should be included only once, by tsd.h. +# error This file should be included only once, by tsd.h. 
#endif #define JEMALLOC_INTERNAL_TSD_MALLOC_THREAD_CLEANUP_H diff --git a/include/jemalloc/internal/tsd_tls.h b/include/jemalloc/internal/tsd_tls.h index 5e5a6e5e..6536eb54 100644 --- a/include/jemalloc/internal/tsd_tls.h +++ b/include/jemalloc/internal/tsd_tls.h @@ -1,5 +1,5 @@ #ifdef JEMALLOC_INTERNAL_TSD_TLS_H -#error This file should be included only once, by tsd.h. +# error This file should be included only once, by tsd.h. #endif #define JEMALLOC_INTERNAL_TSD_TLS_H @@ -11,7 +11,7 @@ extern JEMALLOC_TSD_TYPE_ATTR(tsd_t) tsd_tls; extern pthread_key_t tsd_tsd; -extern bool tsd_booted; +extern bool tsd_booted; /* Initialization/cleanup. */ JEMALLOC_ALWAYS_INLINE bool diff --git a/include/jemalloc/internal/tsd_types.h b/include/jemalloc/internal/tsd_types.h index 73bbe486..46479506 100644 --- a/include/jemalloc/internal/tsd_types.h +++ b/include/jemalloc/internal/tsd_types.h @@ -1,11 +1,11 @@ #ifndef JEMALLOC_INTERNAL_TSD_TYPES_H #define JEMALLOC_INTERNAL_TSD_TYPES_H -#define MALLOC_TSD_CLEANUPS_MAX 4 +#define MALLOC_TSD_CLEANUPS_MAX 4 #include "jemalloc/internal/jemalloc_preamble.h" -typedef struct tsd_s tsd_t; +typedef struct tsd_s tsd_t; typedef struct tsdn_s tsdn_t; typedef bool (*malloc_tsd_cleanup_t)(void); diff --git a/include/jemalloc/internal/tsd_win.h b/include/jemalloc/internal/tsd_win.h index 559ee78f..8b22bec1 100644 --- a/include/jemalloc/internal/tsd_win.h +++ b/include/jemalloc/internal/tsd_win.h @@ -1,5 +1,5 @@ #ifdef JEMALLOC_INTERNAL_TSD_WIN_H -#error This file should be included only once, by tsd.h. +# error This file should be included only once, by tsd.h. #endif #define JEMALLOC_INTERNAL_TSD_WIN_H @@ -13,29 +13,29 @@ than a type cast. 
*/ typedef struct { tsd_t val; - bool initialized; + bool initialized; } tsd_wrapper_t; #if defined(JEMALLOC_LEGACY_WINDOWS_SUPPORT) || !defined(_MSC_VER) -extern DWORD tsd_tsd; +extern DWORD tsd_tsd; extern tsd_wrapper_t tsd_boot_wrapper; -extern bool tsd_booted; -#if defined(_M_ARM64EC) -#define JEMALLOC_WIN32_TLSGETVALUE2 0 -#else -#define JEMALLOC_WIN32_TLSGETVALUE2 1 -#endif -#if JEMALLOC_WIN32_TLSGETVALUE2 -typedef LPVOID (WINAPI *TGV2)(DWORD dwTlsIndex); -extern TGV2 tls_get_value2; +extern bool tsd_booted; +# if defined(_M_ARM64EC) +# define JEMALLOC_WIN32_TLSGETVALUE2 0 +# else +# define JEMALLOC_WIN32_TLSGETVALUE2 1 +# endif +# if JEMALLOC_WIN32_TLSGETVALUE2 +typedef LPVOID(WINAPI *TGV2)(DWORD dwTlsIndex); +extern TGV2 tls_get_value2; extern HMODULE tgv2_mod; -#endif +# endif /* Initialization/cleanup. */ JEMALLOC_ALWAYS_INLINE bool tsd_cleanup_wrapper(void) { - DWORD error = GetLastError(); + DWORD error = GetLastError(); tsd_wrapper_t *wrapper = (tsd_wrapper_t *)TlsGetValue(tsd_tsd); SetLastError(error); @@ -66,20 +66,20 @@ tsd_wrapper_set(tsd_wrapper_t *wrapper) { JEMALLOC_ALWAYS_INLINE tsd_wrapper_t * tsd_wrapper_get(bool init) { tsd_wrapper_t *wrapper; -#if JEMALLOC_WIN32_TLSGETVALUE2 +# if JEMALLOC_WIN32_TLSGETVALUE2 if (tls_get_value2 != NULL) { - wrapper = (tsd_wrapper_t *) tls_get_value2(tsd_tsd); + wrapper = (tsd_wrapper_t *)tls_get_value2(tsd_tsd); } else -#endif +# endif { DWORD error = GetLastError(); - wrapper = (tsd_wrapper_t *) TlsGetValue(tsd_tsd); + wrapper = (tsd_wrapper_t *)TlsGetValue(tsd_tsd); SetLastError(error); } if (init && unlikely(wrapper == NULL)) { - wrapper = (tsd_wrapper_t *) - malloc_tsd_malloc(sizeof(tsd_wrapper_t)); + wrapper = (tsd_wrapper_t *)malloc_tsd_malloc( + sizeof(tsd_wrapper_t)); if (wrapper == NULL) { malloc_write(": Error allocating TSD\n"); abort(); @@ -102,12 +102,12 @@ tsd_boot0(void) { } _malloc_tsd_cleanup_register(&tsd_cleanup_wrapper); tsd_wrapper_set(&tsd_boot_wrapper); -#if JEMALLOC_WIN32_TLSGETVALUE2 
+# if JEMALLOC_WIN32_TLSGETVALUE2 tgv2_mod = LoadLibraryA("api-ms-win-core-processthreads-l1-1-8.dll"); if (tgv2_mod != NULL) { tls_get_value2 = (TGV2)GetProcAddress(tgv2_mod, "TlsGetValue2"); } -#endif +# endif tsd_booted = true; return false; } @@ -115,8 +115,7 @@ tsd_boot0(void) { JEMALLOC_ALWAYS_INLINE void tsd_boot1(void) { tsd_wrapper_t *wrapper; - wrapper = (tsd_wrapper_t *) - malloc_tsd_malloc(sizeof(tsd_wrapper_t)); + wrapper = (tsd_wrapper_t *)malloc_tsd_malloc(sizeof(tsd_wrapper_t)); if (wrapper == NULL) { malloc_write(": Error allocating TSD\n"); abort(); @@ -174,7 +173,7 @@ tsd_set(tsd_t *val) { #else // defined(JEMALLOC_LEGACY_WINDOWS_SUPPORT) || !defined(_MSC_VER) -#define JEMALLOC_TSD_TYPE_ATTR(type) __declspec(thread) type +# define JEMALLOC_TSD_TYPE_ATTR(type) __declspec(thread) type extern JEMALLOC_TSD_TYPE_ATTR(tsd_wrapper_t) tsd_wrapper_tls; extern bool tsd_booted; diff --git a/include/jemalloc/internal/typed_list.h b/include/jemalloc/internal/typed_list.h index 7c4826fc..78704e48 100644 --- a/include/jemalloc/internal/typed_list.h +++ b/include/jemalloc/internal/typed_list.h @@ -6,54 +6,49 @@ * bit easier to use; it handles ql_elm_new calls and provides type safety. 
*/ -#define TYPED_LIST(list_type, el_type, linkage) \ -typedef struct { \ - ql_head(el_type) head; \ -} list_type##_t; \ -static inline void \ -list_type##_init(list_type##_t *list) { \ - ql_new(&list->head); \ -} \ -static inline el_type * \ -list_type##_first(const list_type##_t *list) { \ - return ql_first(&list->head); \ -} \ -static inline el_type * \ -list_type##_last(const list_type##_t *list) { \ - return ql_last(&list->head, linkage); \ -} \ -static inline el_type * \ -list_type##_next(const list_type##_t *list, el_type *item) { \ - return ql_next(&list->head, item, linkage); \ -} \ -static inline void \ -list_type##_append(list_type##_t *list, el_type *item) { \ - ql_elm_new(item, linkage); \ - ql_tail_insert(&list->head, item, linkage); \ -} \ -static inline void \ -list_type##_prepend(list_type##_t *list, el_type *item) { \ - ql_elm_new(item, linkage); \ - ql_head_insert(&list->head, item, linkage); \ -} \ -static inline void \ -list_type##_replace(list_type##_t *list, el_type *to_remove, \ - el_type *to_insert) { \ - ql_elm_new(to_insert, linkage); \ - ql_after_insert(to_remove, to_insert, linkage); \ - ql_remove(&list->head, to_remove, linkage); \ -} \ -static inline void \ -list_type##_remove(list_type##_t *list, el_type *item) { \ - ql_remove(&list->head, item, linkage); \ -} \ -static inline bool \ -list_type##_empty(list_type##_t *list) { \ - return ql_empty(&list->head); \ -} \ -static inline void \ -list_type##_concat(list_type##_t *list_a, list_type##_t *list_b) { \ - ql_concat(&list_a->head, &list_b->head, linkage); \ -} +#define TYPED_LIST(list_type, el_type, linkage) \ + typedef struct { \ + ql_head(el_type) head; \ + } list_type##_t; \ + static inline void list_type##_init(list_type##_t *list) { \ + ql_new(&list->head); \ + } \ + static inline el_type *list_type##_first(const list_type##_t *list) { \ + return ql_first(&list->head); \ + } \ + static inline el_type *list_type##_last(const list_type##_t *list) { \ + return ql_last(&list->head, 
linkage); \ + } \ + static inline el_type *list_type##_next( \ + const list_type##_t *list, el_type *item) { \ + return ql_next(&list->head, item, linkage); \ + } \ + static inline void list_type##_append( \ + list_type##_t *list, el_type *item) { \ + ql_elm_new(item, linkage); \ + ql_tail_insert(&list->head, item, linkage); \ + } \ + static inline void list_type##_prepend( \ + list_type##_t *list, el_type *item) { \ + ql_elm_new(item, linkage); \ + ql_head_insert(&list->head, item, linkage); \ + } \ + static inline void list_type##_replace( \ + list_type##_t *list, el_type *to_remove, el_type *to_insert) { \ + ql_elm_new(to_insert, linkage); \ + ql_after_insert(to_remove, to_insert, linkage); \ + ql_remove(&list->head, to_remove, linkage); \ + } \ + static inline void list_type##_remove( \ + list_type##_t *list, el_type *item) { \ + ql_remove(&list->head, item, linkage); \ + } \ + static inline bool list_type##_empty(list_type##_t *list) { \ + return ql_empty(&list->head); \ + } \ + static inline void list_type##_concat( \ + list_type##_t *list_a, list_type##_t *list_b) { \ + ql_concat(&list_a->head, &list_b->head, linkage); \ + } #endif /* JEMALLOC_INTERNAL_TYPED_LIST_H */ diff --git a/include/jemalloc/internal/util.h b/include/jemalloc/internal/util.h index 35aa26e6..bf246c95 100644 --- a/include/jemalloc/internal/util.h +++ b/include/jemalloc/internal/util.h @@ -8,10 +8,10 @@ /* Junk fill patterns. */ #ifndef JEMALLOC_ALLOC_JUNK -# define JEMALLOC_ALLOC_JUNK ((uint8_t)0xa5) +# define JEMALLOC_ALLOC_JUNK ((uint8_t)0xa5) #endif #ifndef JEMALLOC_FREE_JUNK -# define JEMALLOC_FREE_JUNK ((uint8_t)0x5a) +# define JEMALLOC_FREE_JUNK ((uint8_t)0x5a) #endif /* @@ -32,20 +32,20 @@ #define JEMALLOC_CC_SILENCE_INIT(...) 
= __VA_ARGS__ #ifdef __GNUC__ -# define likely(x) __builtin_expect(!!(x), 1) -# define unlikely(x) __builtin_expect(!!(x), 0) +# define likely(x) __builtin_expect(!!(x), 1) +# define unlikely(x) __builtin_expect(!!(x), 0) #else -# define likely(x) !!(x) -# define unlikely(x) !!(x) +# define likely(x) !!(x) +# define unlikely(x) !!(x) #endif #if defined(__STDC_VERSION__) && __STDC_VERSION__ >= 202311L -#include +# include #else -#if !defined(JEMALLOC_INTERNAL_UNREACHABLE) -# error JEMALLOC_INTERNAL_UNREACHABLE should have been defined by configure -#endif -#define unreachable() JEMALLOC_INTERNAL_UNREACHABLE() +# if !defined(JEMALLOC_INTERNAL_UNREACHABLE) +# error JEMALLOC_INTERNAL_UNREACHABLE should have been defined by configure +# endif +# define unreachable() JEMALLOC_INTERNAL_UNREACHABLE() #endif /* Set error code. */ @@ -69,27 +69,27 @@ get_errno(void) { } #ifdef _MSC_VER -#define util_assume __assume -#elif defined(__clang__) && (__clang_major__ > 3 || \ - (__clang_major__ == 3 && __clang_minor__ >= 6)) -#define util_assume __builtin_assume +# define util_assume __assume +#elif defined(__clang__) \ + && (__clang_major__ > 3 || (__clang_major__ == 3 && __clang_minor__ >= 6)) +# define util_assume __builtin_assume #else -#define util_assume(expr) \ - do { \ - if (!(expr)) { \ - unreachable(); \ - } \ - } while(0) +# define util_assume(expr) \ + do { \ + if (!(expr)) { \ + unreachable(); \ + } \ + } while (0) #endif /* Allows compiler constant folding on inlined paths. */ #if defined(__has_builtin) -# if __has_builtin(__builtin_constant_p) -# define util_compile_time_const(x) __builtin_constant_p(x) -# endif +# if __has_builtin(__builtin_constant_p) +# define util_compile_time_const(x) __builtin_constant_p(x) +# endif #endif #ifndef util_compile_time_const -# define util_compile_time_const(x) (false) +# define util_compile_time_const(x) (false) #endif /* ptr should be valid. 
*/ @@ -148,7 +148,6 @@ util_prefetch_write_range(void *ptr, size_t sz) { * key1-key2:value|key3-key4:value|... * Note it does not handle the ending '\0'. */ -bool -multi_setting_parse_next(const char **setting_segment_cur, size_t *len_left, - size_t *key_start, size_t *key_end, size_t *value); +bool multi_setting_parse_next(const char **setting_segment_cur, + size_t *len_left, size_t *key_start, size_t *key_end, size_t *value); #endif /* JEMALLOC_INTERNAL_UTIL_H */ diff --git a/include/jemalloc/internal/witness.h b/include/jemalloc/internal/witness.h index acf7860d..73770713 100644 --- a/include/jemalloc/internal/witness.h +++ b/include/jemalloc/internal/witness.h @@ -66,8 +66,8 @@ enum witness_rank_e { WITNESS_RANK_HOOK, WITNESS_RANK_BIN, - WITNESS_RANK_LEAF=0x1000, - WITNESS_RANK_BATCHER=WITNESS_RANK_LEAF, + WITNESS_RANK_LEAF = 0x1000, + WITNESS_RANK_BATCHER = WITNESS_RANK_LEAF, WITNESS_RANK_ARENA_STATS = WITNESS_RANK_LEAF, WITNESS_RANK_COUNTER_ACCUM = WITNESS_RANK_LEAF, WITNESS_RANK_DSS = WITNESS_RANK_LEAF, @@ -86,38 +86,43 @@ typedef enum witness_rank_e witness_rank_t; /* PER-WITNESS DATA */ /******************************************************************************/ #if defined(JEMALLOC_DEBUG) -# define WITNESS_INITIALIZER(name, rank) {name, rank, NULL, NULL, {NULL, NULL}} +# define WITNESS_INITIALIZER(name, rank) \ + { \ + name, rank, NULL, NULL, { \ + NULL, NULL \ + } \ + } #else -# define WITNESS_INITIALIZER(name, rank) +# define WITNESS_INITIALIZER(name, rank) #endif typedef struct witness_s witness_t; typedef ql_head(witness_t) witness_list_t; -typedef int witness_comp_t (const witness_t *, void *, const witness_t *, - void *); +typedef int witness_comp_t( + const witness_t *, void *, const witness_t *, void *); struct witness_s { /* Name, used for printing lock order reversal messages. */ - const char *name; + const char *name; /* * Witness rank, where 0 is lowest and WITNESS_RANK_LEAF is highest. 
* Witnesses must be acquired in order of increasing rank. */ - witness_rank_t rank; + witness_rank_t rank; /* * If two witnesses are of equal rank and they have the samp comp * function pointer, it is called as a last attempt to differentiate * between witnesses of equal rank. */ - witness_comp_t *comp; + witness_comp_t *comp; /* Opaque data, passed to comp(). */ - void *opaque; + void *opaque; /* Linkage for thread's currently owned locks. */ - ql_elm(witness_t) link; + ql_elm(witness_t) link; }; /******************************************************************************/ @@ -126,10 +131,11 @@ struct witness_s { typedef struct witness_tsd_s witness_tsd_t; struct witness_tsd_s { witness_list_t witnesses; - bool forking; + bool forking; }; -#define WITNESS_TSD_INITIALIZER { ql_head_initializer(witnesses), false } +#define WITNESS_TSD_INITIALIZER \ + { ql_head_initializer(witnesses), false } #define WITNESS_TSDN_NULL ((witness_tsdn_t *)0) /******************************************************************************/ @@ -162,17 +168,17 @@ witness_tsdn_tsd(witness_tsdn_t *witness_tsdn) { void witness_init(witness_t *witness, const char *name, witness_rank_t rank, witness_comp_t *comp, void *opaque); -typedef void (witness_lock_error_t)(const witness_list_t *, const witness_t *); +typedef void(witness_lock_error_t)(const witness_list_t *, const witness_t *); extern witness_lock_error_t *JET_MUTABLE witness_lock_error; -typedef void (witness_owner_error_t)(const witness_t *); +typedef void(witness_owner_error_t)(const witness_t *); extern witness_owner_error_t *JET_MUTABLE witness_owner_error; -typedef void (witness_not_owner_error_t)(const witness_t *); +typedef void(witness_not_owner_error_t)(const witness_t *); extern witness_not_owner_error_t *JET_MUTABLE witness_not_owner_error; -typedef void (witness_depth_error_t)(const witness_list_t *, - witness_rank_t rank_inclusive, unsigned depth); +typedef void(witness_depth_error_t)( + const witness_list_t *, 
witness_rank_t rank_inclusive, unsigned depth); extern witness_depth_error_t *JET_MUTABLE witness_depth_error; void witnesses_cleanup(witness_tsd_t *witness_tsd); @@ -184,12 +190,12 @@ void witness_postfork_child(witness_tsd_t *witness_tsd); static inline bool witness_owner(witness_tsd_t *witness_tsd, const witness_t *witness) { witness_list_t *witnesses; - witness_t *w; + witness_t *w; cassert(config_debug); witnesses = &witness_tsd->witnesses; - ql_foreach(w, witnesses, link) { + ql_foreach (w, witnesses, link) { if (w == witness) { return true; } @@ -221,11 +227,11 @@ witness_assert_owner(witness_tsdn_t *witness_tsdn, const witness_t *witness) { } static inline void -witness_assert_not_owner(witness_tsdn_t *witness_tsdn, - const witness_t *witness) { - witness_tsd_t *witness_tsd; +witness_assert_not_owner( + witness_tsdn_t *witness_tsdn, const witness_t *witness) { + witness_tsd_t *witness_tsd; witness_list_t *witnesses; - witness_t *w; + witness_t *w; if (!config_debug) { return; @@ -240,7 +246,7 @@ witness_assert_not_owner(witness_tsdn_t *witness_tsdn, } witnesses = &witness_tsd->witnesses; - ql_foreach(w, witnesses, link) { + ql_foreach (w, witnesses, link) { if (w == witness) { witness_not_owner_error(witness); } @@ -249,9 +255,9 @@ witness_assert_not_owner(witness_tsdn_t *witness_tsdn, /* Returns depth. Not intended for direct use. 
*/ static inline unsigned -witness_depth_to_rank(witness_list_t *witnesses, witness_rank_t rank_inclusive) -{ - unsigned d = 0; +witness_depth_to_rank( + witness_list_t *witnesses, witness_rank_t rank_inclusive) { + unsigned d = 0; witness_t *w = ql_last(witnesses, link); if (w != NULL) { @@ -274,7 +280,7 @@ witness_assert_depth_to_rank(witness_tsdn_t *witness_tsdn, } witness_list_t *witnesses = &witness_tsdn_tsd(witness_tsdn)->witnesses; - unsigned d = witness_depth_to_rank(witnesses, rank_inclusive); + unsigned d = witness_depth_to_rank(witnesses, rank_inclusive); if (d != depth) { witness_depth_error(witnesses, rank_inclusive, depth); @@ -292,14 +298,14 @@ witness_assert_lockless(witness_tsdn_t *witness_tsdn) { } static inline void -witness_assert_positive_depth_to_rank(witness_tsdn_t *witness_tsdn, - witness_rank_t rank_inclusive) { +witness_assert_positive_depth_to_rank( + witness_tsdn_t *witness_tsdn, witness_rank_t rank_inclusive) { if (!config_debug || witness_tsdn_null(witness_tsdn)) { return; } witness_list_t *witnesses = &witness_tsdn_tsd(witness_tsdn)->witnesses; - unsigned d = witness_depth_to_rank(witnesses, rank_inclusive); + unsigned d = witness_depth_to_rank(witnesses, rank_inclusive); if (d == 0) { witness_depth_error(witnesses, rank_inclusive, 1); @@ -308,9 +314,9 @@ witness_assert_positive_depth_to_rank(witness_tsdn_t *witness_tsdn, static inline void witness_lock(witness_tsdn_t *witness_tsdn, witness_t *witness) { - witness_tsd_t *witness_tsd; + witness_tsd_t *witness_tsd; witness_list_t *witnesses; - witness_t *w; + witness_t *w; if (!config_debug) { return; @@ -335,9 +341,9 @@ witness_lock(witness_tsdn_t *witness_tsdn, witness_t *witness) { } else if (w->rank > witness->rank) { /* Not forking, rank order reversal. 
*/ witness_lock_error(witnesses, witness); - } else if (w->rank == witness->rank && (w->comp == NULL || w->comp != - witness->comp || w->comp(w, w->opaque, witness, witness->opaque) > - 0)) { + } else if (w->rank == witness->rank + && (w->comp == NULL || w->comp != witness->comp + || w->comp(w, w->opaque, witness, witness->opaque) > 0)) { /* * Missing/incompatible comparison function, or comparison * function indicates rank order reversal. @@ -346,15 +352,15 @@ witness_lock(witness_tsdn_t *witness_tsdn, witness_t *witness) { } /* Suppress spurious warning from static analysis */ - assert(ql_empty(witnesses) || - qr_prev(ql_first(witnesses), link) != NULL); + assert( + ql_empty(witnesses) || qr_prev(ql_first(witnesses), link) != NULL); ql_elm_new(witness, link); ql_tail_insert(witnesses, witness, link); } static inline void witness_unlock(witness_tsdn_t *witness_tsdn, witness_t *witness) { - witness_tsd_t *witness_tsd; + witness_tsd_t *witness_tsd; witness_list_t *witnesses; if (!config_debug) { diff --git a/include/msvc_compat/C99/stdint.h b/include/msvc_compat/C99/stdint.h index c66fbb81..5ee3992b 100644 --- a/include/msvc_compat/C99/stdint.h +++ b/include/msvc_compat/C99/stdint.h @@ -30,39 +30,39 @@ /////////////////////////////////////////////////////////////////////////////// #ifndef _MSC_VER // [ -#error "Use this header only with Microsoft Visual C++ compilers!" +# error "Use this header only with Microsoft Visual C++ compilers!" 
#endif // _MSC_VER ] #ifndef _MSC_STDINT_H_ // [ -#define _MSC_STDINT_H_ +# define _MSC_STDINT_H_ -#if _MSC_VER > 1000 -#pragma once -#endif +# if _MSC_VER > 1000 +# pragma once +# endif -#include +# include // For Visual Studio 6 in C++ mode and for many Visual Studio versions when // compiling for ARM we should wrap include with 'extern "C++" {}' // or compiler give many errors like this: // error C2733: second C linkage of overloaded function 'wmemchr' not allowed -#ifdef __cplusplus +# ifdef __cplusplus extern "C" { -#endif -# include -#ifdef __cplusplus +# endif +# include +# ifdef __cplusplus } -#endif +# endif // Define _W64 macros to mark types changing their size, like intptr_t. -#ifndef _W64 -# if !defined(__midl) && (defined(_X86_) || defined(_M_IX86)) && _MSC_VER >= 1300 -# define _W64 __w64 -# else -# define _W64 -# endif -#endif - +# ifndef _W64 +# if !defined(__midl) && (defined(_X86_) || defined(_M_IX86)) \ + && _MSC_VER >= 1300 +# define _W64 __w64 +# else +# define _W64 +# endif +# endif // 7.18.1 Integer types @@ -71,177 +71,177 @@ extern "C" { // Visual Studio 6 and Embedded Visual C++ 4 doesn't // realize that, e.g. char has the same size as __int8 // so we give up on __intX for them. 
-#if (_MSC_VER < 1300) - typedef signed char int8_t; - typedef signed short int16_t; - typedef signed int int32_t; - typedef unsigned char uint8_t; - typedef unsigned short uint16_t; - typedef unsigned int uint32_t; -#else - typedef signed __int8 int8_t; - typedef signed __int16 int16_t; - typedef signed __int32 int32_t; - typedef unsigned __int8 uint8_t; - typedef unsigned __int16 uint16_t; - typedef unsigned __int32 uint32_t; -#endif -typedef signed __int64 int64_t; -typedef unsigned __int64 uint64_t; - +# if (_MSC_VER < 1300) +typedef signed char int8_t; +typedef signed short int16_t; +typedef signed int int32_t; +typedef unsigned char uint8_t; +typedef unsigned short uint16_t; +typedef unsigned int uint32_t; +# else +typedef signed __int8 int8_t; +typedef signed __int16 int16_t; +typedef signed __int32 int32_t; +typedef unsigned __int8 uint8_t; +typedef unsigned __int16 uint16_t; +typedef unsigned __int32 uint32_t; +# endif +typedef signed __int64 int64_t; +typedef unsigned __int64 uint64_t; // 7.18.1.2 Minimum-width integer types -typedef int8_t int_least8_t; -typedef int16_t int_least16_t; -typedef int32_t int_least32_t; -typedef int64_t int_least64_t; -typedef uint8_t uint_least8_t; -typedef uint16_t uint_least16_t; -typedef uint32_t uint_least32_t; -typedef uint64_t uint_least64_t; +typedef int8_t int_least8_t; +typedef int16_t int_least16_t; +typedef int32_t int_least32_t; +typedef int64_t int_least64_t; +typedef uint8_t uint_least8_t; +typedef uint16_t uint_least16_t; +typedef uint32_t uint_least32_t; +typedef uint64_t uint_least64_t; // 7.18.1.3 Fastest minimum-width integer types -typedef int8_t int_fast8_t; -typedef int16_t int_fast16_t; -typedef int32_t int_fast32_t; -typedef int64_t int_fast64_t; -typedef uint8_t uint_fast8_t; -typedef uint16_t uint_fast16_t; -typedef uint32_t uint_fast32_t; -typedef uint64_t uint_fast64_t; +typedef int8_t int_fast8_t; +typedef int16_t int_fast16_t; +typedef int32_t int_fast32_t; +typedef int64_t int_fast64_t; 
+typedef uint8_t uint_fast8_t; +typedef uint16_t uint_fast16_t; +typedef uint32_t uint_fast32_t; +typedef uint64_t uint_fast64_t; // 7.18.1.4 Integer types capable of holding object pointers -#ifdef _WIN64 // [ - typedef signed __int64 intptr_t; - typedef unsigned __int64 uintptr_t; -#else // _WIN64 ][ - typedef _W64 signed int intptr_t; - typedef _W64 unsigned int uintptr_t; -#endif // _WIN64 ] +# ifdef _WIN64 // [ +typedef signed __int64 intptr_t; +typedef unsigned __int64 uintptr_t; +# else // _WIN64 ][ +typedef _W64 signed int intptr_t; +typedef _W64 unsigned int uintptr_t; +# endif // _WIN64 ] // 7.18.1.5 Greatest-width integer types -typedef int64_t intmax_t; -typedef uint64_t uintmax_t; - +typedef int64_t intmax_t; +typedef uint64_t uintmax_t; // 7.18.2 Limits of specified-width integer types -#if !defined(__cplusplus) || defined(__STDC_LIMIT_MACROS) // [ See footnote 220 at page 257 and footnote 221 at page 259 +# if !defined(__cplusplus) \ + || defined( \ + __STDC_LIMIT_MACROS) // [ See footnote 220 at page 257 and footnote 221 at page 259 // 7.18.2.1 Limits of exact-width integer types -#define INT8_MIN ((int8_t)_I8_MIN) -#define INT8_MAX _I8_MAX -#define INT16_MIN ((int16_t)_I16_MIN) -#define INT16_MAX _I16_MAX -#define INT32_MIN ((int32_t)_I32_MIN) -#define INT32_MAX _I32_MAX -#define INT64_MIN ((int64_t)_I64_MIN) -#define INT64_MAX _I64_MAX -#define UINT8_MAX _UI8_MAX -#define UINT16_MAX _UI16_MAX -#define UINT32_MAX _UI32_MAX -#define UINT64_MAX _UI64_MAX +# define INT8_MIN ((int8_t)_I8_MIN) +# define INT8_MAX _I8_MAX +# define INT16_MIN ((int16_t)_I16_MIN) +# define INT16_MAX _I16_MAX +# define INT32_MIN ((int32_t)_I32_MIN) +# define INT32_MAX _I32_MAX +# define INT64_MIN ((int64_t)_I64_MIN) +# define INT64_MAX _I64_MAX +# define UINT8_MAX _UI8_MAX +# define UINT16_MAX _UI16_MAX +# define UINT32_MAX _UI32_MAX +# define UINT64_MAX _UI64_MAX // 7.18.2.2 Limits of minimum-width integer types -#define INT_LEAST8_MIN INT8_MIN -#define INT_LEAST8_MAX 
INT8_MAX -#define INT_LEAST16_MIN INT16_MIN -#define INT_LEAST16_MAX INT16_MAX -#define INT_LEAST32_MIN INT32_MIN -#define INT_LEAST32_MAX INT32_MAX -#define INT_LEAST64_MIN INT64_MIN -#define INT_LEAST64_MAX INT64_MAX -#define UINT_LEAST8_MAX UINT8_MAX -#define UINT_LEAST16_MAX UINT16_MAX -#define UINT_LEAST32_MAX UINT32_MAX -#define UINT_LEAST64_MAX UINT64_MAX +# define INT_LEAST8_MIN INT8_MIN +# define INT_LEAST8_MAX INT8_MAX +# define INT_LEAST16_MIN INT16_MIN +# define INT_LEAST16_MAX INT16_MAX +# define INT_LEAST32_MIN INT32_MIN +# define INT_LEAST32_MAX INT32_MAX +# define INT_LEAST64_MIN INT64_MIN +# define INT_LEAST64_MAX INT64_MAX +# define UINT_LEAST8_MAX UINT8_MAX +# define UINT_LEAST16_MAX UINT16_MAX +# define UINT_LEAST32_MAX UINT32_MAX +# define UINT_LEAST64_MAX UINT64_MAX // 7.18.2.3 Limits of fastest minimum-width integer types -#define INT_FAST8_MIN INT8_MIN -#define INT_FAST8_MAX INT8_MAX -#define INT_FAST16_MIN INT16_MIN -#define INT_FAST16_MAX INT16_MAX -#define INT_FAST32_MIN INT32_MIN -#define INT_FAST32_MAX INT32_MAX -#define INT_FAST64_MIN INT64_MIN -#define INT_FAST64_MAX INT64_MAX -#define UINT_FAST8_MAX UINT8_MAX -#define UINT_FAST16_MAX UINT16_MAX -#define UINT_FAST32_MAX UINT32_MAX -#define UINT_FAST64_MAX UINT64_MAX +# define INT_FAST8_MIN INT8_MIN +# define INT_FAST8_MAX INT8_MAX +# define INT_FAST16_MIN INT16_MIN +# define INT_FAST16_MAX INT16_MAX +# define INT_FAST32_MIN INT32_MIN +# define INT_FAST32_MAX INT32_MAX +# define INT_FAST64_MIN INT64_MIN +# define INT_FAST64_MAX INT64_MAX +# define UINT_FAST8_MAX UINT8_MAX +# define UINT_FAST16_MAX UINT16_MAX +# define UINT_FAST32_MAX UINT32_MAX +# define UINT_FAST64_MAX UINT64_MAX // 7.18.2.4 Limits of integer types capable of holding object pointers -#ifdef _WIN64 // [ -# define INTPTR_MIN INT64_MIN -# define INTPTR_MAX INT64_MAX -# define UINTPTR_MAX UINT64_MAX -#else // _WIN64 ][ -# define INTPTR_MIN INT32_MIN -# define INTPTR_MAX INT32_MAX -# define UINTPTR_MAX UINT32_MAX -#endif 
// _WIN64 ] +# ifdef _WIN64 // [ +# define INTPTR_MIN INT64_MIN +# define INTPTR_MAX INT64_MAX +# define UINTPTR_MAX UINT64_MAX +# else // _WIN64 ][ +# define INTPTR_MIN INT32_MIN +# define INTPTR_MAX INT32_MAX +# define UINTPTR_MAX UINT32_MAX +# endif // _WIN64 ] // 7.18.2.5 Limits of greatest-width integer types -#define INTMAX_MIN INT64_MIN -#define INTMAX_MAX INT64_MAX -#define UINTMAX_MAX UINT64_MAX +# define INTMAX_MIN INT64_MIN +# define INTMAX_MAX INT64_MAX +# define UINTMAX_MAX UINT64_MAX // 7.18.3 Limits of other integer types -#ifdef _WIN64 // [ -# define PTRDIFF_MIN _I64_MIN -# define PTRDIFF_MAX _I64_MAX -#else // _WIN64 ][ -# define PTRDIFF_MIN _I32_MIN -# define PTRDIFF_MAX _I32_MAX -#endif // _WIN64 ] +# ifdef _WIN64 // [ +# define PTRDIFF_MIN _I64_MIN +# define PTRDIFF_MAX _I64_MAX +# else // _WIN64 ][ +# define PTRDIFF_MIN _I32_MIN +# define PTRDIFF_MAX _I32_MAX +# endif // _WIN64 ] -#define SIG_ATOMIC_MIN INT_MIN -#define SIG_ATOMIC_MAX INT_MAX +# define SIG_ATOMIC_MIN INT_MIN +# define SIG_ATOMIC_MAX INT_MAX -#ifndef SIZE_MAX // [ -# ifdef _WIN64 // [ -# define SIZE_MAX _UI64_MAX -# else // _WIN64 ][ -# define SIZE_MAX _UI32_MAX -# endif // _WIN64 ] -#endif // SIZE_MAX ] +# ifndef SIZE_MAX // [ +# ifdef _WIN64 // [ +# define SIZE_MAX _UI64_MAX +# else // _WIN64 ][ +# define SIZE_MAX _UI32_MAX +# endif // _WIN64 ] +# endif // SIZE_MAX ] // WCHAR_MIN and WCHAR_MAX are also defined in -#ifndef WCHAR_MIN // [ -# define WCHAR_MIN 0 -#endif // WCHAR_MIN ] -#ifndef WCHAR_MAX // [ -# define WCHAR_MAX _UI16_MAX -#endif // WCHAR_MAX ] +# ifndef WCHAR_MIN // [ +# define WCHAR_MIN 0 +# endif // WCHAR_MIN ] +# ifndef WCHAR_MAX // [ +# define WCHAR_MAX _UI16_MAX +# endif // WCHAR_MAX ] -#define WINT_MIN 0 -#define WINT_MAX _UI16_MAX - -#endif // __STDC_LIMIT_MACROS ] +# define WINT_MIN 0 +# define WINT_MAX _UI16_MAX +# endif // __STDC_LIMIT_MACROS ] // 7.18.4 Limits of other integer types -#if !defined(__cplusplus) || defined(__STDC_CONSTANT_MACROS) // [ See 
footnote 224 at page 260 +# if !defined(__cplusplus) \ + || defined( \ + __STDC_CONSTANT_MACROS) // [ See footnote 224 at page 260 // 7.18.4.1 Macros for minimum-width integer constants -#define INT8_C(val) val##i8 -#define INT16_C(val) val##i16 -#define INT32_C(val) val##i32 -#define INT64_C(val) val##i64 +# define INT8_C(val) val##i8 +# define INT16_C(val) val##i16 +# define INT32_C(val) val##i32 +# define INT64_C(val) val##i64 -#define UINT8_C(val) val##ui8 -#define UINT16_C(val) val##ui16 -#define UINT32_C(val) val##ui32 -#define UINT64_C(val) val##ui64 +# define UINT8_C(val) val##ui8 +# define UINT16_C(val) val##ui16 +# define UINT32_C(val) val##ui32 +# define UINT64_C(val) val##ui64 // 7.18.4.2 Macros for greatest-width integer constants -#define INTMAX_C INT64_C -#define UINTMAX_C UINT64_C - -#endif // __STDC_CONSTANT_MACROS ] +# define INTMAX_C INT64_C +# define UINTMAX_C UINT64_C +# endif // __STDC_CONSTANT_MACROS ] #endif // _MSC_STDINT_H_ ] diff --git a/include/msvc_compat/strings.h b/include/msvc_compat/strings.h index 996f256c..6a1acc0f 100644 --- a/include/msvc_compat/strings.h +++ b/include/msvc_compat/strings.h @@ -4,9 +4,10 @@ /* MSVC doesn't define ffs/ffsl. 
This dummy strings.h header is provided * for both */ #ifdef _MSC_VER -# include -# pragma intrinsic(_BitScanForward) -static __forceinline int ffsl(long x) { +# include +# pragma intrinsic(_BitScanForward) +static __forceinline int +ffsl(long x) { unsigned long i; if (_BitScanForward(&i, x)) { @@ -15,44 +16,46 @@ static __forceinline int ffsl(long x) { return 0; } -static __forceinline int ffs(int x) { +static __forceinline int +ffs(int x) { return ffsl(x); } -# ifdef _M_X64 -# pragma intrinsic(_BitScanForward64) -# endif +# ifdef _M_X64 +# pragma intrinsic(_BitScanForward64) +# endif -static __forceinline int ffsll(unsigned __int64 x) { +static __forceinline int +ffsll(unsigned __int64 x) { unsigned long i; -#ifdef _M_X64 +# ifdef _M_X64 if (_BitScanForward64(&i, x)) { return i + 1; } return 0; -#else -// Fallback for 32-bit build where 64-bit version not available -// assuming little endian +# else + // Fallback for 32-bit build where 64-bit version not available + // assuming little endian union { unsigned __int64 ll; - unsigned long l[2]; + unsigned long l[2]; } s; s.ll = x; if (_BitScanForward(&i, s.l[0])) { return i + 1; - } else if(_BitScanForward(&i, s.l[1])) { + } else if (_BitScanForward(&i, s.l[1])) { return i + 33; } return 0; -#endif +# endif } #else -# define ffsll(x) __builtin_ffsll(x) -# define ffsl(x) __builtin_ffsl(x) -# define ffs(x) __builtin_ffs(x) +# define ffsll(x) __builtin_ffsll(x) +# define ffsl(x) __builtin_ffsl(x) +# define ffs(x) __builtin_ffs(x) #endif #endif /* strings_h */ diff --git a/msvc/test_threads/test_threads.cpp b/msvc/test_threads/test_threads.cpp index 6eed028d..e709c177 100644 --- a/msvc/test_threads/test_threads.cpp +++ b/msvc/test_threads/test_threads.cpp @@ -12,78 +12,108 @@ #define JEMALLOC_NO_DEMANGLE #include -using std::vector; +using std::minstd_rand; using std::thread; using std::uniform_int_distribution; -using std::minstd_rand; +using std::vector; -int test_threads() { - je_malloc_conf = "narenas:3"; - int 
narenas = 0; - size_t sz = sizeof(narenas); - je_mallctl("opt.narenas", (void *)&narenas, &sz, NULL, 0); - if (narenas != 3) { - printf("Error: unexpected number of arenas: %d\n", narenas); - return 1; - } - static const int sizes[] = { 7, 16, 32, 60, 91, 100, 120, 144, 169, 199, 255, 400, 670, 900, 917, 1025, 3333, 5190, 13131, 49192, 99999, 123123, 255265, 2333111 }; - static const int numSizes = (int)(sizeof(sizes) / sizeof(sizes[0])); - vector workers; - static const int numThreads = narenas + 1, numAllocsMax = 25, numIter1 = 50, numIter2 = 50; - je_malloc_stats_print(NULL, NULL, NULL); - size_t allocated1; - size_t sz1 = sizeof(allocated1); - je_mallctl("stats.active", (void *)&allocated1, &sz1, NULL, 0); - printf("\nPress Enter to start threads...\n"); - getchar(); - printf("Starting %d threads x %d x %d iterations...\n", numThreads, numIter1, numIter2); - for (int i = 0; i < numThreads; i++) { - workers.emplace_back([tid=i]() { - uniform_int_distribution sizeDist(0, numSizes - 1); - minstd_rand rnd(tid * 17); - uint8_t* ptrs[numAllocsMax]; - int ptrsz[numAllocsMax]; - for (int i = 0; i < numIter1; ++i) { - thread t([&]() { - for (int i = 0; i < numIter2; ++i) { - const int numAllocs = numAllocsMax - sizeDist(rnd); - for (int j = 0; j < numAllocs; j += 64) { - const int x = sizeDist(rnd); - const int sz = sizes[x]; - ptrsz[j] = sz; - ptrs[j] = (uint8_t*)je_malloc(sz); - if (!ptrs[j]) { - printf("Unable to allocate %d bytes in thread %d, iter %d, alloc %d. 
%d\n", sz, tid, i, j, x); - exit(1); - } - for (int k = 0; k < sz; k++) - ptrs[j][k] = tid + k; - } - for (int j = 0; j < numAllocs; j += 64) { - for (int k = 0, sz = ptrsz[j]; k < sz; k++) - if (ptrs[j][k] != (uint8_t)(tid + k)) { - printf("Memory error in thread %d, iter %d, alloc %d @ %d : %02X!=%02X\n", tid, i, j, k, ptrs[j][k], (uint8_t)(tid + k)); - exit(1); - } - je_free(ptrs[j]); - } - } - }); - t.join(); - } - }); - } - for (thread& t : workers) { - t.join(); - } - je_malloc_stats_print(NULL, NULL, NULL); - size_t allocated2; - je_mallctl("stats.active", (void *)&allocated2, &sz1, NULL, 0); - size_t leaked = allocated2 - allocated1; - printf("\nDone. Leaked: %zd bytes\n", leaked); - bool failed = leaked > 65536; // in case C++ runtime allocated something (e.g. iostream locale or facet) - printf("\nTest %s!\n", (failed ? "FAILED" : "successful")); - printf("\nPress Enter to continue...\n"); - getchar(); - return failed ? 1 : 0; +int +test_threads() { + je_malloc_conf = "narenas:3"; + int narenas = 0; + size_t sz = sizeof(narenas); + je_mallctl("opt.narenas", (void *)&narenas, &sz, NULL, 0); + if (narenas != 3) { + printf("Error: unexpected number of arenas: %d\n", narenas); + return 1; + } + static const int sizes[] = {7, 16, 32, 60, 91, 100, 120, 144, 169, 199, + 255, 400, 670, 900, 917, 1025, 3333, 5190, 13131, 49192, 99999, + 123123, 255265, 2333111}; + static const int numSizes = (int)(sizeof(sizes) / sizeof(sizes[0])); + vector workers; + static const int numThreads = narenas + 1, numAllocsMax = 25, + numIter1 = 50, numIter2 = 50; + je_malloc_stats_print(NULL, NULL, NULL); + size_t allocated1; + size_t sz1 = sizeof(allocated1); + je_mallctl("stats.active", (void *)&allocated1, &sz1, NULL, 0); + printf("\nPress Enter to start threads...\n"); + getchar(); + printf("Starting %d threads x %d x %d iterations...\n", numThreads, + numIter1, numIter2); + for (int i = 0; i < numThreads; i++) { + workers.emplace_back([tid = i]() { + uniform_int_distribution 
sizeDist(0, numSizes - 1); + minstd_rand rnd(tid * 17); + uint8_t *ptrs[numAllocsMax]; + int ptrsz[numAllocsMax]; + for (int i = 0; i < numIter1; ++i) { + thread t([&]() { + for (int i = 0; i < numIter2; ++i) { + const int numAllocs = + numAllocsMax + - sizeDist(rnd); + for (int j = 0; j < numAllocs; + j += 64) { + const int x = sizeDist( + rnd); + const int sz = sizes[x]; + ptrsz[j] = sz; + ptrs[j] = (uint8_t *) + je_malloc(sz); + if (!ptrs[j]) { + printf( + "Unable to allocate %d bytes in thread %d, iter %d, alloc %d. %d\n", + sz, tid, i, + j, x); + exit(1); + } + for (int k = 0; k < sz; + k++) + ptrs[j][k] = tid + + k; + } + for (int j = 0; j < numAllocs; + j += 64) { + for (int k = 0, + sz = ptrsz[j]; + k < sz; k++) + if (ptrs[j][k] + != (uint8_t)(tid + + k)) { + printf( + "Memory error in thread %d, iter %d, alloc %d @ %d : %02X!=%02X\n", + tid, + i, + j, + k, + ptrs[j] + [k], + (uint8_t)(tid + + k)); + exit(1); + } + je_free(ptrs[j]); + } + } + }); + t.join(); + } + }); + } + for (thread &t : workers) { + t.join(); + } + je_malloc_stats_print(NULL, NULL, NULL); + size_t allocated2; + je_mallctl("stats.active", (void *)&allocated2, &sz1, NULL, 0); + size_t leaked = allocated2 - allocated1; + printf("\nDone. Leaked: %zd bytes\n", leaked); + bool failed = leaked + > 65536; // in case C++ runtime allocated something (e.g. iostream locale or facet) + printf("\nTest %s!\n", (failed ? "FAILED" : "successful")); + printf("\nPress Enter to continue...\n"); + getchar(); + return failed ? 
1 : 0; } diff --git a/msvc/test_threads/test_threads_main.cpp b/msvc/test_threads/test_threads_main.cpp index 0a022fba..3e88c286 100644 --- a/msvc/test_threads/test_threads_main.cpp +++ b/msvc/test_threads/test_threads_main.cpp @@ -5,7 +5,8 @@ using namespace std::chrono_literals; -int main(int argc, char** argv) { - int rc = test_threads(); - return rc; +int +main(int argc, char **argv) { + int rc = test_threads(); + return rc; } diff --git a/src/arena.c b/src/arena.c index 1586ee91..2f58b038 100644 --- a/src/arena.c +++ b/src/arena.c @@ -22,12 +22,7 @@ JEMALLOC_DIAGNOSTIC_DISABLE_SPURIOUS * options and mallctl processing are straightforward. */ const char *const percpu_arena_mode_names[] = { - "percpu", - "phycpu", - "disabled", - "percpu", - "phycpu" -}; + "percpu", "phycpu", "disabled", "percpu", "phycpu"}; percpu_arena_mode_t opt_percpu_arena = PERCPU_ARENA_DEFAULT; ssize_t opt_dirty_decay_ms = DIRTY_DECAY_MS_DEFAULT; @@ -36,7 +31,7 @@ ssize_t opt_muzzy_decay_ms = MUZZY_DECAY_MS_DEFAULT; static atomic_zd_t dirty_decay_ms_default; static atomic_zd_t muzzy_decay_ms_default; -emap_t arena_emap_global; +emap_t arena_emap_global; static pa_central_t arena_pa_central_global; div_info_t arena_binind_div_info[SC_NBINS]; @@ -51,14 +46,15 @@ uint32_t arena_bin_offsets[SC_NBINS]; * that,the huge_arena_ind is updated to point to the actual huge arena, * which is the last one of the auto arenas. 
*/ -unsigned huge_arena_ind = 0; -bool opt_huge_arena_pac_thp = false; +unsigned huge_arena_ind = 0; +bool opt_huge_arena_pac_thp = false; pac_thp_t huge_arena_pac_thp = {.thp_madvise = false, - .auto_thp_switched = false, .n_thp_lazy = ATOMIC_INIT(0)}; + .auto_thp_switched = false, + .n_thp_lazy = ATOMIC_INIT(0)}; const arena_config_t arena_config_default = { - /* .extent_hooks = */ (extent_hooks_t *)&ehooks_default_extent_hooks, - /* .metadata_use_hooks = */ true, + /* .extent_hooks = */ (extent_hooks_t *)&ehooks_default_extent_hooks, + /* .metadata_use_hooks = */ true, }; /******************************************************************************/ @@ -67,13 +63,12 @@ const arena_config_t arena_config_default = { * definition. */ -static bool arena_decay_dirty(tsdn_t *tsdn, arena_t *arena, - bool is_background_thread, bool all); -static void arena_bin_lower_slab(tsdn_t *tsdn, arena_t *arena, edata_t *slab, - bin_t *bin); -static void -arena_maybe_do_deferred_work(tsdn_t *tsdn, arena_t *arena, decay_t *decay, - size_t npages_new); +static bool arena_decay_dirty( + tsdn_t *tsdn, arena_t *arena, bool is_background_thread, bool all); +static void arena_bin_lower_slab( + tsdn_t *tsdn, arena_t *arena, edata_t *slab, bin_t *bin); +static void arena_maybe_do_deferred_work( + tsdn_t *tsdn, arena_t *arena, decay_t *decay, size_t npages_new); /******************************************************************************/ @@ -92,8 +87,8 @@ void arena_stats_merge(tsdn_t *tsdn, arena_t *arena, unsigned *nthreads, const char **dss, ssize_t *dirty_decay_ms, ssize_t *muzzy_decay_ms, size_t *nactive, size_t *ndirty, size_t *nmuzzy, arena_stats_t *astats, - bin_stats_data_t *bstats, arena_stats_large_t *lstats, - pac_estats_t *estats, hpa_shard_stats_t *hpastats, sec_stats_t *secstats) { + bin_stats_data_t *bstats, arena_stats_large_t *lstats, pac_estats_t *estats, + hpa_shard_stats_t *hpastats, sec_stats_t *secstats) { cassert(config_stats); arena_basic_stats_merge(tsdn, 
arena, nthreads, dss, dirty_decay_ms, @@ -135,8 +130,8 @@ arena_stats_merge(tsdn_t *tsdn, arena_t *arena, unsigned *nthreads, uint64_t nrequests = locked_read_u64(tsdn, LOCKEDINT_MTX(arena->stats.mtx), &arena->stats.lstats[i].nrequests); - locked_inc_u64_unsynchronized(&lstats[i].nrequests, - nmalloc + nrequests); + locked_inc_u64_unsynchronized( + &lstats[i].nrequests, nmalloc + nrequests); astats->nrequests_large += nmalloc + nrequests; /* nfill == nmalloc for large currently. */ @@ -172,7 +167,7 @@ arena_stats_merge(tsdn_t *tsdn, arena_t *arena, unsigned *nthreads, astats->tcache_stashed_bytes = 0; malloc_mutex_lock(tsdn, &arena->tcache_ql_mtx); cache_bin_array_descriptor_t *descriptor; - ql_foreach(descriptor, &arena->cache_bin_array_descriptor_ql, link) { + ql_foreach (descriptor, &arena->cache_bin_array_descriptor_ql, link) { for (szind_t i = 0; i < TCACHE_NBINS_MAX; i++) { cache_bin_t *cache_bin = &descriptor->bins[i]; if (cache_bin_disabled(cache_bin)) { @@ -180,10 +175,11 @@ arena_stats_merge(tsdn_t *tsdn, arena_t *arena, unsigned *nthreads, } cache_bin_sz_t ncached, nstashed; - cache_bin_nitems_get_remote(cache_bin, &ncached, &nstashed); + cache_bin_nitems_get_remote( + cache_bin, &ncached, &nstashed); astats->tcache_bytes += ncached * sz_index2size(i); - astats->tcache_stashed_bytes += nstashed * - sz_index2size(i); + astats->tcache_stashed_bytes += nstashed + * sz_index2size(i); } } malloc_mutex_prof_read(tsdn, @@ -191,19 +187,18 @@ arena_stats_merge(tsdn_t *tsdn, arena_t *arena, unsigned *nthreads, &arena->tcache_ql_mtx); malloc_mutex_unlock(tsdn, &arena->tcache_ql_mtx); -#define READ_ARENA_MUTEX_PROF_DATA(mtx, ind) \ - malloc_mutex_lock(tsdn, &arena->mtx); \ - malloc_mutex_prof_read(tsdn, &astats->mutex_prof_data[ind], \ - &arena->mtx); \ - malloc_mutex_unlock(tsdn, &arena->mtx); +#define READ_ARENA_MUTEX_PROF_DATA(mtx, ind) \ + malloc_mutex_lock(tsdn, &arena->mtx); \ + malloc_mutex_prof_read( \ + tsdn, &astats->mutex_prof_data[ind], &arena->mtx); \ + 
malloc_mutex_unlock(tsdn, &arena->mtx); /* Gather per arena mutex profiling data. */ READ_ARENA_MUTEX_PROF_DATA(large_mtx, arena_prof_mutex_large); - READ_ARENA_MUTEX_PROF_DATA(base->mtx, - arena_prof_mutex_base); + READ_ARENA_MUTEX_PROF_DATA(base->mtx, arena_prof_mutex_base); #undef READ_ARENA_MUTEX_PROF_DATA - pa_shard_mtx_stats_read(tsdn, &arena->pa_shard, - astats->mutex_prof_data); + pa_shard_mtx_stats_read( + tsdn, &arena->pa_shard, astats->mutex_prof_data); nstime_copy(&astats->uptime, &arena->create_time); nstime_update(&astats->uptime); @@ -211,32 +206,33 @@ arena_stats_merge(tsdn_t *tsdn, arena_t *arena, unsigned *nthreads, for (szind_t i = 0; i < SC_NBINS; i++) { for (unsigned j = 0; j < bin_infos[i].n_shards; j++) { - bin_stats_merge(tsdn, &bstats[i], - arena_get_bin(arena, i, j)); + bin_stats_merge( + tsdn, &bstats[i], arena_get_bin(arena, i, j)); } } } static void -arena_background_thread_inactivity_check(tsdn_t *tsdn, arena_t *arena, - bool is_background_thread) { +arena_background_thread_inactivity_check( + tsdn_t *tsdn, arena_t *arena, bool is_background_thread) { if (!background_thread_enabled() || is_background_thread) { return; } - background_thread_info_t *info = - arena_background_thread_info_get(arena); + background_thread_info_t *info = arena_background_thread_info_get( + arena); if (background_thread_indefinite_sleep(info)) { - arena_maybe_do_deferred_work(tsdn, arena, - &arena->pa_shard.pac.decay_dirty, 0); + arena_maybe_do_deferred_work( + tsdn, arena, &arena->pa_shard.pac.decay_dirty, 0); } } /* * React to deferred work generated by a PAI function. 
*/ -void arena_handle_deferred_work(tsdn_t *tsdn, arena_t *arena) { - witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn), - WITNESS_RANK_CORE, 0); +void +arena_handle_deferred_work(tsdn_t *tsdn, arena_t *arena) { + witness_assert_depth_to_rank( + tsdn_witness_tsdp_get(tsdn), WITNESS_RANK_CORE, 0); if (decay_immediately(&arena->pa_shard.pac.decay_dirty)) { arena_decay_dirty(tsdn, arena, false, true); @@ -246,34 +242,34 @@ void arena_handle_deferred_work(tsdn_t *tsdn, arena_t *arena) { static void * arena_slab_reg_alloc(edata_t *slab, const bin_info_t *bin_info) { - void *ret; + void *ret; slab_data_t *slab_data = edata_slab_data_get(slab); - size_t regind; + size_t regind; assert(edata_nfree_get(slab) > 0); assert(!bitmap_full(slab_data->bitmap, &bin_info->bitmap_info)); regind = bitmap_sfu(slab_data->bitmap, &bin_info->bitmap_info); - ret = (void *)((byte_t *)edata_addr_get(slab) + - (uintptr_t)(bin_info->reg_size * regind)); + ret = (void *)((byte_t *)edata_addr_get(slab) + + (uintptr_t)(bin_info->reg_size * regind)); edata_nfree_dec(slab); return ret; } static void -arena_slab_reg_alloc_batch(edata_t *slab, const bin_info_t *bin_info, - unsigned cnt, void** ptrs) { +arena_slab_reg_alloc_batch( + edata_t *slab, const bin_info_t *bin_info, unsigned cnt, void **ptrs) { slab_data_t *slab_data = edata_slab_data_get(slab); assert(edata_nfree_get(slab) >= cnt); assert(!bitmap_full(slab_data->bitmap, &bin_info->bitmap_info)); -#if (! 
defined JEMALLOC_INTERNAL_POPCOUNTL) || (defined BITMAP_USE_TREE) +#if (!defined JEMALLOC_INTERNAL_POPCOUNTL) || (defined BITMAP_USE_TREE) for (unsigned i = 0; i < cnt; i++) { - size_t regind = bitmap_sfu(slab_data->bitmap, - &bin_info->bitmap_info); - *(ptrs + i) = (void *)((uintptr_t)edata_addr_get(slab) + - (uintptr_t)(bin_info->reg_size * regind)); + size_t regind = bitmap_sfu( + slab_data->bitmap, &bin_info->bitmap_info); + *(ptrs + i) = (void *)((uintptr_t)edata_addr_get(slab) + + (uintptr_t)(bin_info->reg_size * regind)); } #else unsigned group = 0; @@ -327,10 +323,9 @@ arena_large_malloc_stats_update(tsdn_t *tsdn, arena_t *arena, size_t usize) { szind_t hindex = index - SC_NBINS; LOCKEDINT_MTX_LOCK(tsdn, arena->stats.mtx); locked_inc_u64(tsdn, LOCKEDINT_MTX(arena->stats.mtx), - &arena->stats.lstats[hindex].nmalloc, 1); + &arena->stats.lstats[hindex].nmalloc, 1); locked_inc_u64(tsdn, LOCKEDINT_MTX(arena->stats.mtx), - &arena->stats.lstats[hindex].active_bytes, - usize); + &arena->stats.lstats[hindex].active_bytes, usize); LOCKEDINT_MTX_UNLOCK(tsdn, arena->stats.mtx); } } @@ -353,30 +348,29 @@ arena_large_dalloc_stats_update(tsdn_t *tsdn, arena_t *arena, size_t usize) { szind_t hindex = index - SC_NBINS; LOCKEDINT_MTX_LOCK(tsdn, arena->stats.mtx); locked_inc_u64(tsdn, LOCKEDINT_MTX(arena->stats.mtx), - &arena->stats.lstats[hindex].ndalloc, 1); + &arena->stats.lstats[hindex].ndalloc, 1); locked_dec_u64(tsdn, LOCKEDINT_MTX(arena->stats.mtx), - &arena->stats.lstats[hindex].active_bytes, - usize); + &arena->stats.lstats[hindex].active_bytes, usize); LOCKEDINT_MTX_UNLOCK(tsdn, arena->stats.mtx); } } static void -arena_large_ralloc_stats_update(tsdn_t *tsdn, arena_t *arena, size_t oldusize, - size_t usize) { +arena_large_ralloc_stats_update( + tsdn_t *tsdn, arena_t *arena, size_t oldusize, size_t usize) { arena_large_malloc_stats_update(tsdn, arena, usize); arena_large_dalloc_stats_update(tsdn, arena, oldusize); } edata_t * -arena_extent_alloc_large(tsdn_t *tsdn, 
arena_t *arena, size_t usize, - size_t alignment, bool zero) { - bool deferred_work_generated = false; +arena_extent_alloc_large( + tsdn_t *tsdn, arena_t *arena, size_t usize, size_t alignment, bool zero) { + bool deferred_work_generated = false; szind_t szind = sz_size2index(usize); - size_t esize = usize + sz_large_pad; + size_t esize = usize + sz_large_pad; - bool guarded = san_large_extent_decide_guard(tsdn, - arena_get_ehooks(arena), esize, alignment); + bool guarded = san_large_extent_decide_guard( + tsdn, arena_get_ehooks(arena), esize, alignment); /* * - if usize >= opt_calloc_madvise_threshold, @@ -406,7 +400,7 @@ arena_extent_alloc_large(tsdn_t *tsdn, arena_t *arena, size_t usize, * if cache_oblivious is enabled. */ if (zero && !zero_override && !edata_zeroed_get(edata)) { - void *addr = edata_addr_get(edata); + void *addr = edata_addr_get(edata); size_t usize = edata_usize_get(edata); memset(addr, 0, usize); } @@ -417,14 +411,14 @@ arena_extent_alloc_large(tsdn_t *tsdn, arena_t *arena, size_t usize, void arena_extent_dalloc_large_prep(tsdn_t *tsdn, arena_t *arena, edata_t *edata) { if (config_stats) { - arena_large_dalloc_stats_update(tsdn, arena, - edata_usize_get(edata)); + arena_large_dalloc_stats_update( + tsdn, arena, edata_usize_get(edata)); } } void -arena_extent_ralloc_large_shrink(tsdn_t *tsdn, arena_t *arena, edata_t *edata, - size_t oldusize) { +arena_extent_ralloc_large_shrink( + tsdn_t *tsdn, arena_t *arena, edata_t *edata, size_t oldusize) { size_t usize = edata_usize_get(edata); if (config_stats) { @@ -433,8 +427,8 @@ arena_extent_ralloc_large_shrink(tsdn_t *tsdn, arena_t *arena, edata_t *edata, } void -arena_extent_ralloc_large_expand(tsdn_t *tsdn, arena_t *arena, edata_t *edata, - size_t oldusize) { +arena_extent_ralloc_large_expand( + tsdn_t *tsdn, arena_t *arena, edata_t *edata, size_t oldusize) { size_t usize = edata_usize_get(edata); if (config_stats) { @@ -459,12 +453,12 @@ arena_decide_unforced_purge_eagerness(bool 
is_background_thread) { } bool -arena_decay_ms_set(tsdn_t *tsdn, arena_t *arena, extent_state_t state, - ssize_t decay_ms) { +arena_decay_ms_set( + tsdn_t *tsdn, arena_t *arena, extent_state_t state, ssize_t decay_ms) { pac_purge_eagerness_t eagerness = arena_decide_unforced_purge_eagerness( /* is_background_thread */ false); - return pa_decay_ms_set(tsdn, &arena->pa_shard, state, decay_ms, - eagerness); + return pa_decay_ms_set( + tsdn, &arena->pa_shard, state, decay_ms, eagerness); } ssize_t @@ -474,8 +468,8 @@ arena_decay_ms_get(arena_t *arena, extent_state_t state) { static bool arena_decay_impl(tsdn_t *tsdn, arena_t *arena, decay_t *decay, - pac_decay_stats_t *decay_stats, ecache_t *ecache, - bool is_background_thread, bool all) { + pac_decay_stats_t *decay_stats, ecache_t *ecache, bool is_background_thread, + bool all) { if (all) { malloc_mutex_lock(tsdn, &decay->mtx); pac_decay_all(tsdn, &arena->pa_shard.pac, decay, decay_stats, @@ -488,10 +482,10 @@ arena_decay_impl(tsdn_t *tsdn, arena_t *arena, decay_t *decay, /* No need to wait if another thread is in progress. */ return true; } - pac_purge_eagerness_t eagerness = - arena_decide_unforced_purge_eagerness(is_background_thread); - bool epoch_advanced = pac_maybe_decay_purge(tsdn, &arena->pa_shard.pac, - decay, decay_stats, ecache, eagerness); + pac_purge_eagerness_t eagerness = arena_decide_unforced_purge_eagerness( + is_background_thread); + bool epoch_advanced = pac_maybe_decay_purge( + tsdn, &arena->pa_shard.pac, decay, decay_stats, ecache, eagerness); size_t npages_new JEMALLOC_CLANG_ANALYZER_SILENCE_INIT(0); if (epoch_advanced) { /* Backlog is updated on epoch advance. 
*/ @@ -499,8 +493,8 @@ arena_decay_impl(tsdn_t *tsdn, arena_t *arena, decay_t *decay, } malloc_mutex_unlock(tsdn, &decay->mtx); - if (have_background_thread && background_thread_enabled() && - epoch_advanced && !is_background_thread) { + if (have_background_thread && background_thread_enabled() + && epoch_advanced && !is_background_thread) { arena_maybe_do_deferred_work(tsdn, arena, decay, npages_new); } @@ -508,16 +502,16 @@ arena_decay_impl(tsdn_t *tsdn, arena_t *arena, decay_t *decay, } static bool -arena_decay_dirty(tsdn_t *tsdn, arena_t *arena, bool is_background_thread, - bool all) { +arena_decay_dirty( + tsdn_t *tsdn, arena_t *arena, bool is_background_thread, bool all) { return arena_decay_impl(tsdn, arena, &arena->pa_shard.pac.decay_dirty, &arena->pa_shard.pac.stats->decay_dirty, &arena->pa_shard.pac.ecache_dirty, is_background_thread, all); } static bool -arena_decay_muzzy(tsdn_t *tsdn, arena_t *arena, bool is_background_thread, - bool all) { +arena_decay_muzzy( + tsdn_t *tsdn, arena_t *arena, bool is_background_thread, bool all) { if (pa_shard_dont_decay_muzzy(&arena->pa_shard)) { return false; } @@ -564,13 +558,13 @@ arena_should_decay_early(tsdn_t *tsdn, arena_t *arena, decay_t *decay, } nstime_subtract(remaining_sleep, &decay->epoch); if (npages_new > 0) { - uint64_t npurge_new = decay_npages_purge_in(decay, - remaining_sleep, npages_new); + uint64_t npurge_new = decay_npages_purge_in( + decay, remaining_sleep, npages_new); info->npages_to_purge_new += npurge_new; } malloc_mutex_unlock(tsdn, &decay->mtx); - return info->npages_to_purge_new > - ARENA_DEFERRED_PURGE_NPAGES_THRESHOLD; + return info->npages_to_purge_new + > ARENA_DEFERRED_PURGE_NPAGES_THRESHOLD; } /* @@ -582,8 +576,8 @@ arena_should_decay_early(tsdn_t *tsdn, arena_t *arena, decay_t *decay, * deferred work has been generated. 
*/ static void -arena_maybe_do_deferred_work(tsdn_t *tsdn, arena_t *arena, decay_t *decay, - size_t npages_new) { +arena_maybe_do_deferred_work( + tsdn_t *tsdn, arena_t *arena, decay_t *decay, size_t npages_new) { background_thread_info_t *info = arena_background_thread_info_get( arena); if (malloc_mutex_trylock(tsdn, &info->mtx)) { @@ -603,7 +597,7 @@ arena_maybe_do_deferred_work(tsdn_t *tsdn, arena_t *arena, decay_t *decay, if (background_thread_indefinite_sleep(info)) { background_thread_wakeup_early(info, NULL); } else if (arena_should_decay_early(tsdn, arena, decay, info, - &remaining_sleep, npages_new)) { + &remaining_sleep, npages_new)) { info->npages_to_purge_new = 0; background_thread_wakeup_early(info, &remaining_sleep); } @@ -687,8 +681,8 @@ arena_bin_reset(tsd_t *tsd, arena_t *arena, bin_t *bin, unsigned binind) { if (arena_bin_has_batch(binind)) { bin_with_batch_t *batched_bin = (bin_with_batch_t *)bin; - batcher_init(&batched_bin->remote_frees, - BIN_REMOTE_FREE_ELEMS_MAX); + batcher_init( + &batched_bin->remote_frees, BIN_REMOTE_FREE_ELEMS_MAX); } if (bin->slabcur != NULL) { @@ -743,8 +737,8 @@ arena_prof_demote(tsdn_t *tsdn, edata_t *edata, const void *ptr) { assert(ptr != NULL); size_t usize = isalloc(tsdn, ptr); size_t bumped_usize = sz_sa2u(usize, PROF_SAMPLE_ALIGNMENT); - assert(bumped_usize <= SC_LARGE_MINCLASS && - PAGE_CEILING(bumped_usize) == bumped_usize); + assert(bumped_usize <= SC_LARGE_MINCLASS + && PAGE_CEILING(bumped_usize) == bumped_usize); assert(edata_size_get(edata) - bumped_usize <= sz_large_pad); szind_t szind = sz_size2index(bumped_usize); @@ -757,8 +751,8 @@ arena_prof_demote(tsdn_t *tsdn, edata_t *edata, const void *ptr) { } static void -arena_dalloc_promoted_impl(tsdn_t *tsdn, void *ptr, tcache_t *tcache, - bool slow_path, edata_t *edata) { +arena_dalloc_promoted_impl( + tsdn_t *tsdn, void *ptr, tcache_t *tcache, bool slow_path, edata_t *edata) { cassert(config_prof); assert(opt_prof); @@ -772,20 +766,20 @@ 
arena_dalloc_promoted_impl(tsdn_t *tsdn, void *ptr, tcache_t *tcache, safety_check_verify_redzone(ptr, usize, bumped_usize); } szind_t bumped_ind = sz_size2index(bumped_usize); - if (bumped_usize >= SC_LARGE_MINCLASS && - tcache != NULL && bumped_ind < TCACHE_NBINS_MAX && - !tcache_bin_disabled(bumped_ind, &tcache->bins[bumped_ind], - tcache->tcache_slow)) { - tcache_dalloc_large(tsdn_tsd(tsdn), tcache, ptr, bumped_ind, - slow_path); + if (bumped_usize >= SC_LARGE_MINCLASS && tcache != NULL + && bumped_ind < TCACHE_NBINS_MAX + && !tcache_bin_disabled( + bumped_ind, &tcache->bins[bumped_ind], tcache->tcache_slow)) { + tcache_dalloc_large( + tsdn_tsd(tsdn), tcache, ptr, bumped_ind, slow_path); } else { large_dalloc(tsdn, edata); } } void -arena_dalloc_promoted(tsdn_t *tsdn, void *ptr, tcache_t *tcache, - bool slow_path) { +arena_dalloc_promoted( + tsdn_t *tsdn, void *ptr, tcache_t *tcache, bool slow_path) { edata_t *edata = emap_edata_lookup(tsdn, &arena_emap_global, ptr); arena_dalloc_promoted_impl(tsdn, ptr, tcache, slow_path, edata); } @@ -810,14 +804,14 @@ arena_reset(tsd_t *tsd, arena_t *arena) { malloc_mutex_lock(tsd_tsdn(tsd), &arena->large_mtx); for (edata_t *edata = edata_list_active_first(&arena->large); - edata != NULL; edata = edata_list_active_first(&arena->large)) { - void *ptr = edata_base_get(edata); + edata != NULL; edata = edata_list_active_first(&arena->large)) { + void *ptr = edata_base_get(edata); size_t usize; malloc_mutex_unlock(tsd_tsdn(tsd), &arena->large_mtx); emap_alloc_ctx_t alloc_ctx; - emap_alloc_ctx_lookup(tsd_tsdn(tsd), &arena_emap_global, ptr, - &alloc_ctx); + emap_alloc_ctx_lookup( + tsd_tsdn(tsd), &arena_emap_global, ptr, &alloc_ctx); assert(alloc_ctx.szind != SC_NSIZES); if (config_stats || (config_prof && opt_prof)) { @@ -841,16 +835,16 @@ arena_reset(tsd_t *tsd, arena_t *arena) { /* Bins. 
*/ for (unsigned i = 0; i < SC_NBINS; i++) { for (unsigned j = 0; j < bin_infos[i].n_shards; j++) { - arena_bin_reset(tsd, arena, arena_get_bin(arena, i, j), - i); + arena_bin_reset( + tsd, arena, arena_get_bin(arena, i, j), i); } } pa_shard_reset(tsd_tsdn(tsd), &arena->pa_shard); } static void -arena_prepare_base_deletion_sync_finish(tsd_t *tsd, malloc_mutex_t **mutexes, - unsigned n_mtx) { +arena_prepare_base_deletion_sync_finish( + tsd_t *tsd, malloc_mutex_t **mutexes, unsigned n_mtx) { for (unsigned i = 0; i < n_mtx; i++) { malloc_mutex_lock(tsd_tsdn(tsd), mutexes[i]); malloc_mutex_unlock(tsd_tsdn(tsd), mutexes[i]); @@ -909,9 +903,9 @@ arena_prepare_base_deletion(tsd_t *tsd, base_t *base_to_destroy) { unsigned destroy_ind = base_ind_get(base_to_destroy); assert(destroy_ind >= manual_arena_base); - tsdn_t *tsdn = tsd_tsdn(tsd); + tsdn_t *tsdn = tsd_tsdn(tsd); malloc_mutex_t *delayed_mtx[ARENA_DESTROY_MAX_DELAYED_MTX]; - unsigned n_delayed = 0, total = narenas_total_get(); + unsigned n_delayed = 0, total = narenas_total_get(); for (unsigned i = 0; i < total; i++) { if (i == destroy_ind) { continue; @@ -921,12 +915,12 @@ arena_prepare_base_deletion(tsd_t *tsd, base_t *base_to_destroy) { continue; } pac_t *pac = &arena->pa_shard.pac; - arena_prepare_base_deletion_sync(tsd, &pac->ecache_dirty.mtx, - delayed_mtx, &n_delayed); - arena_prepare_base_deletion_sync(tsd, &pac->ecache_muzzy.mtx, - delayed_mtx, &n_delayed); - arena_prepare_base_deletion_sync(tsd, &pac->ecache_retained.mtx, - delayed_mtx, &n_delayed); + arena_prepare_base_deletion_sync( + tsd, &pac->ecache_dirty.mtx, delayed_mtx, &n_delayed); + arena_prepare_base_deletion_sync( + tsd, &pac->ecache_muzzy.mtx, delayed_mtx, &n_delayed); + arena_prepare_base_deletion_sync( + tsd, &pac->ecache_retained.mtx, delayed_mtx, &n_delayed); } arena_prepare_base_deletion_sync_finish(tsd, delayed_mtx, n_delayed); } @@ -968,17 +962,17 @@ arena_destroy(tsd_t *tsd, arena_t *arena) { } static edata_t * -arena_slab_alloc(tsdn_t 
*tsdn, arena_t *arena, szind_t binind, unsigned binshard, - const bin_info_t *bin_info) { +arena_slab_alloc(tsdn_t *tsdn, arena_t *arena, szind_t binind, + unsigned binshard, const bin_info_t *bin_info) { bool deferred_work_generated = false; - witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn), - WITNESS_RANK_CORE, 0); + witness_assert_depth_to_rank( + tsdn_witness_tsdp_get(tsdn), WITNESS_RANK_CORE, 0); - bool guarded = san_slab_extent_decide_guard(tsdn, - arena_get_ehooks(arena)); + bool guarded = san_slab_extent_decide_guard( + tsdn, arena_get_ehooks(arena)); edata_t *slab = pa_alloc(tsdn, &arena->pa_shard, bin_info->slab_size, /* alignment */ PAGE, /* slab */ true, /* szind */ binind, - /* zero */ false, guarded, &deferred_work_generated); + /* zero */ false, guarded, &deferred_work_generated); if (deferred_work_generated) { arena_handle_deferred_work(tsdn, arena); @@ -1024,15 +1018,15 @@ static void * arena_bin_malloc_with_fresh_slab(tsdn_t *tsdn, arena_t *arena, bin_t *bin, szind_t binind, edata_t *fresh_slab) { malloc_mutex_assert_owner(tsdn, &bin->lock); - arena_bin_refill_slabcur_with_fresh_slab(tsdn, arena, bin, binind, - fresh_slab); + arena_bin_refill_slabcur_with_fresh_slab( + tsdn, arena, bin, binind, fresh_slab); return arena_slab_reg_alloc(bin->slabcur, &bin_infos[binind]); } static bool -arena_bin_refill_slabcur_no_fresh_slab(tsdn_t *tsdn, arena_t *arena, - bin_t *bin) { +arena_bin_refill_slabcur_no_fresh_slab( + tsdn_t *tsdn, arena_t *arena, bin_t *bin) { malloc_mutex_assert_owner(tsdn, &bin->lock); /* Only called after arena_slab_reg_alloc[_batch] failed. 
*/ assert(bin->slabcur == NULL || edata_nfree_get(bin->slabcur) == 0); @@ -1049,8 +1043,8 @@ arena_bin_refill_slabcur_no_fresh_slab(tsdn_t *tsdn, arena_t *arena, } bin_t * -arena_bin_choose(tsdn_t *tsdn, arena_t *arena, szind_t binind, - unsigned *binshard_p) { +arena_bin_choose( + tsdn_t *tsdn, arena_t *arena, szind_t binind, unsigned *binshard_p) { unsigned binshard; if (tsdn_null(tsdn) || tsd_arena_get(tsdn_tsd(tsdn)) == NULL) { binshard = 0; @@ -1065,8 +1059,8 @@ arena_bin_choose(tsdn_t *tsdn, arena_t *arena, szind_t binind, } void -arena_cache_bin_fill_small(tsdn_t *tsdn, arena_t *arena, - cache_bin_t *cache_bin, szind_t binind, const cache_bin_sz_t nfill_min, +arena_cache_bin_fill_small(tsdn_t *tsdn, arena_t *arena, cache_bin_t *cache_bin, + szind_t binind, const cache_bin_sz_t nfill_min, const cache_bin_sz_t nfill_max) { assert(cache_bin_ncached_get_local(cache_bin) == 0); assert(nfill_min > 0 && nfill_min <= nfill_max); @@ -1102,12 +1096,12 @@ arena_cache_bin_fill_small(tsdn_t *tsdn, arena_t *arena, * local exhausted, b) unlock and slab_alloc returns null, c) re-lock * and bin local fails again. */ - bool made_progress = true; - edata_t *fresh_slab = NULL; - bool alloc_and_retry = false; + bool made_progress = true; + edata_t *fresh_slab = NULL; + bool alloc_and_retry = false; cache_bin_sz_t filled = 0; - unsigned binshard; - bin_t *bin = arena_bin_choose(tsdn, arena, binind, &binshard); + unsigned binshard; + bin_t *bin = arena_bin_choose(tsdn, arena, binind, &binshard); /* * This has some fields that are conditionally initialized down batch @@ -1120,7 +1114,8 @@ arena_cache_bin_fill_small(tsdn_t *tsdn, arena_t *arena, JEMALLOC_CLANG_ANALYZER_SILENCE_INIT({0}); label_refill: malloc_mutex_lock(tsdn, &bin->lock); - arena_bin_flush_batch_after_lock(tsdn, arena, bin, binind, &batch_flush_state); + arena_bin_flush_batch_after_lock( + tsdn, arena, bin, binind, &batch_flush_state); while (filled < nfill_min) { /* Try batch-fill from slabcur first. 
*/ @@ -1136,8 +1131,8 @@ label_refill: cnt = nfill_min - filled; } - arena_slab_reg_alloc_batch(slabcur, bin_info, cnt, - &ptrs.ptr[filled]); + arena_slab_reg_alloc_batch( + slabcur, bin_info, cnt, &ptrs.ptr[filled]); made_progress = true; filled += cnt; continue; @@ -1150,8 +1145,8 @@ label_refill: /* Then see if a new slab was reserved already. */ if (fresh_slab != NULL) { - arena_bin_refill_slabcur_with_fresh_slab(tsdn, arena, - bin, binind, fresh_slab); + arena_bin_refill_slabcur_with_fresh_slab( + tsdn, arena, bin, binind, fresh_slab); assert(bin->slabcur != NULL); fresh_slab = NULL; continue; @@ -1181,27 +1176,27 @@ label_refill: cache_bin->tstats.nrequests = 0; } - arena_bin_flush_batch_before_unlock(tsdn, arena, bin, binind, - &batch_flush_state); + arena_bin_flush_batch_before_unlock( + tsdn, arena, bin, binind, &batch_flush_state); malloc_mutex_unlock(tsdn, &bin->lock); - arena_bin_flush_batch_after_unlock(tsdn, arena, bin, binind, - &batch_flush_state); + arena_bin_flush_batch_after_unlock( + tsdn, arena, bin, binind, &batch_flush_state); if (alloc_and_retry) { assert(fresh_slab == NULL); assert(filled < nfill_min); assert(made_progress); - fresh_slab = arena_slab_alloc(tsdn, arena, binind, binshard, - bin_info); + fresh_slab = arena_slab_alloc( + tsdn, arena, binind, binshard, bin_info); /* fresh_slab NULL case handled in the for loop. */ alloc_and_retry = false; made_progress = false; goto label_refill; } - assert((filled >= nfill_min && filled <= nfill_max) || - (fresh_slab == NULL && !made_progress)); + assert((filled >= nfill_min && filled <= nfill_max) + || (fresh_slab == NULL && !made_progress)); /* Release if allocated but not used. 
*/ if (fresh_slab != NULL) { @@ -1219,22 +1214,24 @@ arena_fill_small_fresh(tsdn_t *tsdn, arena_t *arena, szind_t binind, void **ptrs, size_t nfill, bool zero) { assert(binind < SC_NBINS); const bin_info_t *bin_info = &bin_infos[binind]; - const size_t nregs = bin_info->nregs; + const size_t nregs = bin_info->nregs; assert(nregs > 0); const size_t usize = bin_info->reg_size; const bool manual_arena = !arena_is_auto(arena); - unsigned binshard; - bin_t *bin = arena_bin_choose(tsdn, arena, binind, &binshard); + unsigned binshard; + bin_t *bin = arena_bin_choose(tsdn, arena, binind, &binshard); - size_t nslab = 0; - size_t filled = 0; - edata_t *slab = NULL; + size_t nslab = 0; + size_t filled = 0; + edata_t *slab = NULL; edata_list_active_t fulls; edata_list_active_init(&fulls); - while (filled < nfill && (slab = arena_slab_alloc(tsdn, arena, binind, - binshard, bin_info)) != NULL) { + while (filled < nfill + && (slab = arena_slab_alloc( + tsdn, arena, binind, binshard, bin_info)) + != NULL) { assert((size_t)edata_nfree_get(slab) == nregs); ++nslab; size_t batch = nfill - filled; @@ -1242,8 +1239,8 @@ arena_fill_small_fresh(tsdn_t *tsdn, arena_t *arena, szind_t binind, batch = nregs; } assert(batch > 0); - arena_slab_reg_alloc_batch(slab, bin_info, (unsigned)batch, - &ptrs[filled]); + arena_slab_reg_alloc_batch( + slab, bin_info, (unsigned)batch, &ptrs[filled]); assert(edata_addr_get(slab) == ptrs[filled]); if (zero) { memset(ptrs[filled], 0, batch * usize); @@ -1287,8 +1284,8 @@ arena_fill_small_fresh(tsdn_t *tsdn, arena_t *arena, szind_t binind, * bin->slabcur if necessary. 
*/ static void * -arena_bin_malloc_no_fresh_slab(tsdn_t *tsdn, arena_t *arena, bin_t *bin, - szind_t binind) { +arena_bin_malloc_no_fresh_slab( + tsdn_t *tsdn, arena_t *arena, bin_t *bin, szind_t binind) { malloc_mutex_assert_owner(tsdn, &bin->lock); if (bin->slabcur == NULL || edata_nfree_get(bin->slabcur) == 0) { if (arena_bin_refill_slabcur_no_fresh_slab(tsdn, arena, bin)) { @@ -1304,18 +1301,18 @@ static void * arena_malloc_small(tsdn_t *tsdn, arena_t *arena, szind_t binind, bool zero) { assert(binind < SC_NBINS); const bin_info_t *bin_info = &bin_infos[binind]; - size_t usize = sz_index2size(binind); - unsigned binshard; + size_t usize = sz_index2size(binind); + unsigned binshard; bin_t *bin = arena_bin_choose(tsdn, arena, binind, &binshard); malloc_mutex_lock(tsdn, &bin->lock); edata_t *fresh_slab = NULL; - void *ret = arena_bin_malloc_no_fresh_slab(tsdn, arena, bin, binind); + void *ret = arena_bin_malloc_no_fresh_slab(tsdn, arena, bin, binind); if (ret == NULL) { malloc_mutex_unlock(tsdn, &bin->lock); /******************************/ - fresh_slab = arena_slab_alloc(tsdn, arena, binind, binshard, - bin_info); + fresh_slab = arena_slab_alloc( + tsdn, arena, binind, binshard, bin_info); /********************************/ malloc_mutex_lock(tsdn, &bin->lock); /* Retry since the lock was dropped. 
*/ @@ -1326,8 +1323,8 @@ arena_malloc_small(tsdn_t *tsdn, arena_t *arena, szind_t binind, bool zero) { malloc_mutex_unlock(tsdn, &bin->lock); return NULL; } - ret = arena_bin_malloc_with_fresh_slab(tsdn, arena, bin, - binind, fresh_slab); + ret = arena_bin_malloc_with_fresh_slab( + tsdn, arena, bin, binind, fresh_slab); fresh_slab = NULL; } } @@ -1390,7 +1387,8 @@ arena_palloc(tsdn_t *tsdn, arena_t *arena, size_t usize, size_t alignment, if (likely(alignment <= CACHELINE)) { return large_malloc(tsdn, arena, usize, zero); } else { - return large_palloc(tsdn, arena, usize, alignment, zero); + return large_palloc( + tsdn, arena, usize, alignment, zero); } } } @@ -1401,7 +1399,7 @@ arena_dissociate_bin_slab(arena_t *arena, edata_t *slab, bin_t *bin) { if (slab == bin->slabcur) { bin->slabcur = NULL; } else { - szind_t binind = edata_szind_get(slab); + szind_t binind = edata_szind_get(slab); const bin_info_t *bin_info = &bin_infos[binind]; /* @@ -1418,8 +1416,7 @@ arena_dissociate_bin_slab(arena_t *arena, edata_t *slab, bin_t *bin) { } static void -arena_bin_lower_slab(tsdn_t *tsdn, arena_t *arena, edata_t *slab, - bin_t *bin) { +arena_bin_lower_slab(tsdn_t *tsdn, arena_t *arena, edata_t *slab, bin_t *bin) { assert(edata_nfree_get(slab) > 0); /* @@ -1455,24 +1452,24 @@ arena_dalloc_bin_slab_prepare(tsdn_t *tsdn, edata_t *slab, bin_t *bin) { } void -arena_dalloc_bin_locked_handle_newly_empty(tsdn_t *tsdn, arena_t *arena, - edata_t *slab, bin_t *bin) { +arena_dalloc_bin_locked_handle_newly_empty( + tsdn_t *tsdn, arena_t *arena, edata_t *slab, bin_t *bin) { arena_dissociate_bin_slab(arena, slab, bin); arena_dalloc_bin_slab_prepare(tsdn, slab, bin); } void -arena_dalloc_bin_locked_handle_newly_nonempty(tsdn_t *tsdn, arena_t *arena, - edata_t *slab, bin_t *bin) { +arena_dalloc_bin_locked_handle_newly_nonempty( + tsdn_t *tsdn, arena_t *arena, edata_t *slab, bin_t *bin) { arena_bin_slabs_full_remove(arena, bin, slab); arena_bin_lower_slab(tsdn, arena, slab, bin); } static void 
arena_dalloc_bin(tsdn_t *tsdn, arena_t *arena, edata_t *edata, void *ptr) { - szind_t binind = edata_szind_get(edata); + szind_t binind = edata_szind_get(edata); unsigned binshard = edata_binshard_get(edata); - bin_t *bin = arena_get_bin(arena, binind, binshard); + bin_t *bin = arena_get_bin(arena, binind, binshard); malloc_mutex_lock(tsdn, &bin->lock); arena_dalloc_bin_locked_info_t info; @@ -1515,16 +1512,15 @@ arena_ralloc_no_move(tsdn_t *tsdn, void *ptr, size_t oldsize, size_t size, size_t usize_min = sz_s2u(size); size_t usize_max = sz_s2u(size + extra); - if (likely(oldsize <= SC_SMALL_MAXCLASS && usize_min - <= SC_SMALL_MAXCLASS)) { + if (likely(oldsize <= SC_SMALL_MAXCLASS + && usize_min <= SC_SMALL_MAXCLASS)) { /* * Avoid moving the allocation if the size class can be left the * same. */ - assert(bin_infos[sz_size2index(oldsize)].reg_size == - oldsize); + assert(bin_infos[sz_size2index(oldsize)].reg_size == oldsize); if ((usize_max > SC_SMALL_MAXCLASS - || sz_size2index(usize_max) != sz_size2index(oldsize)) + || sz_size2index(usize_max) != sz_size2index(oldsize)) && (size > oldsize || usize_max < oldsize)) { ret = true; goto done; @@ -1535,8 +1531,8 @@ arena_ralloc_no_move(tsdn_t *tsdn, void *ptr, size_t oldsize, size_t size, ret = false; } else if (oldsize >= SC_LARGE_MINCLASS && usize_max >= SC_LARGE_MINCLASS) { - ret = large_ralloc_no_move(tsdn, edata, usize_min, usize_max, - zero); + ret = large_ralloc_no_move( + tsdn, edata, usize_min, usize_max, zero); } else { ret = true; } @@ -1558,8 +1554,8 @@ arena_ralloc_move_helper(tsdn_t *tsdn, arena_t *arena, size_t usize, if (unlikely(usize == 0 || usize > SC_LARGE_MAXCLASS)) { return NULL; } - return ipalloct_explicit_slab(tsdn, usize, alignment, zero, slab, - tcache, arena); + return ipalloct_explicit_slab( + tsdn, usize, alignment, zero, slab, tcache, arena); } void * @@ -1575,37 +1571,38 @@ arena_ralloc(tsdn_t *tsdn, arena_t *arena, void *ptr, size_t oldsize, assert(sz_can_use_slab(usize)); /* Try to 
avoid moving the allocation. */ UNUSED size_t newsize; - if (!arena_ralloc_no_move(tsdn, ptr, oldsize, usize, 0, zero, - &newsize)) { + if (!arena_ralloc_no_move( + tsdn, ptr, oldsize, usize, 0, zero, &newsize)) { hook_invoke_expand(hook_args->is_realloc - ? hook_expand_realloc : hook_expand_rallocx, + ? hook_expand_realloc + : hook_expand_rallocx, ptr, oldsize, usize, (uintptr_t)ptr, hook_args->args); return ptr; } } - if (oldsize >= SC_LARGE_MINCLASS - && usize >= SC_LARGE_MINCLASS) { - return large_ralloc(tsdn, arena, ptr, usize, - alignment, zero, tcache, hook_args); + if (oldsize >= SC_LARGE_MINCLASS && usize >= SC_LARGE_MINCLASS) { + return large_ralloc(tsdn, arena, ptr, usize, alignment, zero, + tcache, hook_args); } /* * size and oldsize are different enough that we need to move the * object. In that case, fall back to allocating new space and copying. */ - void *ret = arena_ralloc_move_helper(tsdn, arena, usize, alignment, - zero, slab, tcache); + void *ret = arena_ralloc_move_helper( + tsdn, arena, usize, alignment, zero, slab, tcache); if (ret == NULL) { return NULL; } - hook_invoke_alloc(hook_args->is_realloc - ? hook_alloc_realloc : hook_alloc_rallocx, ret, (uintptr_t)ret, - hook_args->args); - hook_invoke_dalloc(hook_args->is_realloc - ? hook_dalloc_realloc : hook_dalloc_rallocx, ptr, hook_args->args); + hook_invoke_alloc( + hook_args->is_realloc ? hook_alloc_realloc : hook_alloc_rallocx, + ret, (uintptr_t)ret, hook_args->args); + hook_invoke_dalloc( + hook_args->is_realloc ? 
hook_dalloc_realloc : hook_dalloc_rallocx, + ptr, hook_args->args); /* * Junk/zero-filling were already done by @@ -1623,8 +1620,8 @@ arena_get_ehooks(arena_t *arena) { } extent_hooks_t * -arena_set_extent_hooks(tsd_t *tsd, arena_t *arena, - extent_hooks_t *extent_hooks) { +arena_set_extent_hooks( + tsd_t *tsd, arena_t *arena, extent_hooks_t *extent_hooks) { background_thread_info_t *info; if (have_background_thread) { info = arena_background_thread_info_get(arena); @@ -1699,11 +1696,11 @@ arena_muzzy_decay_ms_default_set(ssize_t decay_ms) { } bool -arena_retain_grow_limit_get_set(tsd_t *tsd, arena_t *arena, size_t *old_limit, - size_t *new_limit) { +arena_retain_grow_limit_get_set( + tsd_t *tsd, arena_t *arena, size_t *old_limit, size_t *new_limit) { assert(opt_retain); - return pac_retain_grow_limit_get_set(tsd_tsdn(tsd), - &arena->pa_shard.pac, old_limit, new_limit); + return pac_retain_grow_limit_get_set( + tsd_tsdn(tsd), &arena->pa_shard.pac, old_limit, new_limit); } unsigned @@ -1724,7 +1721,7 @@ arena_nthreads_dec(arena_t *arena, bool internal) { arena_t * arena_new(tsdn_t *tsdn, unsigned ind, const arena_config_t *config) { arena_t *arena; - base_t *base; + base_t *base; if (ind == 0) { base = b0get(); @@ -1736,8 +1733,8 @@ arena_new(tsdn_t *tsdn, unsigned ind, const arena_config_t *config) { } } - size_t arena_size = ALIGNMENT_CEILING(sizeof(arena_t), CACHELINE) + - sizeof(bin_with_batch_t) * bin_info_nbatched_bins + size_t arena_size = ALIGNMENT_CEILING(sizeof(arena_t), CACHELINE) + + sizeof(bin_with_batch_t) * bin_info_nbatched_bins + sizeof(bin_t) * bin_info_nunbatched_bins; arena = (arena_t *)base_alloc(tsdn, base, arena_size, CACHELINE); if (arena == NULL) { @@ -1756,27 +1753,27 @@ arena_new(tsdn_t *tsdn, unsigned ind, const arena_config_t *config) { ql_new(&arena->tcache_ql); ql_new(&arena->cache_bin_array_descriptor_ql); if (malloc_mutex_init(&arena->tcache_ql_mtx, "tcache_ql", - WITNESS_RANK_TCACHE_QL, malloc_mutex_rank_exclusive)) { + 
WITNESS_RANK_TCACHE_QL, malloc_mutex_rank_exclusive)) { goto label_error; } } - atomic_store_u(&arena->dss_prec, (unsigned)extent_dss_prec_get(), - ATOMIC_RELAXED); + atomic_store_u( + &arena->dss_prec, (unsigned)extent_dss_prec_get(), ATOMIC_RELAXED); edata_list_active_init(&arena->large); if (malloc_mutex_init(&arena->large_mtx, "arena_large", - WITNESS_RANK_ARENA_LARGE, malloc_mutex_rank_exclusive)) { + WITNESS_RANK_ARENA_LARGE, malloc_mutex_rank_exclusive)) { goto label_error; } nstime_t cur_time; nstime_init_update(&cur_time); if (pa_shard_init(tsdn, &arena->pa_shard, &arena_pa_central_global, - &arena_emap_global, base, ind, &arena->stats.pa_shard_stats, - LOCKEDINT_MTX(arena->stats.mtx), &cur_time, oversize_threshold, - arena_dirty_decay_ms_default_get(), - arena_muzzy_decay_ms_default_get())) { + &arena_emap_global, base, ind, &arena->stats.pa_shard_stats, + LOCKEDINT_MTX(arena->stats.mtx), &cur_time, oversize_threshold, + arena_dirty_decay_ms_default_get(), + arena_muzzy_decay_ms_default_get())) { goto label_error; } @@ -1785,7 +1782,7 @@ arena_new(tsdn_t *tsdn, unsigned ind, const arena_config_t *config) { for (unsigned i = 0; i < SC_NBINS; i++) { for (unsigned j = 0; j < bin_infos[i].n_shards; j++) { bin_t *bin = arena_get_bin(arena, i, j); - bool err = bin_init(bin, i); + bool err = bin_init(bin, i); if (err) { goto label_error; } @@ -1814,8 +1811,8 @@ arena_new(tsdn_t *tsdn, unsigned ind, const arena_config_t *config) { if (opt_hpa && ehooks_are_default(base_ehooks_get(base)) && ind != 0) { hpa_shard_opts_t hpa_shard_opts = opt_hpa_opts; hpa_shard_opts.deferral_allowed = background_thread_enabled(); - if (pa_shard_enable_hpa(tsdn, &arena->pa_shard, - &hpa_shard_opts, &opt_hpa_sec_opts)) { + if (pa_shard_enable_hpa(tsdn, &arena->pa_shard, &hpa_shard_opts, + &opt_hpa_sec_opts)) { goto label_error; } } @@ -1866,13 +1863,13 @@ arena_create_huge_arena(tsd_t *tsd, unsigned ind) { */ if (!background_thread_enabled() && arena_dirty_decay_ms_default_get() > 0) 
{ - arena_decay_ms_set(tsd_tsdn(tsd), huge_arena, - extent_state_dirty, 0); + arena_decay_ms_set( + tsd_tsdn(tsd), huge_arena, extent_state_dirty, 0); } if (!background_thread_enabled() - &&arena_muzzy_decay_ms_default_get() > 0) { - arena_decay_ms_set(tsd_tsdn(tsd), huge_arena, - extent_state_muzzy, 0); + && arena_muzzy_decay_ms_default_get() > 0) { + arena_decay_ms_set( + tsd_tsdn(tsd), huge_arena, extent_state_muzzy, 0); } return huge_arena; @@ -1900,8 +1897,8 @@ arena_init_huge(tsdn_t *tsdn, arena_t *a0) { assert(huge_arena_ind == 0); /* The threshold should be large size class. */ - if (opt_oversize_threshold > SC_LARGE_MAXCLASS || - opt_oversize_threshold < SC_LARGE_MINCLASS) { + if (opt_oversize_threshold > SC_LARGE_MAXCLASS + || opt_oversize_threshold < SC_LARGE_MINCLASS) { opt_oversize_threshold = 0; oversize_threshold = SC_LARGE_MAXCLASS + PAGE; huge_enabled = false; @@ -1917,10 +1914,11 @@ arena_init_huge(tsdn_t *tsdn, arena_t *a0) { base_t *b0 = a0->base; /* Make sure that b0 thp auto-switch won't happen concurrently here. 
*/ malloc_mutex_lock(tsdn, &b0->mtx); - (&huge_arena_pac_thp)->thp_madvise = opt_huge_arena_pac_thp && - metadata_thp_enabled() && (opt_thp == thp_mode_default) && - (init_system_thp_mode == thp_mode_default); - (&huge_arena_pac_thp)->auto_thp_switched = b0->auto_thp_switched; + (&huge_arena_pac_thp)->thp_madvise = opt_huge_arena_pac_thp + && metadata_thp_enabled() && (opt_thp == thp_mode_default) + && (init_system_thp_mode == thp_mode_default); + (&huge_arena_pac_thp)->auto_thp_switched = + b0->auto_thp_switched; malloc_mutex_init(&(&huge_arena_pac_thp)->lock, "pac_thp", WITNESS_RANK_LEAF, malloc_mutex_rank_exclusive); edata_list_active_init(&(&huge_arena_pac_thp)->thp_lazy_list); @@ -1942,16 +1940,16 @@ arena_boot(sc_data_t *sc_data, base_t *base, bool hpa) { } JEMALLOC_SUPPRESS_WARN_ON_USAGE( - uint32_t cur_offset = (uint32_t)offsetof(arena_t, all_bins); - ) + uint32_t cur_offset = (uint32_t)offsetof(arena_t, all_bins);) for (szind_t i = 0; i < SC_NBINS; i++) { arena_bin_offsets[i] = cur_offset; uint32_t bin_sz = (i < bin_info_nbatched_sizes - ? sizeof(bin_with_batch_t) : sizeof(bin_t)); + ? sizeof(bin_with_batch_t) + : sizeof(bin_t)); cur_offset += (uint32_t)bin_infos[i].n_shards * bin_sz; } - return pa_central_init(&arena_pa_central_global, base, hpa, - &hpa_hooks_default); + return pa_central_init( + &arena_pa_central_global, base, hpa, &hpa_hooks_default); } void diff --git a/src/background_thread.c b/src/background_thread.c index 511febac..2eb08dd2 100644 --- a/src/background_thread.c +++ b/src/background_thread.c @@ -11,15 +11,15 @@ JEMALLOC_DIAGNOSTIC_DISABLE_SPURIOUS /* This option should be opt-in only. */ #define BACKGROUND_THREAD_DEFAULT false /* Read-only after initialization. */ -bool opt_background_thread = BACKGROUND_THREAD_DEFAULT; +bool opt_background_thread = BACKGROUND_THREAD_DEFAULT; size_t opt_max_background_threads = MAX_BACKGROUND_THREAD_LIMIT + 1; /* Used for thread creation, termination and stats. 
*/ malloc_mutex_t background_thread_lock; /* Indicates global state. Atomic because decay reads this w/o locking. */ atomic_b_t background_thread_enabled_state; -size_t n_background_threads; -size_t max_background_threads; +size_t n_background_threads; +size_t max_background_threads; /* Thread info per-index. */ background_thread_info_t *background_thread_info; @@ -32,11 +32,11 @@ static int (*pthread_create_fptr)(pthread_t *__restrict, const pthread_attr_t *, static void pthread_create_wrapper_init(void) { -#ifdef JEMALLOC_LAZY_LOCK +# ifdef JEMALLOC_LAZY_LOCK if (!isthreaded) { isthreaded = true; } -#endif +# endif } int @@ -47,9 +47,9 @@ pthread_create_wrapper(pthread_t *__restrict thread, const pthread_attr_t *attr, return pthread_create_fptr(thread, attr, start_routine, arg); } -#ifdef JEMALLOC_HAVE_DLSYM -#include -#endif +# ifdef JEMALLOC_HAVE_DLSYM +# include +# endif static bool pthread_create_fptr_init(void) { @@ -61,17 +61,18 @@ pthread_create_fptr_init(void) { * wrapper for pthread_create; and 2) application may define its own * wrapper as well (and can call malloc within the wrapper). 
*/ -#ifdef JEMALLOC_HAVE_DLSYM +# ifdef JEMALLOC_HAVE_DLSYM pthread_create_fptr = dlsym(RTLD_NEXT, "pthread_create"); if (pthread_create_fptr == NULL) { pthread_create_fptr = dlsym(RTLD_DEFAULT, "pthread_create"); } -#else +# else pthread_create_fptr = NULL; -#endif +# endif if (pthread_create_fptr == NULL) { if (config_lazy_lock) { - malloc_write(": Error in dlsym(RTLD_NEXT, " + malloc_write( + ": Error in dlsym(RTLD_NEXT, " "\"pthread_create\")\n"); abort(); } else { @@ -85,21 +86,24 @@ pthread_create_fptr_init(void) { #endif /* JEMALLOC_PTHREAD_CREATE_WRAPPER */ #ifndef JEMALLOC_BACKGROUND_THREAD -#define NOT_REACHED { not_reached(); } -bool background_thread_create(tsd_t *tsd, unsigned arena_ind) NOT_REACHED -bool background_threads_enable(tsd_t *tsd) NOT_REACHED -bool background_threads_disable(tsd_t *tsd) NOT_REACHED -bool background_thread_is_started(background_thread_info_t *info) NOT_REACHED -void background_thread_wakeup_early(background_thread_info_t *info, - nstime_t *remaining_sleep) NOT_REACHED -void background_thread_prefork0(tsdn_t *tsdn) NOT_REACHED -void background_thread_prefork1(tsdn_t *tsdn) NOT_REACHED -void background_thread_postfork_parent(tsdn_t *tsdn) NOT_REACHED -void background_thread_postfork_child(tsdn_t *tsdn) NOT_REACHED -bool background_thread_stats_read(tsdn_t *tsdn, - background_thread_stats_t *stats) NOT_REACHED -void background_thread_ctl_init(tsdn_t *tsdn) NOT_REACHED -#undef NOT_REACHED +# define NOT_REACHED \ + { not_reached(); } +bool +background_thread_create(tsd_t *tsd, unsigned arena_ind) NOT_REACHED + bool background_threads_enable(tsd_t *tsd) NOT_REACHED + bool background_threads_disable(tsd_t *tsd) NOT_REACHED + bool background_thread_is_started( + background_thread_info_t *info) NOT_REACHED + void background_thread_wakeup_early( + background_thread_info_t *info, nstime_t *remaining_sleep) NOT_REACHED + void background_thread_prefork0(tsdn_t *tsdn) NOT_REACHED + void background_thread_prefork1(tsdn_t *tsdn) NOT_REACHED 
+ void background_thread_postfork_parent(tsdn_t *tsdn) NOT_REACHED + void background_thread_postfork_child(tsdn_t *tsdn) NOT_REACHED + bool background_thread_stats_read( + tsdn_t *tsdn, background_thread_stats_t *stats) NOT_REACHED + void background_thread_ctl_init(tsdn_t *tsdn) NOT_REACHED +# undef NOT_REACHED #else static bool background_thread_enabled_at_fork; @@ -116,49 +120,50 @@ background_thread_info_init(tsdn_t *tsdn, background_thread_info_t *info) { static inline bool set_current_thread_affinity(int cpu) { -#if defined(JEMALLOC_HAVE_SCHED_SETAFFINITY) || defined(JEMALLOC_HAVE_PTHREAD_SETAFFINITY_NP) -#if defined(JEMALLOC_HAVE_SCHED_SETAFFINITY) +# if defined(JEMALLOC_HAVE_SCHED_SETAFFINITY) \ + || defined(JEMALLOC_HAVE_PTHREAD_SETAFFINITY_NP) +# if defined(JEMALLOC_HAVE_SCHED_SETAFFINITY) cpu_set_t cpuset; -#else -# ifndef __NetBSD__ +# else +# ifndef __NetBSD__ cpuset_t cpuset; -# else +# else cpuset_t *cpuset; -# endif -#endif +# endif +# endif -#ifndef __NetBSD__ +# ifndef __NetBSD__ CPU_ZERO(&cpuset); CPU_SET(cpu, &cpuset); -#else +# else cpuset = cpuset_create(); -#endif +# endif -#if defined(JEMALLOC_HAVE_SCHED_SETAFFINITY) +# if defined(JEMALLOC_HAVE_SCHED_SETAFFINITY) return (sched_setaffinity(0, sizeof(cpu_set_t), &cpuset) != 0); -#else -# ifndef __NetBSD__ - int ret = pthread_setaffinity_np(pthread_self(), sizeof(cpuset_t), - &cpuset); -# else - int ret = pthread_setaffinity_np(pthread_self(), cpuset_size(cpuset), - cpuset); +# else +# ifndef __NetBSD__ + int ret = pthread_setaffinity_np( + pthread_self(), sizeof(cpuset_t), &cpuset); +# else + int ret = pthread_setaffinity_np( + pthread_self(), cpuset_size(cpuset), cpuset); cpuset_destroy(cpuset); -# endif +# endif return ret != 0; -#endif -#else - return false; -#endif +# endif +# else + return false; +# endif } -#define BILLION UINT64_C(1000000000) +# define BILLION UINT64_C(1000000000) /* Minimal sleep interval 100 ms. 
*/ -#define BACKGROUND_THREAD_MIN_INTERVAL_NS (BILLION / 10) +# define BACKGROUND_THREAD_MIN_INTERVAL_NS (BILLION / 10) static int -background_thread_cond_wait(background_thread_info_t *info, - struct timespec *ts) { +background_thread_cond_wait( + background_thread_info_t *info, struct timespec *ts) { int ret; /* @@ -177,8 +182,8 @@ background_thread_cond_wait(background_thread_info_t *info, } static void -background_thread_sleep(tsdn_t *tsdn, background_thread_info_t *info, - uint64_t interval) { +background_thread_sleep( + tsdn_t *tsdn, background_thread_info_t *info, uint64_t interval) { if (config_stats) { info->tot_n_runs++; } @@ -192,21 +197,21 @@ background_thread_sleep(tsdn_t *tsdn, background_thread_info_t *info, int ret; if (interval == BACKGROUND_THREAD_INDEFINITE_SLEEP) { - background_thread_wakeup_time_set(tsdn, info, - BACKGROUND_THREAD_INDEFINITE_SLEEP); + background_thread_wakeup_time_set( + tsdn, info, BACKGROUND_THREAD_INDEFINITE_SLEEP); ret = background_thread_cond_wait(info, NULL); assert(ret == 0); } else { - assert(interval >= BACKGROUND_THREAD_MIN_INTERVAL_NS && - interval <= BACKGROUND_THREAD_INDEFINITE_SLEEP); + assert(interval >= BACKGROUND_THREAD_MIN_INTERVAL_NS + && interval <= BACKGROUND_THREAD_INDEFINITE_SLEEP); /* We need malloc clock (can be different from tv). 
*/ nstime_t next_wakeup; nstime_init_update(&next_wakeup); nstime_iadd(&next_wakeup, interval); - assert(nstime_ns(&next_wakeup) < - BACKGROUND_THREAD_INDEFINITE_SLEEP); - background_thread_wakeup_time_set(tsdn, info, - nstime_ns(&next_wakeup)); + assert(nstime_ns(&next_wakeup) + < BACKGROUND_THREAD_INDEFINITE_SLEEP); + background_thread_wakeup_time_set( + tsdn, info, nstime_ns(&next_wakeup)); nstime_t ts_wakeup; nstime_copy(&ts_wakeup, &before_sleep); @@ -245,11 +250,11 @@ background_thread_pause_check(tsdn_t *tsdn, background_thread_info_t *info) { } static inline void -background_work_sleep_once(tsdn_t *tsdn, background_thread_info_t *info, - unsigned ind) { +background_work_sleep_once( + tsdn_t *tsdn, background_thread_info_t *info, unsigned ind) { uint64_t ns_until_deferred = BACKGROUND_THREAD_DEFERRED_MAX; unsigned narenas = narenas_total_get(); - bool slept_indefinitely = background_thread_indefinite_sleep(info); + bool slept_indefinitely = background_thread_indefinite_sleep(info); for (unsigned i = ind; i < narenas; i += max_background_threads) { arena_t *arena = arena_get(tsdn, i, false); @@ -279,11 +284,10 @@ background_work_sleep_once(tsdn_t *tsdn, background_thread_info_t *info, if (ns_until_deferred == BACKGROUND_THREAD_DEFERRED_MAX) { sleep_ns = BACKGROUND_THREAD_INDEFINITE_SLEEP; } else { - sleep_ns = - (ns_until_deferred < BACKGROUND_THREAD_MIN_INTERVAL_NS) + sleep_ns = (ns_until_deferred + < BACKGROUND_THREAD_MIN_INTERVAL_NS) ? 
BACKGROUND_THREAD_MIN_INTERVAL_NS : ns_until_deferred; - } background_thread_sleep(tsdn, info, sleep_ns); @@ -292,11 +296,11 @@ background_work_sleep_once(tsdn_t *tsdn, background_thread_info_t *info, static bool background_threads_disable_single(tsd_t *tsd, background_thread_info_t *info) { if (info == &background_thread_info[0]) { - malloc_mutex_assert_owner(tsd_tsdn(tsd), - &background_thread_lock); + malloc_mutex_assert_owner( + tsd_tsdn(tsd), &background_thread_lock); } else { - malloc_mutex_assert_not_owner(tsd_tsdn(tsd), - &background_thread_lock); + malloc_mutex_assert_not_owner( + tsd_tsdn(tsd), &background_thread_lock); } pre_reentrancy(tsd, NULL); @@ -340,21 +344,23 @@ background_thread_create_signals_masked(pthread_t *thread, sigset_t set; sigfillset(&set); sigset_t oldset; - int mask_err = pthread_sigmask(SIG_SETMASK, &set, &oldset); + int mask_err = pthread_sigmask(SIG_SETMASK, &set, &oldset); if (mask_err != 0) { return mask_err; } - int create_err = pthread_create_wrapper(thread, attr, start_routine, - arg); + int create_err = pthread_create_wrapper( + thread, attr, start_routine, arg); /* * Restore the signal mask. Failure to restore the signal mask here * changes program behavior. 
*/ int restore_err = pthread_sigmask(SIG_SETMASK, &oldset, NULL); if (restore_err != 0) { - malloc_printf(": background thread creation " + malloc_printf( + ": background thread creation " "failed (%d), and signal mask restoration failed " - "(%d)\n", create_err, restore_err); + "(%d)\n", + create_err, restore_err); if (opt_abort) { abort(); } @@ -364,8 +370,8 @@ background_thread_create_signals_masked(pthread_t *thread, static bool check_background_thread_creation(tsd_t *tsd, - const size_t const_max_background_threads, - unsigned *n_created, bool *created_threads) { + const size_t const_max_background_threads, unsigned *n_created, + bool *created_threads) { bool ret = false; if (likely(*n_created == n_background_threads)) { return ret; @@ -391,7 +397,7 @@ check_background_thread_creation(tsd_t *tsd, pre_reentrancy(tsd, NULL); int err = background_thread_create_signals_masked(&info->thread, - /* NOLINTNEXTLINE(performance-no-int-to-ptr) */ + /* NOLINTNEXTLINE(performance-no-int-to-ptr) */ NULL, background_thread_entry, (void *)(uintptr_t)i); post_reentrancy(tsd); @@ -399,8 +405,10 @@ check_background_thread_creation(tsd_t *tsd, (*n_created)++; created_threads[i] = true; } else { - malloc_printf(": background thread " - "creation failed (%d)\n", err); + malloc_printf( + ": background thread " + "creation failed (%d)\n", + err); if (opt_abort) { abort(); } @@ -434,16 +442,17 @@ background_thread0_work(tsd_t *tsd) { /* Start working, and create more threads when asked. 
*/ unsigned n_created = 1; while (background_thread_info[0].state != background_thread_stopped) { - if (background_thread_pause_check(tsd_tsdn(tsd), - &background_thread_info[0])) { + if (background_thread_pause_check( + tsd_tsdn(tsd), &background_thread_info[0])) { continue; } - if (check_background_thread_creation(tsd, const_max_background_threads, - &n_created, (bool *)&created_threads)) { + if (check_background_thread_creation(tsd, + const_max_background_threads, &n_created, + (bool *)&created_threads)) { continue; } - background_work_sleep_once(tsd_tsdn(tsd), - &background_thread_info[0], 0); + background_work_sleep_once( + tsd_tsdn(tsd), &background_thread_info[0], 0); } /* @@ -460,8 +469,8 @@ background_thread0_work(tsd_t *tsd) { malloc_mutex_lock(tsd_tsdn(tsd), &info->mtx); if (info->state != background_thread_stopped) { /* The thread was not created. */ - assert(info->state == - background_thread_started); + assert( + info->state == background_thread_started); n_background_threads--; info->state = background_thread_stopped; } @@ -477,14 +486,14 @@ background_work(tsd_t *tsd, unsigned ind) { background_thread_info_t *info = &background_thread_info[ind]; malloc_mutex_lock(tsd_tsdn(tsd), &info->mtx); - background_thread_wakeup_time_set(tsd_tsdn(tsd), info, - BACKGROUND_THREAD_INDEFINITE_SLEEP); + background_thread_wakeup_time_set( + tsd_tsdn(tsd), info, BACKGROUND_THREAD_INDEFINITE_SLEEP); if (ind == 0) { background_thread0_work(tsd); } else { while (info->state != background_thread_stopped) { - if (background_thread_pause_check(tsd_tsdn(tsd), - info)) { + if (background_thread_pause_check( + tsd_tsdn(tsd), info)) { continue; } background_work_sleep_once(tsd_tsdn(tsd), info, ind); @@ -499,11 +508,11 @@ static void * background_thread_entry(void *ind_arg) { unsigned thread_ind = (unsigned)(uintptr_t)ind_arg; assert(thread_ind < max_background_threads); -#ifdef JEMALLOC_HAVE_PTHREAD_SETNAME_NP +# ifdef JEMALLOC_HAVE_PTHREAD_SETNAME_NP 
pthread_setname_np(pthread_self(), "jemalloc_bg_thd"); -#elif defined(JEMALLOC_HAVE_PTHREAD_SET_NAME_NP) +# elif defined(JEMALLOC_HAVE_PTHREAD_SET_NAME_NP) pthread_set_name_np(pthread_self(), "jemalloc_bg_thd"); -#endif +# endif if (opt_percpu_arena != percpu_arena_disabled) { set_current_thread_affinity((int)thread_ind); } @@ -513,8 +522,8 @@ background_thread_entry(void *ind_arg) { * turn triggers another background thread creation). */ background_work(tsd_internal_fetch(), thread_ind); - assert(pthread_equal(pthread_self(), - background_thread_info[thread_ind].thread)); + assert(pthread_equal( + pthread_self(), background_thread_info[thread_ind].thread)); return NULL; } @@ -538,8 +547,8 @@ background_thread_create_locked(tsd_t *tsd, unsigned arena_ind) { bool need_new_thread; malloc_mutex_lock(tsd_tsdn(tsd), &info->mtx); - need_new_thread = background_thread_enabled() && - (info->state == background_thread_stopped); + need_new_thread = background_thread_enabled() + && (info->state == background_thread_stopped); if (need_new_thread) { background_thread_init(tsd, info); } @@ -564,13 +573,15 @@ background_thread_create_locked(tsd_t *tsd, unsigned arena_ind) { * background threads with the underlying pthread_create. */ int err = background_thread_create_signals_masked(&info->thread, NULL, - /* NOLINTNEXTLINE(performance-no-int-to-ptr) */ + /* NOLINTNEXTLINE(performance-no-int-to-ptr) */ background_thread_entry, (void *)thread_ind); post_reentrancy(tsd); if (err != 0) { - malloc_printf(": arena 0 background thread creation " - "failed (%d)\n", err); + malloc_printf( + ": arena 0 background thread creation " + "failed (%d)\n", + err); malloc_mutex_lock(tsd_tsdn(tsd), &info->mtx); info->state = background_thread_stopped; n_background_threads--; @@ -612,12 +623,12 @@ background_threads_enable(tsd_t *tsd) { /* Mark the threads we need to create for thread 0. 
*/ unsigned narenas = narenas_total_get(); for (unsigned i = 1; i < narenas; i++) { - if (marked[i % max_background_threads] || - arena_get(tsd_tsdn(tsd), i, false) == NULL) { + if (marked[i % max_background_threads] + || arena_get(tsd_tsdn(tsd), i, false) == NULL) { continue; } - background_thread_info_t *info = &background_thread_info[ - i % max_background_threads]; + background_thread_info_t *info = + &background_thread_info[i % max_background_threads]; malloc_mutex_lock(tsd_tsdn(tsd), &info->mtx); assert(info->state == background_thread_stopped); background_thread_init(tsd, info); @@ -635,8 +646,8 @@ background_threads_enable(tsd_t *tsd) { for (unsigned i = 0; i < narenas; i++) { arena_t *arena = arena_get(tsd_tsdn(tsd), i, false); if (arena != NULL) { - pa_shard_set_deferral_allowed(tsd_tsdn(tsd), - &arena->pa_shard, true); + pa_shard_set_deferral_allowed( + tsd_tsdn(tsd), &arena->pa_shard, true); } } return false; @@ -648,8 +659,8 @@ background_threads_disable(tsd_t *tsd) { malloc_mutex_assert_owner(tsd_tsdn(tsd), &background_thread_lock); /* Thread 0 will be responsible for terminating other threads. */ - if (background_threads_disable_single(tsd, - &background_thread_info[0])) { + if (background_threads_disable_single( + tsd, &background_thread_info[0])) { return true; } assert(n_background_threads == 0); @@ -657,8 +668,8 @@ background_threads_disable(tsd_t *tsd) { for (unsigned i = 0; i < narenas; i++) { arena_t *arena = arena_get(tsd_tsdn(tsd), i, false); if (arena != NULL) { - pa_shard_set_deferral_allowed(tsd_tsdn(tsd), - &arena->pa_shard, false); + pa_shard_set_deferral_allowed( + tsd_tsdn(tsd), &arena->pa_shard, false); } } @@ -671,15 +682,15 @@ background_thread_is_started(background_thread_info_t *info) { } void -background_thread_wakeup_early(background_thread_info_t *info, - nstime_t *remaining_sleep) { +background_thread_wakeup_early( + background_thread_info_t *info, nstime_t *remaining_sleep) { /* * This is an optimization to increase batching. 
At this point * we know that background thread wakes up soon, so the time to cache * the just freed memory is bounded and low. */ - if (remaining_sleep != NULL && nstime_ns(remaining_sleep) < - BACKGROUND_THREAD_MIN_INTERVAL_NS) { + if (remaining_sleep != NULL + && nstime_ns(remaining_sleep) < BACKGROUND_THREAD_MIN_INTERVAL_NS) { return; } pthread_cond_signal(&info->cond); @@ -701,8 +712,8 @@ background_thread_prefork1(tsdn_t *tsdn) { void background_thread_postfork_parent(tsdn_t *tsdn) { for (unsigned i = 0; i < max_background_threads; i++) { - malloc_mutex_postfork_parent(tsdn, - &background_thread_info[i].mtx); + malloc_mutex_postfork_parent( + tsdn, &background_thread_info[i].mtx); } malloc_mutex_postfork_parent(tsdn, &background_thread_lock); } @@ -710,8 +721,8 @@ background_thread_postfork_parent(tsdn_t *tsdn) { void background_thread_postfork_child(tsdn_t *tsdn) { for (unsigned i = 0; i < max_background_threads; i++) { - malloc_mutex_postfork_child(tsdn, - &background_thread_info[i].mtx); + malloc_mutex_postfork_child( + tsdn, &background_thread_info[i].mtx); } malloc_mutex_postfork_child(tsdn, &background_thread_lock); if (!background_thread_enabled_at_fork) { @@ -760,8 +771,8 @@ background_thread_stats_read(tsdn_t *tsdn, background_thread_stats_t *stats) { if (info->state != background_thread_stopped) { num_runs += info->tot_n_runs; nstime_add(&stats->run_interval, &info->tot_sleep_time); - malloc_mutex_prof_max_update(tsdn, - &stats->max_counter_per_bg_thd, &info->mtx); + malloc_mutex_prof_max_update( + tsdn, &stats->max_counter_per_bg_thd, &info->mtx); } malloc_mutex_unlock(tsdn, &info->mtx); } @@ -774,9 +785,9 @@ background_thread_stats_read(tsdn_t *tsdn, background_thread_stats_t *stats) { return false; } -#undef BACKGROUND_THREAD_NPAGES_THRESHOLD -#undef BILLION -#undef BACKGROUND_THREAD_MIN_INTERVAL_NS +# undef BACKGROUND_THREAD_NPAGES_THRESHOLD +# undef BILLION +# undef BACKGROUND_THREAD_MIN_INTERVAL_NS /* * When lazy lock is enabled, we need to 
make sure setting isthreaded before @@ -787,24 +798,24 @@ background_thread_stats_read(tsdn_t *tsdn, background_thread_stats_t *stats) { void background_thread_ctl_init(tsdn_t *tsdn) { malloc_mutex_assert_not_owner(tsdn, &background_thread_lock); -#ifdef JEMALLOC_PTHREAD_CREATE_WRAPPER +# ifdef JEMALLOC_PTHREAD_CREATE_WRAPPER pthread_create_fptr_init(); pthread_create_wrapper_init(); -#endif +# endif } #endif /* defined(JEMALLOC_BACKGROUND_THREAD) */ -bool -background_thread_boot0(void) { + bool background_thread_boot0(void) { if (!have_background_thread && opt_background_thread) { - malloc_printf(": option background_thread currently " + malloc_printf( + ": option background_thread currently " "supports pthread only\n"); return true; } #ifdef JEMALLOC_PTHREAD_CREATE_WRAPPER - if ((config_lazy_lock || opt_background_thread) && - pthread_create_fptr_init()) { + if ((config_lazy_lock || opt_background_thread) + && pthread_create_fptr_init()) { return true; } #endif @@ -823,15 +834,15 @@ background_thread_boot1(tsdn_t *tsdn, base_t *base) { max_background_threads = opt_max_background_threads; if (malloc_mutex_init(&background_thread_lock, - "background_thread_global", - WITNESS_RANK_BACKGROUND_THREAD_GLOBAL, - malloc_mutex_rank_exclusive)) { + "background_thread_global", + WITNESS_RANK_BACKGROUND_THREAD_GLOBAL, + malloc_mutex_rank_exclusive)) { return true; } background_thread_info = (background_thread_info_t *)base_alloc(tsdn, - base, opt_max_background_threads * - sizeof(background_thread_info_t), CACHELINE); + base, opt_max_background_threads * sizeof(background_thread_info_t), + CACHELINE); if (background_thread_info == NULL) { return true; } @@ -840,8 +851,8 @@ background_thread_boot1(tsdn_t *tsdn, base_t *base) { background_thread_info_t *info = &background_thread_info[i]; /* Thread mutex is rank_inclusive because of thread0. 
*/ if (malloc_mutex_init(&info->mtx, "background_thread", - WITNESS_RANK_BACKGROUND_THREAD, - malloc_mutex_address_ordered)) { + WITNESS_RANK_BACKGROUND_THREAD, + malloc_mutex_address_ordered)) { return true; } if (pthread_cond_init(&info->cond, NULL)) { diff --git a/src/base.c b/src/base.c index 52f3d1d3..c494556c 100644 --- a/src/base.c +++ b/src/base.c @@ -12,7 +12,7 @@ * of metadata), since more metadata (e.g. rtree nodes) come from a0's base. */ -#define BASE_AUTO_THP_THRESHOLD 2 +#define BASE_AUTO_THP_THRESHOLD 2 #define BASE_AUTO_THP_THRESHOLD_A0 5 /******************************************************************************/ @@ -22,25 +22,21 @@ static base_t *b0; metadata_thp_mode_t opt_metadata_thp = METADATA_THP_DEFAULT; -const char *const metadata_thp_mode_names[] = { - "disabled", - "auto", - "always" -}; +const char *const metadata_thp_mode_names[] = {"disabled", "auto", "always"}; /******************************************************************************/ static inline bool metadata_thp_madvise(void) { - return (metadata_thp_enabled() && - (init_system_thp_mode == thp_mode_default)); + return (metadata_thp_enabled() + && (init_system_thp_mode == thp_mode_default)); } static void * base_map(tsdn_t *tsdn, ehooks_t *ehooks, unsigned ind, size_t size) { void *addr; - bool zero = true; - bool commit = true; + bool zero = true; + bool commit = true; /* * Use huge page sizes and alignment when opt_metadata_thp is enabled @@ -56,16 +52,16 @@ base_map(tsdn_t *tsdn, ehooks_t *ehooks, unsigned ind, size_t size) { if (ehooks_are_default(ehooks)) { addr = extent_alloc_mmap(NULL, size, alignment, &zero, &commit); } else { - addr = ehooks_alloc(tsdn, ehooks, NULL, size, alignment, &zero, - &commit); + addr = ehooks_alloc( + tsdn, ehooks, NULL, size, alignment, &zero, &commit); } return addr; } static void -base_unmap(tsdn_t *tsdn, ehooks_t *ehooks, unsigned ind, void *addr, - size_t size) { +base_unmap( + tsdn_t *tsdn, ehooks_t *ehooks, unsigned ind, void 
*addr, size_t size) { /* * Cascade through dalloc, decommit, purge_forced, and purge_lazy, * stopping at first success. This cascade is performed for consistency @@ -109,8 +105,8 @@ base_unmap(tsdn_t *tsdn, ehooks_t *ehooks, unsigned ind, void *addr, label_done: if (metadata_thp_madvise()) { /* Set NOHUGEPAGE after unmap to avoid kernel defrag. */ - assert(((uintptr_t)addr & HUGEPAGE_MASK) == 0 && - (size & HUGEPAGE_MASK) == 0); + assert(((uintptr_t)addr & HUGEPAGE_MASK) == 0 + && (size & HUGEPAGE_MASK) == 0); pages_nohuge(addr, size); } } @@ -126,8 +122,8 @@ base_edata_is_reused(edata_t *edata) { } static void -base_edata_init(size_t *extent_sn_next, edata_t *edata, void *addr, - size_t size) { +base_edata_init( + size_t *extent_sn_next, edata_t *edata, void *addr, size_t size) { size_t sn; sn = *extent_sn_next; @@ -174,9 +170,9 @@ huge_arena_auto_thp_switch(tsdn_t *tsdn, pac_thp_t *pac_thp) { unsigned cnt = 0; edata_t *edata; - ql_foreach(edata, &pending_list->head, ql_link_active) { + ql_foreach (edata, &pending_list->head, ql_link_active) { assert(edata != NULL); - void *addr = edata_addr_get(edata); + void *addr = edata_addr_get(edata); size_t size = edata_size_get(edata); assert(HUGEPAGE_ADDR2BASE(addr) == addr); assert(HUGEPAGE_CEILING(size) == size && size != 0); @@ -196,11 +192,11 @@ base_auto_thp_switch(tsdn_t *tsdn, base_t *base) { /* Called when adding a new block. 
*/ bool should_switch; if (base_ind_get(base) != 0) { - should_switch = (base_get_num_blocks(base, true) == - BASE_AUTO_THP_THRESHOLD); + should_switch = (base_get_num_blocks(base, true) + == BASE_AUTO_THP_THRESHOLD); } else { - should_switch = (base_get_num_blocks(base, true) == - BASE_AUTO_THP_THRESHOLD_A0); + should_switch = (base_get_num_blocks(base, true) + == BASE_AUTO_THP_THRESHOLD_A0); } if (!should_switch) { return; @@ -214,8 +210,9 @@ base_auto_thp_switch(tsdn_t *tsdn, base_t *base) { assert((block->size & HUGEPAGE_MASK) == 0); pages_huge(block, block->size); if (config_stats) { - base->n_thp += HUGEPAGE_CEILING(block->size - - edata_bsize_get(&block->edata)) >> LG_HUGEPAGE; + base->n_thp += HUGEPAGE_CEILING(block->size + - edata_bsize_get(&block->edata)) + >> LG_HUGEPAGE; } block = block->next; assert(block == NULL || (base_ind_get(base) == 0)); @@ -242,20 +239,22 @@ base_auto_thp_switch(tsdn_t *tsdn, base_t *base) { } static void * -base_extent_bump_alloc_helper(edata_t *edata, size_t *gap_size, size_t size, - size_t alignment) { +base_extent_bump_alloc_helper( + edata_t *edata, size_t *gap_size, size_t size, size_t alignment) { void *ret; assert(alignment == ALIGNMENT_CEILING(alignment, QUANTUM)); assert(size == ALIGNMENT_CEILING(size, alignment)); - *gap_size = ALIGNMENT_CEILING((uintptr_t)edata_addr_get(edata), - alignment) - (uintptr_t)edata_addr_get(edata); + *gap_size = ALIGNMENT_CEILING( + (uintptr_t)edata_addr_get(edata), alignment) + - (uintptr_t)edata_addr_get(edata); ret = (void *)((byte_t *)edata_addr_get(edata) + *gap_size); assert(edata_bsize_get(edata) >= *gap_size + size); - edata_binit(edata, (void *)((byte_t *)edata_addr_get(edata) + - *gap_size + size), edata_bsize_get(edata) - *gap_size - size, - edata_sn_get(edata), base_edata_is_reused(edata)); + edata_binit(edata, + (void *)((byte_t *)edata_addr_get(edata) + *gap_size + size), + edata_bsize_get(edata) - *gap_size - size, edata_sn_get(edata), + base_edata_is_reused(edata)); return 
ret; } @@ -312,24 +311,26 @@ base_extent_bump_alloc_post(tsdn_t *tsdn, base_t *base, edata_t *edata, * crossed by the new allocation. Adjust n_thp similarly when * metadata_thp is enabled. */ - base->resident += PAGE_CEILING((uintptr_t)addr + size) - - PAGE_CEILING((uintptr_t)addr - gap_size); + base->resident += PAGE_CEILING((uintptr_t)addr + size) + - PAGE_CEILING((uintptr_t)addr - gap_size); assert(base->allocated <= base->resident); assert(base->resident <= base->mapped); - if (metadata_thp_madvise() && (opt_metadata_thp == - metadata_thp_always || base->auto_thp_switched)) { + if (metadata_thp_madvise() + && (opt_metadata_thp == metadata_thp_always + || base->auto_thp_switched)) { base->n_thp += (HUGEPAGE_CEILING((uintptr_t)addr + size) - - HUGEPAGE_CEILING((uintptr_t)addr - gap_size)) >> - LG_HUGEPAGE; + - HUGEPAGE_CEILING( + (uintptr_t)addr - gap_size)) + >> LG_HUGEPAGE; assert(base->mapped >= base->n_thp << LG_HUGEPAGE); } } } static void * -base_extent_bump_alloc(tsdn_t *tsdn, base_t *base, edata_t *edata, size_t size, - size_t alignment) { - void *ret; +base_extent_bump_alloc( + tsdn_t *tsdn, base_t *base, edata_t *edata, size_t size, size_t alignment) { + void *ret; size_t gap_size; ret = base_extent_bump_alloc_helper(edata, &gap_size, size, alignment); @@ -339,9 +340,9 @@ base_extent_bump_alloc(tsdn_t *tsdn, base_t *base, edata_t *edata, size_t size, static size_t base_block_size_ceil(size_t block_size) { - return opt_metadata_thp == metadata_thp_disabled ? - ALIGNMENT_CEILING(block_size, BASE_BLOCK_MIN_ALIGN) : - HUGEPAGE_CEILING(block_size); + return opt_metadata_thp == metadata_thp_disabled + ? 
ALIGNMENT_CEILING(block_size, BASE_BLOCK_MIN_ALIGN) + : HUGEPAGE_CEILING(block_size); } /* @@ -356,8 +357,8 @@ base_block_alloc(tsdn_t *tsdn, base_t *base, ehooks_t *ehooks, unsigned ind, alignment = ALIGNMENT_CEILING(alignment, QUANTUM); size_t usize = ALIGNMENT_CEILING(size, alignment); size_t header_size = sizeof(base_block_t); - size_t gap_size = ALIGNMENT_CEILING(header_size, alignment) - - header_size; + size_t gap_size = ALIGNMENT_CEILING(header_size, alignment) + - header_size; /* * Create increasingly larger blocks in order to limit the total number * of disjoint virtual memory ranges. Choose the next size in the page @@ -365,27 +366,29 @@ base_block_alloc(tsdn_t *tsdn, base_t *base, ehooks_t *ehooks, unsigned ind, * HUGEPAGE when using metadata_thp), or a size large enough to satisfy * the requested size and alignment, whichever is larger. */ - size_t min_block_size = base_block_size_ceil(sz_psz2u(header_size + - gap_size + usize)); - pszind_t pind_next = (*pind_last + 1 < sz_psz2ind(SC_LARGE_MAXCLASS)) ? - *pind_last + 1 : *pind_last; - size_t next_block_size = base_block_size_ceil(sz_pind2sz(pind_next)); - size_t block_size = (min_block_size > next_block_size) ? min_block_size - : next_block_size; - base_block_t *block = (base_block_t *)base_map(tsdn, ehooks, ind, - block_size); + size_t min_block_size = base_block_size_ceil( + sz_psz2u(header_size + gap_size + usize)); + pszind_t pind_next = (*pind_last + 1 < sz_psz2ind(SC_LARGE_MAXCLASS)) + ? *pind_last + 1 + : *pind_last; + size_t next_block_size = base_block_size_ceil(sz_pind2sz(pind_next)); + size_t block_size = (min_block_size > next_block_size) + ? 
min_block_size + : next_block_size; + base_block_t *block = (base_block_t *)base_map( + tsdn, ehooks, ind, block_size); if (block == NULL) { return NULL; } if (metadata_thp_madvise()) { void *addr = (void *)block; - assert(((uintptr_t)addr & HUGEPAGE_MASK) == 0 && - (block_size & HUGEPAGE_MASK) == 0); + assert(((uintptr_t)addr & HUGEPAGE_MASK) == 0 + && (block_size & HUGEPAGE_MASK) == 0); if (opt_metadata_thp == metadata_thp_always) { pages_huge(addr, block_size); - } else if (opt_metadata_thp == metadata_thp_auto && - base != NULL) { + } else if (opt_metadata_thp == metadata_thp_auto + && base != NULL) { /* base != NULL indicates this is not a new base. */ malloc_mutex_lock(tsdn, &base->mtx); base_auto_thp_switch(tsdn, base); @@ -432,12 +435,12 @@ base_extent_alloc(tsdn_t *tsdn, base_t *base, size_t size, size_t alignment) { base->allocated += sizeof(base_block_t); base->resident += PAGE_CEILING(sizeof(base_block_t)); base->mapped += block->size; - if (metadata_thp_madvise() && - !(opt_metadata_thp == metadata_thp_auto - && !base->auto_thp_switched)) { + if (metadata_thp_madvise() + && !(opt_metadata_thp == metadata_thp_auto + && !base->auto_thp_switched)) { assert(base->n_thp > 0); - base->n_thp += HUGEPAGE_CEILING(sizeof(base_block_t)) >> - LG_HUGEPAGE; + base->n_thp += HUGEPAGE_CEILING(sizeof(base_block_t)) + >> LG_HUGEPAGE; } assert(base->allocated <= base->resident); assert(base->resident <= base->mapped); @@ -455,7 +458,7 @@ base_t * base_new(tsdn_t *tsdn, unsigned ind, const extent_hooks_t *extent_hooks, bool metadata_use_hooks) { pszind_t pind_last = 0; - size_t extent_sn_next = 0; + size_t extent_sn_next = 0; /* * The base will contain the ehooks eventually, but it itself is @@ -463,9 +466,10 @@ base_new(tsdn_t *tsdn, unsigned ind, const extent_hooks_t *extent_hooks, * memory, and then initialize the ehooks within the base_t. */ ehooks_t fake_ehooks; - ehooks_init(&fake_ehooks, metadata_use_hooks ? 
- (extent_hooks_t *)extent_hooks : - (extent_hooks_t *)&ehooks_default_extent_hooks, ind); + ehooks_init(&fake_ehooks, + metadata_use_hooks ? (extent_hooks_t *)extent_hooks + : (extent_hooks_t *)&ehooks_default_extent_hooks, + ind); base_block_t *block = base_block_alloc(tsdn, NULL, &fake_ehooks, ind, &pind_last, &extent_sn_next, sizeof(base_t), QUANTUM); @@ -473,17 +477,18 @@ base_new(tsdn_t *tsdn, unsigned ind, const extent_hooks_t *extent_hooks, return NULL; } - size_t gap_size; - size_t base_alignment = CACHELINE; - size_t base_size = ALIGNMENT_CEILING(sizeof(base_t), base_alignment); - base_t *base = (base_t *)base_extent_bump_alloc_helper(&block->edata, - &gap_size, base_size, base_alignment); + size_t gap_size; + size_t base_alignment = CACHELINE; + size_t base_size = ALIGNMENT_CEILING(sizeof(base_t), base_alignment); + base_t *base = (base_t *)base_extent_bump_alloc_helper( + &block->edata, &gap_size, base_size, base_alignment); ehooks_init(&base->ehooks, (extent_hooks_t *)extent_hooks, ind); - ehooks_init(&base->ehooks_base, metadata_use_hooks ? - (extent_hooks_t *)extent_hooks : - (extent_hooks_t *)&ehooks_default_extent_hooks, ind); + ehooks_init(&base->ehooks_base, + metadata_use_hooks ? (extent_hooks_t *)extent_hooks + : (extent_hooks_t *)&ehooks_default_extent_hooks, + ind); if (malloc_mutex_init(&base->mtx, "base", WITNESS_RANK_BASE, - malloc_mutex_rank_exclusive)) { + malloc_mutex_rank_exclusive)) { base_unmap(tsdn, &fake_ehooks, ind, block, block->size); return NULL; } @@ -502,9 +507,10 @@ base_new(tsdn_t *tsdn, unsigned ind, const extent_hooks_t *extent_hooks, base->allocated = sizeof(base_block_t); base->resident = PAGE_CEILING(sizeof(base_block_t)); base->mapped = block->size; - base->n_thp = (opt_metadata_thp == metadata_thp_always) && - metadata_thp_madvise() ? HUGEPAGE_CEILING(sizeof(base_block_t)) - >> LG_HUGEPAGE : 0; + base->n_thp = (opt_metadata_thp == metadata_thp_always) + && metadata_thp_madvise() + ? 
HUGEPAGE_CEILING(sizeof(base_block_t)) >> LG_HUGEPAGE + : 0; assert(base->allocated <= base->resident); assert(base->resident <= base->mapped); assert(base->n_thp << LG_HUGEPAGE <= base->mapped); @@ -512,8 +518,8 @@ base_new(tsdn_t *tsdn, unsigned ind, const extent_hooks_t *extent_hooks, /* Locking here is only necessary because of assertions. */ malloc_mutex_lock(tsdn, &base->mtx); - base_extent_bump_alloc_post(tsdn, base, &block->edata, gap_size, base, - base_size); + base_extent_bump_alloc_post( + tsdn, base, &block->edata, gap_size, base, base_size); malloc_mutex_unlock(tsdn, &base->mtx); return base; @@ -521,13 +527,13 @@ base_new(tsdn_t *tsdn, unsigned ind, const extent_hooks_t *extent_hooks, void base_delete(tsdn_t *tsdn, base_t *base) { - ehooks_t *ehooks = base_ehooks_get_for_metadata(base); + ehooks_t *ehooks = base_ehooks_get_for_metadata(base); base_block_t *next = base->blocks; do { base_block_t *block = next; next = block->next; - base_unmap(tsdn, ehooks, base_ind_get(base), block, - block->size); + base_unmap( + tsdn, ehooks, base_ind_get(base), block, block->size); } while (next != NULL); } @@ -543,8 +549,8 @@ base_ehooks_get_for_metadata(base_t *base) { extent_hooks_t * base_extent_hooks_set(base_t *base, extent_hooks_t *extent_hooks) { - extent_hooks_t *old_extent_hooks = - ehooks_get_extent_hooks_ptr(&base->ehooks); + extent_hooks_t *old_extent_hooks = ehooks_get_extent_hooks_ptr( + &base->ehooks); ehooks_init(&base->ehooks, extent_hooks, ehooks_ind_get(&base->ehooks)); return old_extent_hooks; } @@ -602,9 +608,9 @@ base_alloc(tsdn_t *tsdn, base_t *base, size_t size, size_t alignment) { edata_t * base_alloc_edata(tsdn_t *tsdn, base_t *base) { - size_t esn, usize; - edata_t *edata = base_alloc_impl(tsdn, base, sizeof(edata_t), - EDATA_ALIGNMENT, &esn, &usize); + size_t esn, usize; + edata_t *edata = base_alloc_impl( + tsdn, base, sizeof(edata_t), EDATA_ALIGNMENT, &esn, &usize); if (edata == NULL) { return NULL; } @@ -618,8 +624,8 @@ 
base_alloc_edata(tsdn_t *tsdn, base_t *base) { void * base_alloc_rtree(tsdn_t *tsdn, base_t *base, size_t size) { size_t usize; - void *rtree = base_alloc_impl(tsdn, base, size, CACHELINE, NULL, - &usize); + void *rtree = base_alloc_impl( + tsdn, base, size, CACHELINE, NULL, &usize); if (rtree == NULL) { return NULL; } @@ -632,8 +638,8 @@ base_alloc_rtree(tsdn_t *tsdn, base_t *base, size_t size) { static inline void b0_alloc_header_size(size_t *header_size, size_t *alignment) { *alignment = QUANTUM; - *header_size = QUANTUM > sizeof(edata_t *) ? QUANTUM : - sizeof(edata_t *); + *header_size = QUANTUM > sizeof(edata_t *) ? QUANTUM + : sizeof(edata_t *); } /* @@ -645,7 +651,7 @@ b0_alloc_header_size(size_t *header_size, size_t *alignment) { */ void * b0_alloc_tcache_stack(tsdn_t *tsdn, size_t stack_size) { - base_t *base = b0get(); + base_t *base = b0get(); edata_t *edata = base_alloc_base_edata(tsdn, base); if (edata == NULL) { return NULL; @@ -662,8 +668,8 @@ b0_alloc_tcache_stack(tsdn_t *tsdn, size_t stack_size) { b0_alloc_header_size(&header_size, &alignment); size_t alloc_size = sz_s2u(stack_size + header_size); - void *addr = base_alloc_impl(tsdn, base, alloc_size, alignment, &esn, - NULL); + void *addr = base_alloc_impl( + tsdn, base, alloc_size, alignment, &esn, NULL); if (addr == NULL) { edata_avail_insert(&base->edata_avail, edata); return NULL; @@ -683,8 +689,8 @@ b0_dalloc_tcache_stack(tsdn_t *tsdn, void *tcache_stack) { b0_alloc_header_size(&header_size, &alignment); edata_t *edata = *(edata_t **)((byte_t *)tcache_stack - header_size); - void *addr = edata_addr_get(edata); - size_t bsize = edata_bsize_get(edata); + void *addr = edata_addr_get(edata); + size_t bsize = edata_bsize_get(edata); /* Marked as "reused" to avoid double counting stats. 
*/ assert(base_edata_is_reused(edata)); assert(addr != NULL && bsize > 0); @@ -707,7 +713,8 @@ base_stats_get(tsdn_t *tsdn, base_t *base, size_t *allocated, malloc_mutex_lock(tsdn, &base->mtx); assert(base->allocated <= base->resident); assert(base->resident <= base->mapped); - assert(base->edata_allocated + base->rtree_allocated <= base->allocated); + assert( + base->edata_allocated + base->rtree_allocated <= base->allocated); *allocated = base->allocated; *edata_allocated = base->edata_allocated; *rtree_allocated = base->rtree_allocated; diff --git a/src/batcher.c b/src/batcher.c index 2570b3a9..af71dae5 100644 --- a/src/batcher.c +++ b/src/batcher.c @@ -18,8 +18,8 @@ batcher_init(batcher_t *batcher, size_t nelems_max) { * Returns an index (into some user-owned array) to use for pushing, or * BATCHER_NO_IDX if no index is free. */ -size_t batcher_push_begin(tsdn_t *tsdn, batcher_t *batcher, - size_t elems_to_push) { +size_t +batcher_push_begin(tsdn_t *tsdn, batcher_t *batcher, size_t elems_to_push) { assert(elems_to_push > 0); size_t nelems_guess = atomic_load_zu(&batcher->nelems, ATOMIC_RELAXED); if (nelems_guess + elems_to_push > batcher->nelems_max) { @@ -37,7 +37,8 @@ size_t batcher_push_begin(tsdn_t *tsdn, batcher_t *batcher, * racing accesses of the batcher can fail fast instead of trying to * acquire a mutex only to discover that there's no space for them. 
*/ - atomic_store_zu(&batcher->nelems, nelems + elems_to_push, ATOMIC_RELAXED); + atomic_store_zu( + &batcher->nelems, nelems + elems_to_push, ATOMIC_RELAXED); batcher->npushes++; return nelems; } @@ -75,7 +76,8 @@ batcher_pop_begin(tsdn_t *tsdn, batcher_t *batcher) { return nelems; } -void batcher_pop_end(tsdn_t *tsdn, batcher_t *batcher) { +void +batcher_pop_end(tsdn_t *tsdn, batcher_t *batcher) { assert(atomic_load_zu(&batcher->nelems, ATOMIC_RELAXED) == 0); malloc_mutex_unlock(tsdn, &batcher->mtx); } diff --git a/src/bin.c b/src/bin.c index 267aa0f3..98d1da02 100644 --- a/src/bin.c +++ b/src/bin.c @@ -10,8 +10,8 @@ unsigned bin_batching_test_ndalloc_slabs_max = (unsigned)-1; void (*bin_batching_test_after_push_hook)(size_t push_idx); void (*bin_batching_test_mid_pop_hook)(size_t nelems_to_pop); -void (*bin_batching_test_after_unlock_hook)(unsigned slab_dalloc_count, - bool list_empty); +void (*bin_batching_test_after_unlock_hook)( + unsigned slab_dalloc_count, bool list_empty); #endif bool @@ -49,7 +49,7 @@ bin_shard_sizes_boot(unsigned bin_shard_sizes[SC_NBINS]) { bool bin_init(bin_t *bin, unsigned binind) { if (malloc_mutex_init(&bin->lock, "bin", WITNESS_RANK_BIN, - malloc_mutex_rank_exclusive)) { + malloc_mutex_rank_exclusive)) { return true; } bin->slabcur = NULL; @@ -60,8 +60,8 @@ bin_init(bin_t *bin, unsigned binind) { } if (arena_bin_has_batch(binind)) { bin_with_batch_t *batched_bin = (bin_with_batch_t *)bin; - batcher_init(&batched_bin->remote_frees, - opt_bin_info_remote_free_max); + batcher_init( + &batched_bin->remote_frees, opt_bin_info_remote_free_max); } return false; } diff --git a/src/bin_info.c b/src/bin_info.c index f8a64ae3..de93418a 100644 --- a/src/bin_info.c +++ b/src/bin_info.c @@ -19,7 +19,7 @@ size_t opt_bin_info_remote_free_max = BIN_REMOTE_FREE_ELEMS_MAX; bin_info_t bin_infos[SC_NBINS]; -szind_t bin_info_nbatched_sizes; +szind_t bin_info_nbatched_sizes; unsigned bin_info_nbatched_bins; unsigned bin_info_nunbatched_bins; @@ -28,12 
+28,12 @@ bin_infos_init(sc_data_t *sc_data, unsigned bin_shard_sizes[SC_NBINS], bin_info_t infos[SC_NBINS]) { for (unsigned i = 0; i < SC_NBINS; i++) { bin_info_t *bin_info = &infos[i]; - sc_t *sc = &sc_data->sc[i]; + sc_t *sc = &sc_data->sc[i]; bin_info->reg_size = ((size_t)1U << sc->lg_base) + ((size_t)sc->ndelta << sc->lg_delta); bin_info->slab_size = (sc->pgs << LG_PAGE); - bin_info->nregs = - (uint32_t)(bin_info->slab_size / bin_info->reg_size); + bin_info->nregs = (uint32_t)(bin_info->slab_size + / bin_info->reg_size); bin_info->n_shards = bin_shard_sizes[i]; bitmap_info_t bitmap_info = BITMAP_INFO_INITIALIZER( bin_info->nregs); diff --git a/src/bitmap.c b/src/bitmap.c index 0ccedc5d..8ac81a67 100644 --- a/src/bitmap.c +++ b/src/bitmap.c @@ -10,7 +10,7 @@ void bitmap_info_init(bitmap_info_t *binfo, size_t nbits) { unsigned i; - size_t group_count; + size_t group_count; assert(nbits > 0); assert(nbits <= (ZU(1) << LG_BITMAP_MAXBITS)); @@ -24,11 +24,11 @@ bitmap_info_init(bitmap_info_t *binfo, size_t nbits) { group_count = BITMAP_BITS2GROUPS(nbits); for (i = 1; group_count > 1; i++) { assert(i < BITMAP_MAX_LEVELS); - binfo->levels[i].group_offset = binfo->levels[i-1].group_offset - + group_count; + binfo->levels[i].group_offset = + binfo->levels[i - 1].group_offset + group_count; group_count = BITMAP_BITS2GROUPS(group_count); } - binfo->levels[i].group_offset = binfo->levels[i-1].group_offset + binfo->levels[i].group_offset = binfo->levels[i - 1].group_offset + group_count; assert(binfo->levels[i].group_offset <= BITMAP_GROUPS_MAX); binfo->nlevels = i; @@ -42,7 +42,7 @@ bitmap_info_ngroups(const bitmap_info_t *binfo) { void bitmap_init(bitmap_t *bitmap, const bitmap_info_t *binfo, bool fill) { - size_t extra; + size_t extra; unsigned i; /* @@ -69,12 +69,13 @@ bitmap_init(bitmap_t *bitmap, const bitmap_info_t *binfo, bool fill) { bitmap[binfo->levels[1].group_offset - 1] >>= extra; } for (i = 1; i < binfo->nlevels; i++) { - size_t group_count = 
binfo->levels[i].group_offset - - binfo->levels[i-1].group_offset; - extra = (BITMAP_GROUP_NBITS - (group_count & - BITMAP_GROUP_NBITS_MASK)) & BITMAP_GROUP_NBITS_MASK; + size_t group_count = binfo->levels[i].group_offset + - binfo->levels[i - 1].group_offset; + extra = (BITMAP_GROUP_NBITS + - (group_count & BITMAP_GROUP_NBITS_MASK)) + & BITMAP_GROUP_NBITS_MASK; if (extra != 0) { - bitmap[binfo->levels[i+1].group_offset - 1] >>= extra; + bitmap[binfo->levels[i + 1].group_offset - 1] >>= extra; } } } diff --git a/src/buf_writer.c b/src/buf_writer.c index 7c6f7940..3c298502 100644 --- a/src/buf_writer.c +++ b/src/buf_writer.c @@ -43,8 +43,9 @@ buf_writer_init(tsdn_t *tsdn, buf_writer_t *buf_writer, write_cb_t *write_cb, if (write_cb != NULL) { buf_writer->write_cb = write_cb; } else { - buf_writer->write_cb = je_malloc_message != NULL ? - je_malloc_message : wrtmessage; + buf_writer->write_cb = je_malloc_message != NULL + ? je_malloc_message + : wrtmessage; } buf_writer->cbopaque = cbopaque; assert(buf_len >= 2); @@ -52,8 +53,8 @@ buf_writer_init(tsdn_t *tsdn, buf_writer_t *buf_writer, write_cb_t *write_cb, buf_writer->buf = buf; buf_writer->internal_buf = false; } else { - buf_writer->buf = buf_writer_allocate_internal_buf(tsdn, - buf_len); + buf_writer->buf = buf_writer_allocate_internal_buf( + tsdn, buf_len); buf_writer->internal_buf = true; } if (buf_writer->buf != NULL) { @@ -111,13 +112,13 @@ buf_writer_terminate(tsdn_t *tsdn, buf_writer_t *buf_writer) { } void -buf_writer_pipe(buf_writer_t *buf_writer, read_cb_t *read_cb, - void *read_cbopaque) { +buf_writer_pipe( + buf_writer_t *buf_writer, read_cb_t *read_cb, void *read_cbopaque) { /* * A tiny local buffer in case the buffered writer failed to allocate * at init. 
*/ - static char backup_buf[16]; + static char backup_buf[16]; static buf_writer_t backup_buf_writer; buf_writer_assert(buf_writer); diff --git a/src/cache_bin.c b/src/cache_bin.c index 2f5afeb9..ec677948 100644 --- a/src/cache_bin.c +++ b/src/cache_bin.c @@ -8,8 +8,7 @@ const uintptr_t disabled_bin = JUNK_ADDR; void -cache_bin_info_init(cache_bin_info_t *info, - cache_bin_sz_t ncached_max) { +cache_bin_info_init(cache_bin_info_t *info, cache_bin_sz_t ncached_max) { assert(ncached_max <= CACHE_BIN_NCACHED_MAX); size_t stack_size = (size_t)ncached_max * sizeof(void *); assert(stack_size < ((size_t)1 << (sizeof(cache_bin_sz_t) * 8))); @@ -51,27 +50,26 @@ cache_bin_info_compute_alloc(const cache_bin_info_t *infos, szind_t ninfos, } void -cache_bin_preincrement(const cache_bin_info_t *infos, szind_t ninfos, void *alloc, - size_t *cur_offset) { +cache_bin_preincrement(const cache_bin_info_t *infos, szind_t ninfos, + void *alloc, size_t *cur_offset) { if (config_debug) { size_t computed_size; size_t computed_alignment; /* Pointer should be as aligned as we asked for. */ - cache_bin_info_compute_alloc(infos, ninfos, &computed_size, - &computed_alignment); + cache_bin_info_compute_alloc( + infos, ninfos, &computed_size, &computed_alignment); assert(((uintptr_t)alloc & (computed_alignment - 1)) == 0); } - *(uintptr_t *)((byte_t *)alloc + *cur_offset) = - cache_bin_preceding_junk; + *(uintptr_t *)((byte_t *)alloc + + *cur_offset) = cache_bin_preceding_junk; *cur_offset += sizeof(void *); } void cache_bin_postincrement(void *alloc, size_t *cur_offset) { - *(uintptr_t *)((byte_t *)alloc + *cur_offset) = - cache_bin_trailing_junk; + *(uintptr_t *)((byte_t *)alloc + *cur_offset) = cache_bin_trailing_junk; *cur_offset += sizeof(void *); } @@ -83,8 +81,8 @@ cache_bin_init(cache_bin_t *bin, const cache_bin_info_t *info, void *alloc, * will access the slots toward higher addresses (for the benefit of * adjacent prefetch). 
*/ - void *stack_cur = (void *)((byte_t *)alloc + *cur_offset); - void *full_position = stack_cur; + void *stack_cur = (void *)((byte_t *)alloc + *cur_offset); + void *full_position = stack_cur; cache_bin_sz_t bin_stack_size = info->ncached_max * sizeof(void *); *cur_offset += bin_stack_size; @@ -96,8 +94,8 @@ cache_bin_init(cache_bin_t *bin, const cache_bin_info_t *info, void *alloc, bin->low_bits_full = (cache_bin_sz_t)(uintptr_t)full_position; bin->low_bits_empty = (cache_bin_sz_t)(uintptr_t)empty_position; cache_bin_info_init(&bin->bin_info, info->ncached_max); - cache_bin_sz_t free_spots = cache_bin_diff(bin, - bin->low_bits_full, (cache_bin_sz_t)(uintptr_t)bin->stack_head); + cache_bin_sz_t free_spots = cache_bin_diff(bin, bin->low_bits_full, + (cache_bin_sz_t)(uintptr_t)bin->stack_head); assert(free_spots == bin_stack_size); if (!cache_bin_disabled(bin)) { assert(cache_bin_ncached_get_local(bin) == 0); @@ -109,8 +107,8 @@ cache_bin_init(cache_bin_t *bin, const cache_bin_info_t *info, void *alloc, void cache_bin_init_disabled(cache_bin_t *bin, cache_bin_sz_t ncached_max) { - const void *fake_stack = cache_bin_disabled_bin_stack(); - size_t fake_offset = 0; + const void *fake_stack = cache_bin_disabled_bin_stack(); + size_t fake_offset = 0; cache_bin_info_t fake_info; cache_bin_info_init(&fake_info, 0); cache_bin_init(bin, &fake_info, (void *)fake_stack, &fake_offset); diff --git a/src/ckh.c b/src/ckh.c index 8db4319c..80688162 100644 --- a/src/ckh.c +++ b/src/ckh.c @@ -49,8 +49,8 @@ /******************************************************************************/ /* Function prototypes for non-inline static functions. 
*/ -static bool ckh_grow(tsd_t *tsd, ckh_t *ckh); -static void ckh_shrink(tsd_t *tsd, ckh_t *ckh); +static bool ckh_grow(tsd_t *tsd, ckh_t *ckh); +static void ckh_shrink(tsd_t *tsd, ckh_t *ckh); /******************************************************************************/ @@ -60,7 +60,7 @@ static void ckh_shrink(tsd_t *tsd, ckh_t *ckh); */ static size_t ckh_bucket_search(ckh_t *ckh, size_t bucket, const void *key) { - ckhc_t *cell; + ckhc_t *cell; unsigned i; for (i = 0; i < (ZU(1) << LG_CKH_BUCKET_CELLS); i++) { @@ -98,20 +98,20 @@ ckh_isearch(ckh_t *ckh, const void *key) { } static bool -ckh_try_bucket_insert(ckh_t *ckh, size_t bucket, const void *key, - const void *data) { - ckhc_t *cell; +ckh_try_bucket_insert( + ckh_t *ckh, size_t bucket, const void *key, const void *data) { + ckhc_t *cell; unsigned offset, i; /* * Cycle through the cells in the bucket, starting at a random position. * The randomness avoids worst-case search overhead as buckets fill up. */ - offset = (unsigned)prng_lg_range_u64(&ckh->prng_state, - LG_CKH_BUCKET_CELLS); + offset = (unsigned)prng_lg_range_u64( + &ckh->prng_state, LG_CKH_BUCKET_CELLS); for (i = 0; i < (ZU(1) << LG_CKH_BUCKET_CELLS); i++) { - cell = &ckh->tab[(bucket << LG_CKH_BUCKET_CELLS) + - ((i + offset) & ((ZU(1) << LG_CKH_BUCKET_CELLS) - 1))]; + cell = &ckh->tab[(bucket << LG_CKH_BUCKET_CELLS) + + ((i + offset) & ((ZU(1) << LG_CKH_BUCKET_CELLS) - 1))]; if (cell->key == NULL) { cell->key = key; cell->data = data; @@ -130,12 +130,12 @@ ckh_try_bucket_insert(ckh_t *ckh, size_t bucket, const void *key, * eviction/relocation bucket cycle. 
*/ static bool -ckh_evict_reloc_insert(ckh_t *ckh, size_t argbucket, void const **argkey, - void const **argdata) { +ckh_evict_reloc_insert( + ckh_t *ckh, size_t argbucket, void const **argkey, void const **argdata) { const void *key, *data, *tkey, *tdata; - ckhc_t *cell; - size_t hashes[2], bucket, tbucket; - unsigned i; + ckhc_t *cell; + size_t hashes[2], bucket, tbucket; + unsigned i; bucket = argbucket; key = *argkey; @@ -149,15 +149,18 @@ ckh_evict_reloc_insert(ckh_t *ckh, size_t argbucket, void const **argkey, * were an item for which both hashes indicated the same * bucket. */ - i = (unsigned)prng_lg_range_u64(&ckh->prng_state, - LG_CKH_BUCKET_CELLS); + i = (unsigned)prng_lg_range_u64( + &ckh->prng_state, LG_CKH_BUCKET_CELLS); cell = &ckh->tab[(bucket << LG_CKH_BUCKET_CELLS) + i]; assert(cell->key != NULL); /* Swap cell->{key,data} and {key,data} (evict). */ - tkey = cell->key; tdata = cell->data; - cell->key = key; cell->data = data; - key = tkey; data = tdata; + tkey = cell->key; + tdata = cell->data; + cell->key = key; + cell->data = data; + key = tkey; + data = tdata; #ifdef CKH_COUNT ckh->nrelocs++; @@ -167,8 +170,8 @@ ckh_evict_reloc_insert(ckh_t *ckh, size_t argbucket, void const **argkey, ckh->hash(key, hashes); tbucket = hashes[1] & ((ZU(1) << ckh->lg_curbuckets) - 1); if (tbucket == bucket) { - tbucket = hashes[0] & ((ZU(1) << ckh->lg_curbuckets) - - 1); + tbucket = hashes[0] + & ((ZU(1) << ckh->lg_curbuckets) - 1); /* * It may be that (tbucket == bucket) still, if the * item's hashes both indicate this bucket. 
However, @@ -201,8 +204,8 @@ ckh_evict_reloc_insert(ckh_t *ckh, size_t argbucket, void const **argkey, } static bool -ckh_try_insert(ckh_t *ckh, void const**argkey, void const**argdata) { - size_t hashes[2], bucket; +ckh_try_insert(ckh_t *ckh, void const **argkey, void const **argdata) { + size_t hashes[2], bucket; const void *key = *argkey; const void *data = *argdata; @@ -232,7 +235,7 @@ ckh_try_insert(ckh_t *ckh, void const**argkey, void const**argdata) { */ static bool ckh_rebuild(ckh_t *ckh, ckhc_t *aTab) { - size_t count, i, nins; + size_t count, i, nins; const void *key, *data; count = ckh->count; @@ -254,8 +257,8 @@ ckh_rebuild(ckh_t *ckh, ckhc_t *aTab) { static bool ckh_grow(tsd_t *tsd, ckh_t *ckh) { - bool ret; - ckhc_t *tab, *ttab; + bool ret; + ckhc_t *tab, *ttab; unsigned lg_prevbuckets, lg_curcells; #ifdef CKH_COUNT @@ -274,8 +277,7 @@ ckh_grow(tsd_t *tsd, ckh_t *ckh) { lg_curcells++; usize = sz_sa2u(sizeof(ckhc_t) << lg_curcells, CACHELINE); - if (unlikely(usize == 0 - || usize > SC_LARGE_MAXCLASS)) { + if (unlikely(usize == 0 || usize > SC_LARGE_MAXCLASS)) { ret = true; goto label_return; } @@ -309,8 +311,8 @@ label_return: static void ckh_shrink(tsd_t *tsd, ckh_t *ckh) { - ckhc_t *tab, *ttab; - size_t usize; + ckhc_t *tab, *ttab; + size_t usize; unsigned lg_prevbuckets, lg_curcells; /* @@ -358,8 +360,8 @@ ckh_shrink(tsd_t *tsd, ckh_t *ckh) { bool ckh_new(tsd_t *tsd, ckh_t *ckh, size_t minitems, ckh_hash_t *ckh_hash, ckh_keycomp_t *keycomp) { - bool ret; - size_t mincells, usize; + bool ret; + size_t mincells, usize; unsigned lg_mincells; assert(minitems > 0); @@ -386,8 +388,7 @@ ckh_new(tsd_t *tsd, ckh_t *ckh, size_t minitems, ckh_hash_t *ckh_hash, assert(LG_CKH_BUCKET_CELLS > 0); mincells = ((minitems + (3 - (minitems % 3))) / 3) << 2; for (lg_mincells = LG_CKH_BUCKET_CELLS; - (ZU(1) << lg_mincells) < mincells; - lg_mincells++) { + (ZU(1) << lg_mincells) < mincells; lg_mincells++) { /* Do nothing. 
*/ } ckh->lg_minbuckets = lg_mincells - LG_CKH_BUCKET_CELLS; @@ -417,11 +418,12 @@ ckh_delete(tsd_t *tsd, ckh_t *ckh) { assert(ckh != NULL); #ifdef CKH_VERBOSE - malloc_printf( - "%s(%p): ngrows: %"FMTu64", nshrinks: %"FMTu64"," - " nshrinkfails: %"FMTu64", ninserts: %"FMTu64"," - " nrelocs: %"FMTu64"\n", __func__, ckh, - (unsigned long long)ckh->ngrows, + malloc_printf("%s(%p): ngrows: %" FMTu64 ", nshrinks: %" FMTu64 + "," + " nshrinkfails: %" FMTu64 ", ninserts: %" FMTu64 + "," + " nrelocs: %" FMTu64 "\n", + __func__, ckh, (unsigned long long)ckh->ngrows, (unsigned long long)ckh->nshrinks, (unsigned long long)ckh->nshrinkfails, (unsigned long long)ckh->ninserts, @@ -445,8 +447,9 @@ bool ckh_iter(ckh_t *ckh, size_t *tabind, void **key, void **data) { size_t i, ncells; - for (i = *tabind, ncells = (ZU(1) << (ckh->lg_curbuckets + - LG_CKH_BUCKET_CELLS)); i < ncells; i++) { + for (i = *tabind, + ncells = (ZU(1) << (ckh->lg_curbuckets + LG_CKH_BUCKET_CELLS)); + i < ncells; i++) { if (ckh->tab[i].key != NULL) { if (key != NULL) { *key = (void *)ckh->tab[i].key; @@ -486,8 +489,8 @@ label_return: } bool -ckh_remove(tsd_t *tsd, ckh_t *ckh, const void *searchkey, void **key, - void **data) { +ckh_remove( + tsd_t *tsd, ckh_t *ckh, const void *searchkey, void **key, void **data) { size_t cell; assert(ckh != NULL); @@ -505,9 +508,9 @@ ckh_remove(tsd_t *tsd, ckh_t *ckh, const void *searchkey, void **key, ckh->count--; /* Try to halve the table if it is less than 1/4 full. */ - if (ckh->count < (ZU(1) << (ckh->lg_curbuckets - + LG_CKH_BUCKET_CELLS - 2)) && ckh->lg_curbuckets - > ckh->lg_minbuckets) { + if (ckh->count < (ZU(1) + << (ckh->lg_curbuckets + LG_CKH_BUCKET_CELLS - 2)) + && ckh->lg_curbuckets > ckh->lg_minbuckets) { /* Ignore error due to OOM. 
*/ ckh_shrink(tsd, ckh); } @@ -554,8 +557,8 @@ ckh_string_keycomp(const void *k1, const void *k2) { void ckh_pointer_hash(const void *key, size_t r_hash[2]) { union { - const void *v; - size_t i; + const void *v; + size_t i; } u; assert(sizeof(u.v) == sizeof(u.i)); diff --git a/src/counter.c b/src/counter.c index 8f1ae3af..8257a062 100644 --- a/src/counter.c +++ b/src/counter.c @@ -6,7 +6,7 @@ bool counter_accum_init(counter_accum_t *counter, uint64_t interval) { if (LOCKEDINT_MTX_INIT(counter->mtx, "counter_accum", - WITNESS_RANK_COUNTER_ACCUM, malloc_mutex_rank_exclusive)) { + WITNESS_RANK_COUNTER_ACCUM, malloc_mutex_rank_exclusive)) { return true; } locked_init_u64_unsynchronized(&counter->accumbytes, 0); diff --git a/src/ctl.c b/src/ctl.c index 4f06363a..9e9a4b43 100644 --- a/src/ctl.c +++ b/src/ctl.c @@ -25,10 +25,10 @@ * ctl_mtx protects the following: * - ctl_stats->* */ -static malloc_mutex_t ctl_mtx; -static bool ctl_initialized; -static ctl_stats_t *ctl_stats; -static ctl_arenas_t *ctl_arenas; +static malloc_mutex_t ctl_mtx; +static bool ctl_initialized; +static ctl_stats_t *ctl_stats; +static ctl_arenas_t *ctl_arenas; /******************************************************************************/ /* Helpers for named and indexed nodes. */ @@ -53,13 +53,13 @@ ctl_indexed_node(const ctl_node_t *node) { /******************************************************************************/ /* Function prototypes for non-inline static functions. 
*/ -#define CTL_PROTO(n) \ -static int n##_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, \ - void *oldp, size_t *oldlenp, void *newp, size_t newlen); +#define CTL_PROTO(n) \ + static int n##_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, \ + void *oldp, size_t *oldlenp, void *newp, size_t newlen); -#define INDEX_PROTO(n) \ -static const ctl_named_node_t *n##_index(tsdn_t *tsdn, \ - const size_t *mib, size_t miblen, size_t i); +#define INDEX_PROTO(n) \ + static const ctl_named_node_t *n##_index( \ + tsdn_t *tsdn, const size_t *mib, size_t miblen, size_t i); CTL_PROTO(version) CTL_PROTO(epoch) @@ -374,14 +374,14 @@ CTL_PROTO(experimental_prof_recent_alloc_dump) CTL_PROTO(experimental_batch_alloc) CTL_PROTO(experimental_arenas_create_ext) -#define MUTEX_STATS_CTL_PROTO_GEN(n) \ -CTL_PROTO(stats_##n##_num_ops) \ -CTL_PROTO(stats_##n##_num_wait) \ -CTL_PROTO(stats_##n##_num_spin_acq) \ -CTL_PROTO(stats_##n##_num_owner_switch) \ -CTL_PROTO(stats_##n##_total_wait_time) \ -CTL_PROTO(stats_##n##_max_wait_time) \ -CTL_PROTO(stats_##n##_max_num_thds) +#define MUTEX_STATS_CTL_PROTO_GEN(n) \ + CTL_PROTO(stats_##n##_num_ops) \ + CTL_PROTO(stats_##n##_num_wait) \ + CTL_PROTO(stats_##n##_num_spin_acq) \ + CTL_PROTO(stats_##n##_num_owner_switch) \ + CTL_PROTO(stats_##n##_total_wait_time) \ + CTL_PROTO(stats_##n##_max_wait_time) \ + CTL_PROTO(stats_##n##_max_num_thds) /* Global mutexes. */ #define OP(mtx) MUTEX_STATS_CTL_PROTO_GEN(mutexes_##mtx) @@ -402,542 +402,448 @@ CTL_PROTO(stats_mutexes_reset) /******************************************************************************/ /* mallctl tree. 
*/ -#define NAME(n) {true}, n -#define CHILD(t, c) \ - sizeof(c##_node) / sizeof(ctl_##t##_node_t), \ - (ctl_node_t *)c##_node, \ - NULL -#define CTL(c) 0, NULL, c##_ctl +#define NAME(n) {true}, n +#define CHILD(t, c) \ + sizeof(c##_node) / sizeof(ctl_##t##_node_t), (ctl_node_t *)c##_node, \ + NULL +#define CTL(c) 0, NULL, c##_ctl /* * Only handles internal indexed nodes, since there are currently no external * ones. */ -#define INDEX(i) {false}, i##_index +#define INDEX(i) {false}, i##_index -static const ctl_named_node_t thread_tcache_ncached_max_node[] = { - {NAME("read_sizeclass"), - CTL(thread_tcache_ncached_max_read_sizeclass)}, - {NAME("write"), CTL(thread_tcache_ncached_max_write)} +static const ctl_named_node_t thread_tcache_ncached_max_node[] = { + {NAME("read_sizeclass"), CTL(thread_tcache_ncached_max_read_sizeclass)}, + {NAME("write"), CTL(thread_tcache_ncached_max_write)}}; + +static const ctl_named_node_t thread_tcache_node[] = { + {NAME("enabled"), CTL(thread_tcache_enabled)}, + {NAME("max"), CTL(thread_tcache_max)}, + {NAME("flush"), CTL(thread_tcache_flush)}, + {NAME("ncached_max"), CHILD(named, thread_tcache_ncached_max)}}; + +static const ctl_named_node_t thread_peak_node[] = { + {NAME("read"), CTL(thread_peak_read)}, + {NAME("reset"), CTL(thread_peak_reset)}, }; -static const ctl_named_node_t thread_tcache_node[] = { - {NAME("enabled"), CTL(thread_tcache_enabled)}, - {NAME("max"), CTL(thread_tcache_max)}, - {NAME("flush"), CTL(thread_tcache_flush)}, - {NAME("ncached_max"), CHILD(named, thread_tcache_ncached_max)} -}; +static const ctl_named_node_t thread_prof_node[] = { + {NAME("name"), CTL(thread_prof_name)}, + {NAME("active"), CTL(thread_prof_active)}}; -static const ctl_named_node_t thread_peak_node[] = { - {NAME("read"), CTL(thread_peak_read)}, - {NAME("reset"), CTL(thread_peak_reset)}, -}; +static const ctl_named_node_t thread_node[] = { + {NAME("arena"), CTL(thread_arena)}, + {NAME("allocated"), CTL(thread_allocated)}, + 
{NAME("allocatedp"), CTL(thread_allocatedp)}, + {NAME("deallocated"), CTL(thread_deallocated)}, + {NAME("deallocatedp"), CTL(thread_deallocatedp)}, + {NAME("tcache"), CHILD(named, thread_tcache)}, + {NAME("peak"), CHILD(named, thread_peak)}, + {NAME("prof"), CHILD(named, thread_prof)}, + {NAME("idle"), CTL(thread_idle)}}; -static const ctl_named_node_t thread_prof_node[] = { - {NAME("name"), CTL(thread_prof_name)}, - {NAME("active"), CTL(thread_prof_active)} -}; - -static const ctl_named_node_t thread_node[] = { - {NAME("arena"), CTL(thread_arena)}, - {NAME("allocated"), CTL(thread_allocated)}, - {NAME("allocatedp"), CTL(thread_allocatedp)}, - {NAME("deallocated"), CTL(thread_deallocated)}, - {NAME("deallocatedp"), CTL(thread_deallocatedp)}, - {NAME("tcache"), CHILD(named, thread_tcache)}, - {NAME("peak"), CHILD(named, thread_peak)}, - {NAME("prof"), CHILD(named, thread_prof)}, - {NAME("idle"), CTL(thread_idle)} -}; - -static const ctl_named_node_t config_node[] = { - {NAME("cache_oblivious"), CTL(config_cache_oblivious)}, - {NAME("debug"), CTL(config_debug)}, - {NAME("fill"), CTL(config_fill)}, - {NAME("lazy_lock"), CTL(config_lazy_lock)}, - {NAME("malloc_conf"), CTL(config_malloc_conf)}, - {NAME("opt_safety_checks"), CTL(config_opt_safety_checks)}, - {NAME("prof"), CTL(config_prof)}, - {NAME("prof_libgcc"), CTL(config_prof_libgcc)}, - {NAME("prof_libunwind"), CTL(config_prof_libunwind)}, - {NAME("prof_frameptr"), CTL(config_prof_frameptr)}, - {NAME("stats"), CTL(config_stats)}, - {NAME("utrace"), CTL(config_utrace)}, - {NAME("xmalloc"), CTL(config_xmalloc)} -}; +static const ctl_named_node_t config_node[] = { + {NAME("cache_oblivious"), CTL(config_cache_oblivious)}, + {NAME("debug"), CTL(config_debug)}, {NAME("fill"), CTL(config_fill)}, + {NAME("lazy_lock"), CTL(config_lazy_lock)}, + {NAME("malloc_conf"), CTL(config_malloc_conf)}, + {NAME("opt_safety_checks"), CTL(config_opt_safety_checks)}, + {NAME("prof"), CTL(config_prof)}, + {NAME("prof_libgcc"), 
CTL(config_prof_libgcc)}, + {NAME("prof_libunwind"), CTL(config_prof_libunwind)}, + {NAME("prof_frameptr"), CTL(config_prof_frameptr)}, + {NAME("stats"), CTL(config_stats)}, {NAME("utrace"), CTL(config_utrace)}, + {NAME("xmalloc"), CTL(config_xmalloc)}}; static const ctl_named_node_t opt_malloc_conf_node[] = { - {NAME("symlink"), CTL(opt_malloc_conf_symlink)}, - {NAME("env_var"), CTL(opt_malloc_conf_env_var)}, - {NAME("global_var"), CTL(opt_malloc_conf_global_var)}, - {NAME("global_var_2_conf_harder"), - CTL(opt_malloc_conf_global_var_2_conf_harder)} -}; + {NAME("symlink"), CTL(opt_malloc_conf_symlink)}, + {NAME("env_var"), CTL(opt_malloc_conf_env_var)}, + {NAME("global_var"), CTL(opt_malloc_conf_global_var)}, + {NAME("global_var_2_conf_harder"), + CTL(opt_malloc_conf_global_var_2_conf_harder)}}; -static const ctl_named_node_t opt_node[] = { - {NAME("abort"), CTL(opt_abort)}, - {NAME("abort_conf"), CTL(opt_abort_conf)}, - {NAME("cache_oblivious"), CTL(opt_cache_oblivious)}, - {NAME("trust_madvise"), CTL(opt_trust_madvise)}, - {NAME("confirm_conf"), CTL(opt_confirm_conf)}, - {NAME("hpa"), CTL(opt_hpa)}, - {NAME("hpa_slab_max_alloc"), CTL(opt_hpa_slab_max_alloc)}, - {NAME("hpa_hugification_threshold"), - CTL(opt_hpa_hugification_threshold)}, - {NAME("hpa_hugify_delay_ms"), CTL(opt_hpa_hugify_delay_ms)}, - {NAME("hpa_hugify_sync"), CTL(opt_hpa_hugify_sync)}, - {NAME("hpa_min_purge_interval_ms"), CTL(opt_hpa_min_purge_interval_ms)}, - {NAME("experimental_hpa_max_purge_nhp"), - CTL(opt_experimental_hpa_max_purge_nhp)}, - {NAME("hpa_dirty_mult"), CTL(opt_hpa_dirty_mult)}, - {NAME("hpa_sec_nshards"), CTL(opt_hpa_sec_nshards)}, - {NAME("hpa_sec_max_alloc"), CTL(opt_hpa_sec_max_alloc)}, - {NAME("hpa_sec_max_bytes"), CTL(opt_hpa_sec_max_bytes)}, - {NAME("hpa_sec_bytes_after_flush"), - CTL(opt_hpa_sec_bytes_after_flush)}, - {NAME("hpa_sec_batch_fill_extra"), - CTL(opt_hpa_sec_batch_fill_extra)}, - {NAME("huge_arena_pac_thp"), CTL(opt_huge_arena_pac_thp)}, - 
{NAME("metadata_thp"), CTL(opt_metadata_thp)}, - {NAME("retain"), CTL(opt_retain)}, - {NAME("dss"), CTL(opt_dss)}, - {NAME("narenas"), CTL(opt_narenas)}, - {NAME("percpu_arena"), CTL(opt_percpu_arena)}, - {NAME("oversize_threshold"), CTL(opt_oversize_threshold)}, - {NAME("mutex_max_spin"), CTL(opt_mutex_max_spin)}, - {NAME("background_thread"), CTL(opt_background_thread)}, - {NAME("max_background_threads"), CTL(opt_max_background_threads)}, - {NAME("dirty_decay_ms"), CTL(opt_dirty_decay_ms)}, - {NAME("muzzy_decay_ms"), CTL(opt_muzzy_decay_ms)}, - {NAME("stats_print"), CTL(opt_stats_print)}, - {NAME("stats_print_opts"), CTL(opt_stats_print_opts)}, - {NAME("stats_interval"), CTL(opt_stats_interval)}, - {NAME("stats_interval_opts"), CTL(opt_stats_interval_opts)}, - {NAME("junk"), CTL(opt_junk)}, - {NAME("zero"), CTL(opt_zero)}, - {NAME("utrace"), CTL(opt_utrace)}, - {NAME("xmalloc"), CTL(opt_xmalloc)}, - {NAME("experimental_infallible_new"), - CTL(opt_experimental_infallible_new)}, - {NAME("experimental_tcache_gc"), - CTL(opt_experimental_tcache_gc)}, - {NAME("max_batched_size"), CTL(opt_max_batched_size)}, - {NAME("remote_free_max"), CTL(opt_remote_free_max)}, - {NAME("remote_free_max_batch"), CTL(opt_remote_free_max_batch)}, - {NAME("tcache"), CTL(opt_tcache)}, - {NAME("tcache_max"), CTL(opt_tcache_max)}, - {NAME("tcache_nslots_small_min"), - CTL(opt_tcache_nslots_small_min)}, - {NAME("tcache_nslots_small_max"), - CTL(opt_tcache_nslots_small_max)}, - {NAME("tcache_nslots_large"), CTL(opt_tcache_nslots_large)}, - {NAME("lg_tcache_nslots_mul"), CTL(opt_lg_tcache_nslots_mul)}, - {NAME("tcache_gc_incr_bytes"), CTL(opt_tcache_gc_incr_bytes)}, - {NAME("tcache_gc_delay_bytes"), CTL(opt_tcache_gc_delay_bytes)}, - {NAME("lg_tcache_flush_small_div"), - CTL(opt_lg_tcache_flush_small_div)}, - {NAME("lg_tcache_flush_large_div"), - CTL(opt_lg_tcache_flush_large_div)}, - {NAME("thp"), CTL(opt_thp)}, - {NAME("lg_extent_max_active_fit"), CTL(opt_lg_extent_max_active_fit)}, - 
{NAME("prof"), CTL(opt_prof)}, - {NAME("prof_prefix"), CTL(opt_prof_prefix)}, - {NAME("prof_active"), CTL(opt_prof_active)}, - {NAME("prof_thread_active_init"), CTL(opt_prof_thread_active_init)}, - {NAME("prof_bt_max"), CTL(opt_prof_bt_max)}, - {NAME("lg_prof_sample"), CTL(opt_lg_prof_sample)}, - {NAME("experimental_lg_prof_threshold"), CTL(opt_experimental_lg_prof_threshold)}, - {NAME("lg_prof_interval"), CTL(opt_lg_prof_interval)}, - {NAME("prof_gdump"), CTL(opt_prof_gdump)}, - {NAME("prof_final"), CTL(opt_prof_final)}, - {NAME("prof_leak"), CTL(opt_prof_leak)}, - {NAME("prof_leak_error"), CTL(opt_prof_leak_error)}, - {NAME("prof_accum"), CTL(opt_prof_accum)}, - {NAME("prof_pid_namespace"), CTL(opt_prof_pid_namespace)}, - {NAME("prof_recent_alloc_max"), CTL(opt_prof_recent_alloc_max)}, - {NAME("prof_stats"), CTL(opt_prof_stats)}, - {NAME("prof_sys_thread_name"), CTL(opt_prof_sys_thread_name)}, - {NAME("prof_time_resolution"), CTL(opt_prof_time_res)}, - {NAME("lg_san_uaf_align"), CTL(opt_lg_san_uaf_align)}, - {NAME("zero_realloc"), CTL(opt_zero_realloc)}, - {NAME("debug_double_free_max_scan"), - CTL(opt_debug_double_free_max_scan)}, - {NAME("disable_large_size_classes"), CTL(opt_disable_large_size_classes)}, - {NAME("process_madvise_max_batch"), CTL(opt_process_madvise_max_batch)}, - {NAME("malloc_conf"), CHILD(named, opt_malloc_conf)} -}; +static const ctl_named_node_t opt_node[] = {{NAME("abort"), CTL(opt_abort)}, + {NAME("abort_conf"), CTL(opt_abort_conf)}, + {NAME("cache_oblivious"), CTL(opt_cache_oblivious)}, + {NAME("trust_madvise"), CTL(opt_trust_madvise)}, + {NAME("confirm_conf"), CTL(opt_confirm_conf)}, {NAME("hpa"), CTL(opt_hpa)}, + {NAME("hpa_slab_max_alloc"), CTL(opt_hpa_slab_max_alloc)}, + {NAME("hpa_hugification_threshold"), CTL(opt_hpa_hugification_threshold)}, + {NAME("hpa_hugify_delay_ms"), CTL(opt_hpa_hugify_delay_ms)}, + {NAME("hpa_hugify_sync"), CTL(opt_hpa_hugify_sync)}, + {NAME("hpa_min_purge_interval_ms"), 
CTL(opt_hpa_min_purge_interval_ms)}, + {NAME("experimental_hpa_max_purge_nhp"), + CTL(opt_experimental_hpa_max_purge_nhp)}, + {NAME("hpa_dirty_mult"), CTL(opt_hpa_dirty_mult)}, + {NAME("hpa_sec_nshards"), CTL(opt_hpa_sec_nshards)}, + {NAME("hpa_sec_max_alloc"), CTL(opt_hpa_sec_max_alloc)}, + {NAME("hpa_sec_max_bytes"), CTL(opt_hpa_sec_max_bytes)}, + {NAME("hpa_sec_bytes_after_flush"), CTL(opt_hpa_sec_bytes_after_flush)}, + {NAME("hpa_sec_batch_fill_extra"), CTL(opt_hpa_sec_batch_fill_extra)}, + {NAME("huge_arena_pac_thp"), CTL(opt_huge_arena_pac_thp)}, + {NAME("metadata_thp"), CTL(opt_metadata_thp)}, + {NAME("retain"), CTL(opt_retain)}, {NAME("dss"), CTL(opt_dss)}, + {NAME("narenas"), CTL(opt_narenas)}, + {NAME("percpu_arena"), CTL(opt_percpu_arena)}, + {NAME("oversize_threshold"), CTL(opt_oversize_threshold)}, + {NAME("mutex_max_spin"), CTL(opt_mutex_max_spin)}, + {NAME("background_thread"), CTL(opt_background_thread)}, + {NAME("max_background_threads"), CTL(opt_max_background_threads)}, + {NAME("dirty_decay_ms"), CTL(opt_dirty_decay_ms)}, + {NAME("muzzy_decay_ms"), CTL(opt_muzzy_decay_ms)}, + {NAME("stats_print"), CTL(opt_stats_print)}, + {NAME("stats_print_opts"), CTL(opt_stats_print_opts)}, + {NAME("stats_interval"), CTL(opt_stats_interval)}, + {NAME("stats_interval_opts"), CTL(opt_stats_interval_opts)}, + {NAME("junk"), CTL(opt_junk)}, {NAME("zero"), CTL(opt_zero)}, + {NAME("utrace"), CTL(opt_utrace)}, {NAME("xmalloc"), CTL(opt_xmalloc)}, + {NAME("experimental_infallible_new"), CTL(opt_experimental_infallible_new)}, + {NAME("experimental_tcache_gc"), CTL(opt_experimental_tcache_gc)}, + {NAME("max_batched_size"), CTL(opt_max_batched_size)}, + {NAME("remote_free_max"), CTL(opt_remote_free_max)}, + {NAME("remote_free_max_batch"), CTL(opt_remote_free_max_batch)}, + {NAME("tcache"), CTL(opt_tcache)}, + {NAME("tcache_max"), CTL(opt_tcache_max)}, + {NAME("tcache_nslots_small_min"), CTL(opt_tcache_nslots_small_min)}, + {NAME("tcache_nslots_small_max"), 
CTL(opt_tcache_nslots_small_max)}, + {NAME("tcache_nslots_large"), CTL(opt_tcache_nslots_large)}, + {NAME("lg_tcache_nslots_mul"), CTL(opt_lg_tcache_nslots_mul)}, + {NAME("tcache_gc_incr_bytes"), CTL(opt_tcache_gc_incr_bytes)}, + {NAME("tcache_gc_delay_bytes"), CTL(opt_tcache_gc_delay_bytes)}, + {NAME("lg_tcache_flush_small_div"), CTL(opt_lg_tcache_flush_small_div)}, + {NAME("lg_tcache_flush_large_div"), CTL(opt_lg_tcache_flush_large_div)}, + {NAME("thp"), CTL(opt_thp)}, + {NAME("lg_extent_max_active_fit"), CTL(opt_lg_extent_max_active_fit)}, + {NAME("prof"), CTL(opt_prof)}, {NAME("prof_prefix"), CTL(opt_prof_prefix)}, + {NAME("prof_active"), CTL(opt_prof_active)}, + {NAME("prof_thread_active_init"), CTL(opt_prof_thread_active_init)}, + {NAME("prof_bt_max"), CTL(opt_prof_bt_max)}, + {NAME("lg_prof_sample"), CTL(opt_lg_prof_sample)}, + {NAME("experimental_lg_prof_threshold"), + CTL(opt_experimental_lg_prof_threshold)}, + {NAME("lg_prof_interval"), CTL(opt_lg_prof_interval)}, + {NAME("prof_gdump"), CTL(opt_prof_gdump)}, + {NAME("prof_final"), CTL(opt_prof_final)}, + {NAME("prof_leak"), CTL(opt_prof_leak)}, + {NAME("prof_leak_error"), CTL(opt_prof_leak_error)}, + {NAME("prof_accum"), CTL(opt_prof_accum)}, + {NAME("prof_pid_namespace"), CTL(opt_prof_pid_namespace)}, + {NAME("prof_recent_alloc_max"), CTL(opt_prof_recent_alloc_max)}, + {NAME("prof_stats"), CTL(opt_prof_stats)}, + {NAME("prof_sys_thread_name"), CTL(opt_prof_sys_thread_name)}, + {NAME("prof_time_resolution"), CTL(opt_prof_time_res)}, + {NAME("lg_san_uaf_align"), CTL(opt_lg_san_uaf_align)}, + {NAME("zero_realloc"), CTL(opt_zero_realloc)}, + {NAME("debug_double_free_max_scan"), CTL(opt_debug_double_free_max_scan)}, + {NAME("disable_large_size_classes"), CTL(opt_disable_large_size_classes)}, + {NAME("process_madvise_max_batch"), CTL(opt_process_madvise_max_batch)}, + {NAME("malloc_conf"), CHILD(named, opt_malloc_conf)}}; -static const ctl_named_node_t tcache_node[] = { - {NAME("create"), CTL(tcache_create)}, 
- {NAME("flush"), CTL(tcache_flush)}, - {NAME("destroy"), CTL(tcache_destroy)} -}; +static const ctl_named_node_t tcache_node[] = { + {NAME("create"), CTL(tcache_create)}, {NAME("flush"), CTL(tcache_flush)}, + {NAME("destroy"), CTL(tcache_destroy)}}; static const ctl_named_node_t arena_i_node[] = { - {NAME("initialized"), CTL(arena_i_initialized)}, - {NAME("decay"), CTL(arena_i_decay)}, - {NAME("purge"), CTL(arena_i_purge)}, - {NAME("reset"), CTL(arena_i_reset)}, - {NAME("destroy"), CTL(arena_i_destroy)}, - {NAME("dss"), CTL(arena_i_dss)}, - /* + {NAME("initialized"), CTL(arena_i_initialized)}, + {NAME("decay"), CTL(arena_i_decay)}, {NAME("purge"), CTL(arena_i_purge)}, + {NAME("reset"), CTL(arena_i_reset)}, + {NAME("destroy"), CTL(arena_i_destroy)}, {NAME("dss"), CTL(arena_i_dss)}, + /* * Undocumented for now, since we anticipate an arena API in flux after * we cut the last 5-series release. */ - {NAME("oversize_threshold"), CTL(arena_i_oversize_threshold)}, - {NAME("dirty_decay_ms"), CTL(arena_i_dirty_decay_ms)}, - {NAME("muzzy_decay_ms"), CTL(arena_i_muzzy_decay_ms)}, - {NAME("extent_hooks"), CTL(arena_i_extent_hooks)}, - {NAME("retain_grow_limit"), CTL(arena_i_retain_grow_limit)}, - {NAME("name"), CTL(arena_i_name)} -}; + {NAME("oversize_threshold"), CTL(arena_i_oversize_threshold)}, + {NAME("dirty_decay_ms"), CTL(arena_i_dirty_decay_ms)}, + {NAME("muzzy_decay_ms"), CTL(arena_i_muzzy_decay_ms)}, + {NAME("extent_hooks"), CTL(arena_i_extent_hooks)}, + {NAME("retain_grow_limit"), CTL(arena_i_retain_grow_limit)}, + {NAME("name"), CTL(arena_i_name)}}; static const ctl_named_node_t super_arena_i_node[] = { - {NAME(""), CHILD(named, arena_i)} -}; + {NAME(""), CHILD(named, arena_i)}}; -static const ctl_indexed_node_t arena_node[] = { - {INDEX(arena_i)} -}; +static const ctl_indexed_node_t arena_node[] = {{INDEX(arena_i)}}; static const ctl_named_node_t arenas_bin_i_node[] = { - {NAME("size"), CTL(arenas_bin_i_size)}, - {NAME("nregs"), CTL(arenas_bin_i_nregs)}, - 
{NAME("slab_size"), CTL(arenas_bin_i_slab_size)}, - {NAME("nshards"), CTL(arenas_bin_i_nshards)} -}; + {NAME("size"), CTL(arenas_bin_i_size)}, + {NAME("nregs"), CTL(arenas_bin_i_nregs)}, + {NAME("slab_size"), CTL(arenas_bin_i_slab_size)}, + {NAME("nshards"), CTL(arenas_bin_i_nshards)}}; static const ctl_named_node_t super_arenas_bin_i_node[] = { - {NAME(""), CHILD(named, arenas_bin_i)} -}; + {NAME(""), CHILD(named, arenas_bin_i)}}; -static const ctl_indexed_node_t arenas_bin_node[] = { - {INDEX(arenas_bin_i)} -}; +static const ctl_indexed_node_t arenas_bin_node[] = {{INDEX(arenas_bin_i)}}; static const ctl_named_node_t arenas_lextent_i_node[] = { - {NAME("size"), CTL(arenas_lextent_i_size)} -}; + {NAME("size"), CTL(arenas_lextent_i_size)}}; static const ctl_named_node_t super_arenas_lextent_i_node[] = { - {NAME(""), CHILD(named, arenas_lextent_i)} -}; + {NAME(""), CHILD(named, arenas_lextent_i)}}; static const ctl_indexed_node_t arenas_lextent_node[] = { - {INDEX(arenas_lextent_i)} -}; + {INDEX(arenas_lextent_i)}}; static const ctl_named_node_t arenas_node[] = { - {NAME("narenas"), CTL(arenas_narenas)}, - {NAME("dirty_decay_ms"), CTL(arenas_dirty_decay_ms)}, - {NAME("muzzy_decay_ms"), CTL(arenas_muzzy_decay_ms)}, - {NAME("quantum"), CTL(arenas_quantum)}, - {NAME("page"), CTL(arenas_page)}, - {NAME("hugepage"), CTL(arenas_hugepage)}, - {NAME("tcache_max"), CTL(arenas_tcache_max)}, - {NAME("nbins"), CTL(arenas_nbins)}, - {NAME("nhbins"), CTL(arenas_nhbins)}, - {NAME("bin"), CHILD(indexed, arenas_bin)}, - {NAME("nlextents"), CTL(arenas_nlextents)}, - {NAME("lextent"), CHILD(indexed, arenas_lextent)}, - {NAME("create"), CTL(arenas_create)}, - {NAME("lookup"), CTL(arenas_lookup)} -}; + {NAME("narenas"), CTL(arenas_narenas)}, + {NAME("dirty_decay_ms"), CTL(arenas_dirty_decay_ms)}, + {NAME("muzzy_decay_ms"), CTL(arenas_muzzy_decay_ms)}, + {NAME("quantum"), CTL(arenas_quantum)}, {NAME("page"), CTL(arenas_page)}, + {NAME("hugepage"), CTL(arenas_hugepage)}, + 
{NAME("tcache_max"), CTL(arenas_tcache_max)}, + {NAME("nbins"), CTL(arenas_nbins)}, {NAME("nhbins"), CTL(arenas_nhbins)}, + {NAME("bin"), CHILD(indexed, arenas_bin)}, + {NAME("nlextents"), CTL(arenas_nlextents)}, + {NAME("lextent"), CHILD(indexed, arenas_lextent)}, + {NAME("create"), CTL(arenas_create)}, {NAME("lookup"), CTL(arenas_lookup)}}; static const ctl_named_node_t prof_stats_bins_i_node[] = { - {NAME("live"), CTL(prof_stats_bins_i_live)}, - {NAME("accum"), CTL(prof_stats_bins_i_accum)} -}; + {NAME("live"), CTL(prof_stats_bins_i_live)}, + {NAME("accum"), CTL(prof_stats_bins_i_accum)}}; static const ctl_named_node_t super_prof_stats_bins_i_node[] = { - {NAME(""), CHILD(named, prof_stats_bins_i)} -}; + {NAME(""), CHILD(named, prof_stats_bins_i)}}; static const ctl_indexed_node_t prof_stats_bins_node[] = { - {INDEX(prof_stats_bins_i)} -}; + {INDEX(prof_stats_bins_i)}}; static const ctl_named_node_t prof_stats_lextents_i_node[] = { - {NAME("live"), CTL(prof_stats_lextents_i_live)}, - {NAME("accum"), CTL(prof_stats_lextents_i_accum)} -}; + {NAME("live"), CTL(prof_stats_lextents_i_live)}, + {NAME("accum"), CTL(prof_stats_lextents_i_accum)}}; static const ctl_named_node_t super_prof_stats_lextents_i_node[] = { - {NAME(""), CHILD(named, prof_stats_lextents_i)} -}; + {NAME(""), CHILD(named, prof_stats_lextents_i)}}; static const ctl_indexed_node_t prof_stats_lextents_node[] = { - {INDEX(prof_stats_lextents_i)} + {INDEX(prof_stats_lextents_i)}}; + +static const ctl_named_node_t prof_stats_node[] = { + {NAME("bins"), CHILD(indexed, prof_stats_bins)}, + {NAME("lextents"), CHILD(indexed, prof_stats_lextents)}, }; -static const ctl_named_node_t prof_stats_node[] = { - {NAME("bins"), CHILD(indexed, prof_stats_bins)}, - {NAME("lextents"), CHILD(indexed, prof_stats_lextents)}, -}; - -static const ctl_named_node_t prof_node[] = { - {NAME("thread_active_init"), CTL(prof_thread_active_init)}, - {NAME("active"), CTL(prof_active)}, - {NAME("dump"), CTL(prof_dump)}, - 
{NAME("gdump"), CTL(prof_gdump)}, - {NAME("prefix"), CTL(prof_prefix)}, - {NAME("reset"), CTL(prof_reset)}, - {NAME("interval"), CTL(prof_interval)}, - {NAME("lg_sample"), CTL(lg_prof_sample)}, - {NAME("log_start"), CTL(prof_log_start)}, - {NAME("log_stop"), CTL(prof_log_stop)}, - {NAME("stats"), CHILD(named, prof_stats)} -}; +static const ctl_named_node_t prof_node[] = { + {NAME("thread_active_init"), CTL(prof_thread_active_init)}, + {NAME("active"), CTL(prof_active)}, {NAME("dump"), CTL(prof_dump)}, + {NAME("gdump"), CTL(prof_gdump)}, {NAME("prefix"), CTL(prof_prefix)}, + {NAME("reset"), CTL(prof_reset)}, {NAME("interval"), CTL(prof_interval)}, + {NAME("lg_sample"), CTL(lg_prof_sample)}, + {NAME("log_start"), CTL(prof_log_start)}, + {NAME("log_stop"), CTL(prof_log_stop)}, + {NAME("stats"), CHILD(named, prof_stats)}}; static const ctl_named_node_t stats_arenas_i_small_node[] = { - {NAME("allocated"), CTL(stats_arenas_i_small_allocated)}, - {NAME("nmalloc"), CTL(stats_arenas_i_small_nmalloc)}, - {NAME("ndalloc"), CTL(stats_arenas_i_small_ndalloc)}, - {NAME("nrequests"), CTL(stats_arenas_i_small_nrequests)}, - {NAME("nfills"), CTL(stats_arenas_i_small_nfills)}, - {NAME("nflushes"), CTL(stats_arenas_i_small_nflushes)} -}; + {NAME("allocated"), CTL(stats_arenas_i_small_allocated)}, + {NAME("nmalloc"), CTL(stats_arenas_i_small_nmalloc)}, + {NAME("ndalloc"), CTL(stats_arenas_i_small_ndalloc)}, + {NAME("nrequests"), CTL(stats_arenas_i_small_nrequests)}, + {NAME("nfills"), CTL(stats_arenas_i_small_nfills)}, + {NAME("nflushes"), CTL(stats_arenas_i_small_nflushes)}}; static const ctl_named_node_t stats_arenas_i_large_node[] = { - {NAME("allocated"), CTL(stats_arenas_i_large_allocated)}, - {NAME("nmalloc"), CTL(stats_arenas_i_large_nmalloc)}, - {NAME("ndalloc"), CTL(stats_arenas_i_large_ndalloc)}, - {NAME("nrequests"), CTL(stats_arenas_i_large_nrequests)}, - {NAME("nfills"), CTL(stats_arenas_i_large_nfills)}, - {NAME("nflushes"), CTL(stats_arenas_i_large_nflushes)} -}; + 
{NAME("allocated"), CTL(stats_arenas_i_large_allocated)}, + {NAME("nmalloc"), CTL(stats_arenas_i_large_nmalloc)}, + {NAME("ndalloc"), CTL(stats_arenas_i_large_ndalloc)}, + {NAME("nrequests"), CTL(stats_arenas_i_large_nrequests)}, + {NAME("nfills"), CTL(stats_arenas_i_large_nfills)}, + {NAME("nflushes"), CTL(stats_arenas_i_large_nflushes)}}; -#define MUTEX_PROF_DATA_NODE(prefix) \ -static const ctl_named_node_t stats_##prefix##_node[] = { \ - {NAME("num_ops"), \ - CTL(stats_##prefix##_num_ops)}, \ - {NAME("num_wait"), \ - CTL(stats_##prefix##_num_wait)}, \ - {NAME("num_spin_acq"), \ - CTL(stats_##prefix##_num_spin_acq)}, \ - {NAME("num_owner_switch"), \ - CTL(stats_##prefix##_num_owner_switch)}, \ - {NAME("total_wait_time"), \ - CTL(stats_##prefix##_total_wait_time)}, \ - {NAME("max_wait_time"), \ - CTL(stats_##prefix##_max_wait_time)}, \ - {NAME("max_num_thds"), \ - CTL(stats_##prefix##_max_num_thds)} \ - /* Note that # of current waiting thread not provided. */ \ -}; +#define MUTEX_PROF_DATA_NODE(prefix) \ + static const ctl_named_node_t stats_##prefix##_node[] = { \ + {NAME("num_ops"), CTL(stats_##prefix##_num_ops)}, \ + {NAME("num_wait"), CTL(stats_##prefix##_num_wait)}, \ + {NAME("num_spin_acq"), CTL(stats_##prefix##_num_spin_acq)}, \ + {NAME("num_owner_switch"), \ + CTL(stats_##prefix##_num_owner_switch)}, \ + {NAME("total_wait_time"), CTL(stats_##prefix##_total_wait_time)}, \ + {NAME("max_wait_time"), CTL(stats_##prefix##_max_wait_time)}, \ + {NAME("max_num_thds"), \ + CTL(stats_##prefix##_max_num_thds)} /* Note that # of current waiting thread not provided. 
*/ \ + }; MUTEX_PROF_DATA_NODE(arenas_i_bins_j_mutex) static const ctl_named_node_t stats_arenas_i_bins_j_node[] = { - {NAME("nmalloc"), CTL(stats_arenas_i_bins_j_nmalloc)}, - {NAME("ndalloc"), CTL(stats_arenas_i_bins_j_ndalloc)}, - {NAME("nrequests"), CTL(stats_arenas_i_bins_j_nrequests)}, - {NAME("curregs"), CTL(stats_arenas_i_bins_j_curregs)}, - {NAME("nfills"), CTL(stats_arenas_i_bins_j_nfills)}, - {NAME("nflushes"), CTL(stats_arenas_i_bins_j_nflushes)}, - {NAME("nslabs"), CTL(stats_arenas_i_bins_j_nslabs)}, - {NAME("nreslabs"), CTL(stats_arenas_i_bins_j_nreslabs)}, - {NAME("curslabs"), CTL(stats_arenas_i_bins_j_curslabs)}, - {NAME("nonfull_slabs"), CTL(stats_arenas_i_bins_j_nonfull_slabs)}, - {NAME("batch_pops"), - CTL(stats_arenas_i_bins_j_batch_pops)}, - {NAME("batch_failed_pushes"), - CTL(stats_arenas_i_bins_j_batch_failed_pushes)}, - {NAME("batch_pushes"), - CTL(stats_arenas_i_bins_j_batch_pushes)}, - {NAME("batch_pushed_elems"), - CTL(stats_arenas_i_bins_j_batch_pushed_elems)}, - {NAME("mutex"), CHILD(named, stats_arenas_i_bins_j_mutex)} -}; + {NAME("nmalloc"), CTL(stats_arenas_i_bins_j_nmalloc)}, + {NAME("ndalloc"), CTL(stats_arenas_i_bins_j_ndalloc)}, + {NAME("nrequests"), CTL(stats_arenas_i_bins_j_nrequests)}, + {NAME("curregs"), CTL(stats_arenas_i_bins_j_curregs)}, + {NAME("nfills"), CTL(stats_arenas_i_bins_j_nfills)}, + {NAME("nflushes"), CTL(stats_arenas_i_bins_j_nflushes)}, + {NAME("nslabs"), CTL(stats_arenas_i_bins_j_nslabs)}, + {NAME("nreslabs"), CTL(stats_arenas_i_bins_j_nreslabs)}, + {NAME("curslabs"), CTL(stats_arenas_i_bins_j_curslabs)}, + {NAME("nonfull_slabs"), CTL(stats_arenas_i_bins_j_nonfull_slabs)}, + {NAME("batch_pops"), CTL(stats_arenas_i_bins_j_batch_pops)}, + {NAME("batch_failed_pushes"), + CTL(stats_arenas_i_bins_j_batch_failed_pushes)}, + {NAME("batch_pushes"), CTL(stats_arenas_i_bins_j_batch_pushes)}, + {NAME("batch_pushed_elems"), CTL(stats_arenas_i_bins_j_batch_pushed_elems)}, + {NAME("mutex"), CHILD(named, 
stats_arenas_i_bins_j_mutex)}}; static const ctl_named_node_t super_stats_arenas_i_bins_j_node[] = { - {NAME(""), CHILD(named, stats_arenas_i_bins_j)} -}; + {NAME(""), CHILD(named, stats_arenas_i_bins_j)}}; static const ctl_indexed_node_t stats_arenas_i_bins_node[] = { - {INDEX(stats_arenas_i_bins_j)} -}; + {INDEX(stats_arenas_i_bins_j)}}; static const ctl_named_node_t stats_arenas_i_lextents_j_node[] = { - {NAME("nmalloc"), CTL(stats_arenas_i_lextents_j_nmalloc)}, - {NAME("ndalloc"), CTL(stats_arenas_i_lextents_j_ndalloc)}, - {NAME("nrequests"), CTL(stats_arenas_i_lextents_j_nrequests)}, - {NAME("curlextents"), CTL(stats_arenas_i_lextents_j_curlextents)} -}; + {NAME("nmalloc"), CTL(stats_arenas_i_lextents_j_nmalloc)}, + {NAME("ndalloc"), CTL(stats_arenas_i_lextents_j_ndalloc)}, + {NAME("nrequests"), CTL(stats_arenas_i_lextents_j_nrequests)}, + {NAME("curlextents"), CTL(stats_arenas_i_lextents_j_curlextents)}}; static const ctl_named_node_t super_stats_arenas_i_lextents_j_node[] = { - {NAME(""), CHILD(named, stats_arenas_i_lextents_j)} -}; + {NAME(""), CHILD(named, stats_arenas_i_lextents_j)}}; static const ctl_indexed_node_t stats_arenas_i_lextents_node[] = { - {INDEX(stats_arenas_i_lextents_j)} -}; + {INDEX(stats_arenas_i_lextents_j)}}; static const ctl_named_node_t stats_arenas_i_extents_j_node[] = { - {NAME("ndirty"), CTL(stats_arenas_i_extents_j_ndirty)}, - {NAME("nmuzzy"), CTL(stats_arenas_i_extents_j_nmuzzy)}, - {NAME("nretained"), CTL(stats_arenas_i_extents_j_nretained)}, - {NAME("dirty_bytes"), CTL(stats_arenas_i_extents_j_dirty_bytes)}, - {NAME("muzzy_bytes"), CTL(stats_arenas_i_extents_j_muzzy_bytes)}, - {NAME("retained_bytes"), CTL(stats_arenas_i_extents_j_retained_bytes)} -}; + {NAME("ndirty"), CTL(stats_arenas_i_extents_j_ndirty)}, + {NAME("nmuzzy"), CTL(stats_arenas_i_extents_j_nmuzzy)}, + {NAME("nretained"), CTL(stats_arenas_i_extents_j_nretained)}, + {NAME("dirty_bytes"), CTL(stats_arenas_i_extents_j_dirty_bytes)}, + {NAME("muzzy_bytes"), 
CTL(stats_arenas_i_extents_j_muzzy_bytes)}, + {NAME("retained_bytes"), CTL(stats_arenas_i_extents_j_retained_bytes)}}; static const ctl_named_node_t super_stats_arenas_i_extents_j_node[] = { - {NAME(""), CHILD(named, stats_arenas_i_extents_j)} -}; + {NAME(""), CHILD(named, stats_arenas_i_extents_j)}}; static const ctl_indexed_node_t stats_arenas_i_extents_node[] = { - {INDEX(stats_arenas_i_extents_j)} -}; + {INDEX(stats_arenas_i_extents_j)}}; -#define OP(mtx) MUTEX_PROF_DATA_NODE(arenas_i_mutexes_##mtx) +#define OP(mtx) MUTEX_PROF_DATA_NODE(arenas_i_mutexes_##mtx) MUTEX_PROF_ARENA_MUTEXES #undef OP static const ctl_named_node_t stats_arenas_i_mutexes_node[] = { #define OP(mtx) {NAME(#mtx), CHILD(named, stats_arenas_i_mutexes_##mtx)}, -MUTEX_PROF_ARENA_MUTEXES + MUTEX_PROF_ARENA_MUTEXES #undef OP }; static const ctl_named_node_t stats_arenas_i_hpa_shard_slabs_node[] = { - {NAME("npageslabs_nonhuge"), - CTL(stats_arenas_i_hpa_shard_slabs_npageslabs_nonhuge)}, - {NAME("npageslabs_huge"), - CTL(stats_arenas_i_hpa_shard_slabs_npageslabs_huge)}, - {NAME("nactive_nonhuge"), - CTL(stats_arenas_i_hpa_shard_slabs_nactive_nonhuge)}, - {NAME("nactive_huge"), - CTL(stats_arenas_i_hpa_shard_slabs_nactive_huge)}, - {NAME("ndirty_nonhuge"), - CTL(stats_arenas_i_hpa_shard_slabs_ndirty_nonhuge)}, - {NAME("ndirty_huge"), - CTL(stats_arenas_i_hpa_shard_slabs_ndirty_huge)} -}; + {NAME("npageslabs_nonhuge"), + CTL(stats_arenas_i_hpa_shard_slabs_npageslabs_nonhuge)}, + {NAME("npageslabs_huge"), + CTL(stats_arenas_i_hpa_shard_slabs_npageslabs_huge)}, + {NAME("nactive_nonhuge"), + CTL(stats_arenas_i_hpa_shard_slabs_nactive_nonhuge)}, + {NAME("nactive_huge"), CTL(stats_arenas_i_hpa_shard_slabs_nactive_huge)}, + {NAME("ndirty_nonhuge"), + CTL(stats_arenas_i_hpa_shard_slabs_ndirty_nonhuge)}, + {NAME("ndirty_huge"), CTL(stats_arenas_i_hpa_shard_slabs_ndirty_huge)}}; static const ctl_named_node_t stats_arenas_i_hpa_shard_full_slabs_node[] = { - {NAME("npageslabs_nonhuge"), - 
CTL(stats_arenas_i_hpa_shard_full_slabs_npageslabs_nonhuge)}, - {NAME("npageslabs_huge"), - CTL(stats_arenas_i_hpa_shard_full_slabs_npageslabs_huge)}, - {NAME("nactive_nonhuge"), - CTL(stats_arenas_i_hpa_shard_full_slabs_nactive_nonhuge)}, - {NAME("nactive_huge"), - CTL(stats_arenas_i_hpa_shard_full_slabs_nactive_huge)}, - {NAME("ndirty_nonhuge"), - CTL(stats_arenas_i_hpa_shard_full_slabs_ndirty_nonhuge)}, - {NAME("ndirty_huge"), - CTL(stats_arenas_i_hpa_shard_full_slabs_ndirty_huge)} -}; + {NAME("npageslabs_nonhuge"), + CTL(stats_arenas_i_hpa_shard_full_slabs_npageslabs_nonhuge)}, + {NAME("npageslabs_huge"), + CTL(stats_arenas_i_hpa_shard_full_slabs_npageslabs_huge)}, + {NAME("nactive_nonhuge"), + CTL(stats_arenas_i_hpa_shard_full_slabs_nactive_nonhuge)}, + {NAME("nactive_huge"), + CTL(stats_arenas_i_hpa_shard_full_slabs_nactive_huge)}, + {NAME("ndirty_nonhuge"), + CTL(stats_arenas_i_hpa_shard_full_slabs_ndirty_nonhuge)}, + {NAME("ndirty_huge"), + CTL(stats_arenas_i_hpa_shard_full_slabs_ndirty_huge)}}; static const ctl_named_node_t stats_arenas_i_hpa_shard_empty_slabs_node[] = { - {NAME("npageslabs_nonhuge"), - CTL(stats_arenas_i_hpa_shard_empty_slabs_npageslabs_nonhuge)}, - {NAME("npageslabs_huge"), - CTL(stats_arenas_i_hpa_shard_empty_slabs_npageslabs_huge)}, - {NAME("nactive_nonhuge"), - CTL(stats_arenas_i_hpa_shard_empty_slabs_nactive_nonhuge)}, - {NAME("nactive_huge"), - CTL(stats_arenas_i_hpa_shard_empty_slabs_nactive_huge)}, - {NAME("ndirty_nonhuge"), - CTL(stats_arenas_i_hpa_shard_empty_slabs_ndirty_nonhuge)}, - {NAME("ndirty_huge"), - CTL(stats_arenas_i_hpa_shard_empty_slabs_ndirty_huge)} -}; + {NAME("npageslabs_nonhuge"), + CTL(stats_arenas_i_hpa_shard_empty_slabs_npageslabs_nonhuge)}, + {NAME("npageslabs_huge"), + CTL(stats_arenas_i_hpa_shard_empty_slabs_npageslabs_huge)}, + {NAME("nactive_nonhuge"), + CTL(stats_arenas_i_hpa_shard_empty_slabs_nactive_nonhuge)}, + {NAME("nactive_huge"), + CTL(stats_arenas_i_hpa_shard_empty_slabs_nactive_huge)}, + 
{NAME("ndirty_nonhuge"), + CTL(stats_arenas_i_hpa_shard_empty_slabs_ndirty_nonhuge)}, + {NAME("ndirty_huge"), + CTL(stats_arenas_i_hpa_shard_empty_slabs_ndirty_huge)}}; -static const ctl_named_node_t stats_arenas_i_hpa_shard_nonfull_slabs_j_node[] = { - {NAME("npageslabs_nonhuge"), - CTL(stats_arenas_i_hpa_shard_nonfull_slabs_j_npageslabs_nonhuge)}, - {NAME("npageslabs_huge"), - CTL(stats_arenas_i_hpa_shard_nonfull_slabs_j_npageslabs_huge)}, - {NAME("nactive_nonhuge"), - CTL(stats_arenas_i_hpa_shard_nonfull_slabs_j_nactive_nonhuge)}, - {NAME("nactive_huge"), - CTL(stats_arenas_i_hpa_shard_nonfull_slabs_j_nactive_huge)}, - {NAME("ndirty_nonhuge"), - CTL(stats_arenas_i_hpa_shard_nonfull_slabs_j_ndirty_nonhuge)}, - {NAME("ndirty_huge"), - CTL(stats_arenas_i_hpa_shard_nonfull_slabs_j_ndirty_huge)} -}; +static const ctl_named_node_t stats_arenas_i_hpa_shard_nonfull_slabs_j_node[] = + {{NAME("npageslabs_nonhuge"), + CTL(stats_arenas_i_hpa_shard_nonfull_slabs_j_npageslabs_nonhuge)}, + {NAME("npageslabs_huge"), + CTL(stats_arenas_i_hpa_shard_nonfull_slabs_j_npageslabs_huge)}, + {NAME("nactive_nonhuge"), + CTL(stats_arenas_i_hpa_shard_nonfull_slabs_j_nactive_nonhuge)}, + {NAME("nactive_huge"), + CTL(stats_arenas_i_hpa_shard_nonfull_slabs_j_nactive_huge)}, + {NAME("ndirty_nonhuge"), + CTL(stats_arenas_i_hpa_shard_nonfull_slabs_j_ndirty_nonhuge)}, + {NAME("ndirty_huge"), + CTL(stats_arenas_i_hpa_shard_nonfull_slabs_j_ndirty_huge)}}; -static const ctl_named_node_t super_stats_arenas_i_hpa_shard_nonfull_slabs_j_node[] = { - {NAME(""), - CHILD(named, stats_arenas_i_hpa_shard_nonfull_slabs_j)} -}; +static const ctl_named_node_t + super_stats_arenas_i_hpa_shard_nonfull_slabs_j_node[] = { + {NAME(""), CHILD(named, stats_arenas_i_hpa_shard_nonfull_slabs_j)}}; static const ctl_indexed_node_t stats_arenas_i_hpa_shard_nonfull_slabs_node[] = -{ - {INDEX(stats_arenas_i_hpa_shard_nonfull_slabs_j)} -}; + {{INDEX(stats_arenas_i_hpa_shard_nonfull_slabs_j)}}; static const ctl_named_node_t 
stats_arenas_i_hpa_shard_node[] = { - {NAME("npageslabs"), CTL(stats_arenas_i_hpa_shard_npageslabs)}, - {NAME("nactive"), CTL(stats_arenas_i_hpa_shard_nactive)}, - {NAME("ndirty"), CTL(stats_arenas_i_hpa_shard_ndirty)}, + {NAME("npageslabs"), CTL(stats_arenas_i_hpa_shard_npageslabs)}, + {NAME("nactive"), CTL(stats_arenas_i_hpa_shard_nactive)}, + {NAME("ndirty"), CTL(stats_arenas_i_hpa_shard_ndirty)}, - {NAME("slabs"), CHILD(named, stats_arenas_i_hpa_shard_slabs)}, + {NAME("slabs"), CHILD(named, stats_arenas_i_hpa_shard_slabs)}, - {NAME("npurge_passes"), CTL(stats_arenas_i_hpa_shard_npurge_passes)}, - {NAME("npurges"), CTL(stats_arenas_i_hpa_shard_npurges)}, - {NAME("nhugifies"), CTL(stats_arenas_i_hpa_shard_nhugifies)}, - {NAME("nhugify_failures"), - CTL(stats_arenas_i_hpa_shard_nhugify_failures)}, - {NAME("ndehugifies"), CTL(stats_arenas_i_hpa_shard_ndehugifies)}, + {NAME("npurge_passes"), CTL(stats_arenas_i_hpa_shard_npurge_passes)}, + {NAME("npurges"), CTL(stats_arenas_i_hpa_shard_npurges)}, + {NAME("nhugifies"), CTL(stats_arenas_i_hpa_shard_nhugifies)}, + {NAME("nhugify_failures"), CTL(stats_arenas_i_hpa_shard_nhugify_failures)}, + {NAME("ndehugifies"), CTL(stats_arenas_i_hpa_shard_ndehugifies)}, - {NAME("full_slabs"), CHILD(named, - stats_arenas_i_hpa_shard_full_slabs)}, - {NAME("empty_slabs"), CHILD(named, - stats_arenas_i_hpa_shard_empty_slabs)}, - {NAME("nonfull_slabs"), CHILD(indexed, - stats_arenas_i_hpa_shard_nonfull_slabs)} -}; + {NAME("full_slabs"), CHILD(named, stats_arenas_i_hpa_shard_full_slabs)}, + {NAME("empty_slabs"), CHILD(named, stats_arenas_i_hpa_shard_empty_slabs)}, + {NAME("nonfull_slabs"), + CHILD(indexed, stats_arenas_i_hpa_shard_nonfull_slabs)}}; static const ctl_named_node_t stats_arenas_i_node[] = { - {NAME("nthreads"), CTL(stats_arenas_i_nthreads)}, - {NAME("uptime"), CTL(stats_arenas_i_uptime)}, - {NAME("dss"), CTL(stats_arenas_i_dss)}, - {NAME("dirty_decay_ms"), CTL(stats_arenas_i_dirty_decay_ms)}, - {NAME("muzzy_decay_ms"), 
CTL(stats_arenas_i_muzzy_decay_ms)}, - {NAME("pactive"), CTL(stats_arenas_i_pactive)}, - {NAME("pdirty"), CTL(stats_arenas_i_pdirty)}, - {NAME("pmuzzy"), CTL(stats_arenas_i_pmuzzy)}, - {NAME("mapped"), CTL(stats_arenas_i_mapped)}, - {NAME("retained"), CTL(stats_arenas_i_retained)}, - {NAME("extent_avail"), CTL(stats_arenas_i_extent_avail)}, - {NAME("dirty_npurge"), CTL(stats_arenas_i_dirty_npurge)}, - {NAME("dirty_nmadvise"), CTL(stats_arenas_i_dirty_nmadvise)}, - {NAME("dirty_purged"), CTL(stats_arenas_i_dirty_purged)}, - {NAME("muzzy_npurge"), CTL(stats_arenas_i_muzzy_npurge)}, - {NAME("muzzy_nmadvise"), CTL(stats_arenas_i_muzzy_nmadvise)}, - {NAME("muzzy_purged"), CTL(stats_arenas_i_muzzy_purged)}, - {NAME("base"), CTL(stats_arenas_i_base)}, - {NAME("internal"), CTL(stats_arenas_i_internal)}, - {NAME("metadata_edata"), CTL(stats_arenas_i_metadata_edata)}, - {NAME("metadata_rtree"), CTL(stats_arenas_i_metadata_rtree)}, - {NAME("metadata_thp"), CTL(stats_arenas_i_metadata_thp)}, - {NAME("tcache_bytes"), CTL(stats_arenas_i_tcache_bytes)}, - {NAME("tcache_stashed_bytes"), - CTL(stats_arenas_i_tcache_stashed_bytes)}, - {NAME("resident"), CTL(stats_arenas_i_resident)}, - {NAME("abandoned_vm"), CTL(stats_arenas_i_abandoned_vm)}, - {NAME("hpa_sec_bytes"), CTL(stats_arenas_i_hpa_sec_bytes)}, - {NAME("small"), CHILD(named, stats_arenas_i_small)}, - {NAME("large"), CHILD(named, stats_arenas_i_large)}, - {NAME("bins"), CHILD(indexed, stats_arenas_i_bins)}, - {NAME("lextents"), CHILD(indexed, stats_arenas_i_lextents)}, - {NAME("extents"), CHILD(indexed, stats_arenas_i_extents)}, - {NAME("mutexes"), CHILD(named, stats_arenas_i_mutexes)}, - {NAME("hpa_shard"), CHILD(named, stats_arenas_i_hpa_shard)} -}; + {NAME("nthreads"), CTL(stats_arenas_i_nthreads)}, + {NAME("uptime"), CTL(stats_arenas_i_uptime)}, + {NAME("dss"), CTL(stats_arenas_i_dss)}, + {NAME("dirty_decay_ms"), CTL(stats_arenas_i_dirty_decay_ms)}, + {NAME("muzzy_decay_ms"), CTL(stats_arenas_i_muzzy_decay_ms)}, + 
{NAME("pactive"), CTL(stats_arenas_i_pactive)}, + {NAME("pdirty"), CTL(stats_arenas_i_pdirty)}, + {NAME("pmuzzy"), CTL(stats_arenas_i_pmuzzy)}, + {NAME("mapped"), CTL(stats_arenas_i_mapped)}, + {NAME("retained"), CTL(stats_arenas_i_retained)}, + {NAME("extent_avail"), CTL(stats_arenas_i_extent_avail)}, + {NAME("dirty_npurge"), CTL(stats_arenas_i_dirty_npurge)}, + {NAME("dirty_nmadvise"), CTL(stats_arenas_i_dirty_nmadvise)}, + {NAME("dirty_purged"), CTL(stats_arenas_i_dirty_purged)}, + {NAME("muzzy_npurge"), CTL(stats_arenas_i_muzzy_npurge)}, + {NAME("muzzy_nmadvise"), CTL(stats_arenas_i_muzzy_nmadvise)}, + {NAME("muzzy_purged"), CTL(stats_arenas_i_muzzy_purged)}, + {NAME("base"), CTL(stats_arenas_i_base)}, + {NAME("internal"), CTL(stats_arenas_i_internal)}, + {NAME("metadata_edata"), CTL(stats_arenas_i_metadata_edata)}, + {NAME("metadata_rtree"), CTL(stats_arenas_i_metadata_rtree)}, + {NAME("metadata_thp"), CTL(stats_arenas_i_metadata_thp)}, + {NAME("tcache_bytes"), CTL(stats_arenas_i_tcache_bytes)}, + {NAME("tcache_stashed_bytes"), CTL(stats_arenas_i_tcache_stashed_bytes)}, + {NAME("resident"), CTL(stats_arenas_i_resident)}, + {NAME("abandoned_vm"), CTL(stats_arenas_i_abandoned_vm)}, + {NAME("hpa_sec_bytes"), CTL(stats_arenas_i_hpa_sec_bytes)}, + {NAME("small"), CHILD(named, stats_arenas_i_small)}, + {NAME("large"), CHILD(named, stats_arenas_i_large)}, + {NAME("bins"), CHILD(indexed, stats_arenas_i_bins)}, + {NAME("lextents"), CHILD(indexed, stats_arenas_i_lextents)}, + {NAME("extents"), CHILD(indexed, stats_arenas_i_extents)}, + {NAME("mutexes"), CHILD(named, stats_arenas_i_mutexes)}, + {NAME("hpa_shard"), CHILD(named, stats_arenas_i_hpa_shard)}}; static const ctl_named_node_t super_stats_arenas_i_node[] = { - {NAME(""), CHILD(named, stats_arenas_i)} -}; + {NAME(""), CHILD(named, stats_arenas_i)}}; -static const ctl_indexed_node_t stats_arenas_node[] = { - {INDEX(stats_arenas_i)} -}; +static const ctl_indexed_node_t stats_arenas_node[] = 
{{INDEX(stats_arenas_i)}}; static const ctl_named_node_t stats_background_thread_node[] = { - {NAME("num_threads"), CTL(stats_background_thread_num_threads)}, - {NAME("num_runs"), CTL(stats_background_thread_num_runs)}, - {NAME("run_interval"), CTL(stats_background_thread_run_interval)} -}; + {NAME("num_threads"), CTL(stats_background_thread_num_threads)}, + {NAME("num_runs"), CTL(stats_background_thread_num_runs)}, + {NAME("run_interval"), CTL(stats_background_thread_run_interval)}}; #define OP(mtx) MUTEX_PROF_DATA_NODE(mutexes_##mtx) MUTEX_PROF_GLOBAL_MUTEXES @@ -945,95 +851,81 @@ MUTEX_PROF_GLOBAL_MUTEXES static const ctl_named_node_t stats_mutexes_node[] = { #define OP(mtx) {NAME(#mtx), CHILD(named, stats_mutexes_##mtx)}, -MUTEX_PROF_GLOBAL_MUTEXES + MUTEX_PROF_GLOBAL_MUTEXES #undef OP - {NAME("reset"), CTL(stats_mutexes_reset)} -}; + {NAME("reset"), CTL(stats_mutexes_reset)}}; #undef MUTEX_PROF_DATA_NODE static const ctl_named_node_t stats_node[] = { - {NAME("allocated"), CTL(stats_allocated)}, - {NAME("active"), CTL(stats_active)}, - {NAME("metadata"), CTL(stats_metadata)}, - {NAME("metadata_edata"), CTL(stats_metadata_edata)}, - {NAME("metadata_rtree"), CTL(stats_metadata_rtree)}, - {NAME("metadata_thp"), CTL(stats_metadata_thp)}, - {NAME("resident"), CTL(stats_resident)}, - {NAME("mapped"), CTL(stats_mapped)}, - {NAME("retained"), CTL(stats_retained)}, - {NAME("background_thread"), - CHILD(named, stats_background_thread)}, - {NAME("mutexes"), CHILD(named, stats_mutexes)}, - {NAME("arenas"), CHILD(indexed, stats_arenas)}, - {NAME("zero_reallocs"), CTL(stats_zero_reallocs)}, + {NAME("allocated"), CTL(stats_allocated)}, + {NAME("active"), CTL(stats_active)}, + {NAME("metadata"), CTL(stats_metadata)}, + {NAME("metadata_edata"), CTL(stats_metadata_edata)}, + {NAME("metadata_rtree"), CTL(stats_metadata_rtree)}, + {NAME("metadata_thp"), CTL(stats_metadata_thp)}, + {NAME("resident"), CTL(stats_resident)}, + {NAME("mapped"), CTL(stats_mapped)}, + {NAME("retained"), 
CTL(stats_retained)}, + {NAME("background_thread"), CHILD(named, stats_background_thread)}, + {NAME("mutexes"), CHILD(named, stats_mutexes)}, + {NAME("arenas"), CHILD(indexed, stats_arenas)}, + {NAME("zero_reallocs"), CTL(stats_zero_reallocs)}, }; static const ctl_named_node_t experimental_hooks_node[] = { - {NAME("install"), CTL(experimental_hooks_install)}, - {NAME("remove"), CTL(experimental_hooks_remove)}, - {NAME("prof_backtrace"), CTL(experimental_hooks_prof_backtrace)}, - {NAME("prof_dump"), CTL(experimental_hooks_prof_dump)}, - {NAME("prof_sample"), CTL(experimental_hooks_prof_sample)}, - {NAME("prof_sample_free"), CTL(experimental_hooks_prof_sample_free)}, - {NAME("prof_threshold"), CTL(experimental_hooks_prof_threshold)}, - {NAME("safety_check_abort"), CTL(experimental_hooks_safety_check_abort)}, - {NAME("thread_event"), CTL(experimental_hooks_thread_event)}, + {NAME("install"), CTL(experimental_hooks_install)}, + {NAME("remove"), CTL(experimental_hooks_remove)}, + {NAME("prof_backtrace"), CTL(experimental_hooks_prof_backtrace)}, + {NAME("prof_dump"), CTL(experimental_hooks_prof_dump)}, + {NAME("prof_sample"), CTL(experimental_hooks_prof_sample)}, + {NAME("prof_sample_free"), CTL(experimental_hooks_prof_sample_free)}, + {NAME("prof_threshold"), CTL(experimental_hooks_prof_threshold)}, + {NAME("safety_check_abort"), CTL(experimental_hooks_safety_check_abort)}, + {NAME("thread_event"), CTL(experimental_hooks_thread_event)}, }; static const ctl_named_node_t experimental_thread_node[] = { - {NAME("activity_callback"), - CTL(experimental_thread_activity_callback)} -}; + {NAME("activity_callback"), CTL(experimental_thread_activity_callback)}}; static const ctl_named_node_t experimental_utilization_node[] = { - {NAME("query"), CTL(experimental_utilization_query)}, - {NAME("batch_query"), CTL(experimental_utilization_batch_query)} -}; + {NAME("query"), CTL(experimental_utilization_query)}, + {NAME("batch_query"), CTL(experimental_utilization_batch_query)}}; 
static const ctl_named_node_t experimental_arenas_i_node[] = { - {NAME("pactivep"), CTL(experimental_arenas_i_pactivep)} -}; + {NAME("pactivep"), CTL(experimental_arenas_i_pactivep)}}; static const ctl_named_node_t super_experimental_arenas_i_node[] = { - {NAME(""), CHILD(named, experimental_arenas_i)} -}; + {NAME(""), CHILD(named, experimental_arenas_i)}}; static const ctl_indexed_node_t experimental_arenas_node[] = { - {INDEX(experimental_arenas_i)} -}; + {INDEX(experimental_arenas_i)}}; static const ctl_named_node_t experimental_prof_recent_node[] = { - {NAME("alloc_max"), CTL(experimental_prof_recent_alloc_max)}, - {NAME("alloc_dump"), CTL(experimental_prof_recent_alloc_dump)}, + {NAME("alloc_max"), CTL(experimental_prof_recent_alloc_max)}, + {NAME("alloc_dump"), CTL(experimental_prof_recent_alloc_dump)}, }; static const ctl_named_node_t experimental_node[] = { - {NAME("hooks"), CHILD(named, experimental_hooks)}, - {NAME("utilization"), CHILD(named, experimental_utilization)}, - {NAME("arenas"), CHILD(indexed, experimental_arenas)}, - {NAME("arenas_create_ext"), CTL(experimental_arenas_create_ext)}, - {NAME("prof_recent"), CHILD(named, experimental_prof_recent)}, - {NAME("batch_alloc"), CTL(experimental_batch_alloc)}, - {NAME("thread"), CHILD(named, experimental_thread)} -}; + {NAME("hooks"), CHILD(named, experimental_hooks)}, + {NAME("utilization"), CHILD(named, experimental_utilization)}, + {NAME("arenas"), CHILD(indexed, experimental_arenas)}, + {NAME("arenas_create_ext"), CTL(experimental_arenas_create_ext)}, + {NAME("prof_recent"), CHILD(named, experimental_prof_recent)}, + {NAME("batch_alloc"), CTL(experimental_batch_alloc)}, + {NAME("thread"), CHILD(named, experimental_thread)}}; -static const ctl_named_node_t root_node[] = { - {NAME("version"), CTL(version)}, - {NAME("epoch"), CTL(epoch)}, - {NAME("background_thread"), CTL(background_thread)}, - {NAME("max_background_threads"), CTL(max_background_threads)}, - {NAME("thread"), CHILD(named, thread)}, - 
{NAME("config"), CHILD(named, config)}, - {NAME("opt"), CHILD(named, opt)}, - {NAME("tcache"), CHILD(named, tcache)}, - {NAME("arena"), CHILD(indexed, arena)}, - {NAME("arenas"), CHILD(named, arenas)}, - {NAME("prof"), CHILD(named, prof)}, - {NAME("stats"), CHILD(named, stats)}, - {NAME("experimental"), CHILD(named, experimental)} -}; +static const ctl_named_node_t root_node[] = {{NAME("version"), CTL(version)}, + {NAME("epoch"), CTL(epoch)}, + {NAME("background_thread"), CTL(background_thread)}, + {NAME("max_background_threads"), CTL(max_background_threads)}, + {NAME("thread"), CHILD(named, thread)}, + {NAME("config"), CHILD(named, config)}, {NAME("opt"), CHILD(named, opt)}, + {NAME("tcache"), CHILD(named, tcache)}, + {NAME("arena"), CHILD(indexed, arena)}, + {NAME("arenas"), CHILD(named, arenas)}, {NAME("prof"), CHILD(named, prof)}, + {NAME("stats"), CHILD(named, stats)}, + {NAME("experimental"), CHILD(named, experimental)}}; static const ctl_named_node_t super_root_node[] = { - {NAME(""), CHILD(named, root)} -}; + {NAME(""), CHILD(named, root)}}; #undef NAME #undef CHILD @@ -1048,8 +940,7 @@ static const ctl_named_node_t super_root_node[] = { */ static void ctl_accum_locked_u64(locked_u64_t *dst, locked_u64_t *src) { - locked_inc_u64_unsynchronized(dst, - locked_read_u64_unsynchronized(src)); + locked_inc_u64_unsynchronized(dst, locked_read_u64_unsynchronized(src)); } static void @@ -1089,8 +980,8 @@ arenas_i2a_impl(size_t i, bool compat, bool validate) { * more than one past the range of indices that have * initialized ctl data. 
*/ - assert(i < ctl_arenas->narenas || (!validate && i == - ctl_arenas->narenas)); + assert(i < ctl_arenas->narenas + || (!validate && i == ctl_arenas->narenas)); a = (unsigned)i + 2; } break; @@ -1114,12 +1005,12 @@ arenas_i_impl(tsd_t *tsd, size_t i, bool compat, bool init) { if (init && ret == NULL) { if (config_stats) { struct container_s { - ctl_arena_t ctl_arena; - ctl_arena_stats_t astats; + ctl_arena_t ctl_arena; + ctl_arena_stats_t astats; }; - struct container_s *cont = - (struct container_s *)base_alloc(tsd_tsdn(tsd), - b0get(), sizeof(struct container_s), QUANTUM); + struct container_s *cont = (struct container_s *) + base_alloc(tsd_tsdn(tsd), b0get(), + sizeof(struct container_s), QUANTUM); if (cont == NULL) { return NULL; } @@ -1177,8 +1068,8 @@ ctl_arena_stats_amerge(tsdn_t *tsdn, ctl_arena_t *ctl_arena, arena_t *arena) { for (i = 0; i < SC_NBINS; i++) { bin_stats_t *bstats = &ctl_arena->astats->bstats[i].stats_data; - ctl_arena->astats->allocated_small += bstats->curregs * - sz_index2size(i); + ctl_arena->astats->allocated_small += bstats->curregs + * sz_index2size(i); ctl_arena->astats->nmalloc_small += bstats->nmalloc; ctl_arena->astats->ndalloc_small += bstats->ndalloc; ctl_arena->astats->nrequests_small += bstats->nrequests; @@ -1194,8 +1085,8 @@ ctl_arena_stats_amerge(tsdn_t *tsdn, ctl_arena_t *ctl_arena, arena_t *arena) { } static void -ctl_arena_stats_sdmerge(ctl_arena_t *ctl_sdarena, ctl_arena_t *ctl_arena, - bool destroyed) { +ctl_arena_stats_sdmerge( + ctl_arena_t *ctl_sdarena, ctl_arena_t *ctl_arena, bool destroyed) { unsigned i; if (!destroyed) { @@ -1216,52 +1107,59 @@ ctl_arena_stats_sdmerge(ctl_arena_t *ctl_sdarena, ctl_arena_t *ctl_arena, if (!destroyed) { sdstats->astats.mapped += astats->astats.mapped; - sdstats->astats.pa_shard_stats.pac_stats.retained - += astats->astats.pa_shard_stats.pac_stats.retained; - sdstats->astats.pa_shard_stats.edata_avail - += astats->astats.pa_shard_stats.edata_avail; + 
sdstats->astats.pa_shard_stats.pac_stats.retained += + astats->astats.pa_shard_stats.pac_stats.retained; + sdstats->astats.pa_shard_stats.edata_avail += + astats->astats.pa_shard_stats.edata_avail; } - ctl_accum_locked_u64( - &sdstats->astats.pa_shard_stats.pac_stats.decay_dirty.npurge, - &astats->astats.pa_shard_stats.pac_stats.decay_dirty.npurge); - ctl_accum_locked_u64( - &sdstats->astats.pa_shard_stats.pac_stats.decay_dirty.nmadvise, - &astats->astats.pa_shard_stats.pac_stats.decay_dirty.nmadvise); - ctl_accum_locked_u64( - &sdstats->astats.pa_shard_stats.pac_stats.decay_dirty.purged, - &astats->astats.pa_shard_stats.pac_stats.decay_dirty.purged); + ctl_accum_locked_u64(&sdstats->astats.pa_shard_stats.pac_stats + .decay_dirty.npurge, + &astats->astats.pa_shard_stats.pac_stats.decay_dirty + .npurge); + ctl_accum_locked_u64(&sdstats->astats.pa_shard_stats.pac_stats + .decay_dirty.nmadvise, + &astats->astats.pa_shard_stats.pac_stats.decay_dirty + .nmadvise); + ctl_accum_locked_u64(&sdstats->astats.pa_shard_stats.pac_stats + .decay_dirty.purged, + &astats->astats.pa_shard_stats.pac_stats.decay_dirty + .purged); - ctl_accum_locked_u64( - &sdstats->astats.pa_shard_stats.pac_stats.decay_muzzy.npurge, - &astats->astats.pa_shard_stats.pac_stats.decay_muzzy.npurge); - ctl_accum_locked_u64( - &sdstats->astats.pa_shard_stats.pac_stats.decay_muzzy.nmadvise, - &astats->astats.pa_shard_stats.pac_stats.decay_muzzy.nmadvise); - ctl_accum_locked_u64( - &sdstats->astats.pa_shard_stats.pac_stats.decay_muzzy.purged, - &astats->astats.pa_shard_stats.pac_stats.decay_muzzy.purged); + ctl_accum_locked_u64(&sdstats->astats.pa_shard_stats.pac_stats + .decay_muzzy.npurge, + &astats->astats.pa_shard_stats.pac_stats.decay_muzzy + .npurge); + ctl_accum_locked_u64(&sdstats->astats.pa_shard_stats.pac_stats + .decay_muzzy.nmadvise, + &astats->astats.pa_shard_stats.pac_stats.decay_muzzy + .nmadvise); + ctl_accum_locked_u64(&sdstats->astats.pa_shard_stats.pac_stats + .decay_muzzy.purged, + 
&astats->astats.pa_shard_stats.pac_stats.decay_muzzy + .purged); -#define OP(mtx) malloc_mutex_prof_merge( \ - &(sdstats->astats.mutex_prof_data[ \ - arena_prof_mutex_##mtx]), \ - &(astats->astats.mutex_prof_data[ \ - arena_prof_mutex_##mtx])); -MUTEX_PROF_ARENA_MUTEXES +#define OP(mtx) \ + malloc_mutex_prof_merge( \ + &(sdstats->astats.mutex_prof_data[arena_prof_mutex_##mtx]), \ + &(astats->astats.mutex_prof_data[arena_prof_mutex_##mtx])); + MUTEX_PROF_ARENA_MUTEXES #undef OP if (!destroyed) { sdstats->astats.base += astats->astats.base; - sdstats->astats.metadata_edata += astats->astats - .metadata_edata; - sdstats->astats.metadata_rtree += astats->astats - .metadata_rtree; + sdstats->astats.metadata_edata += + astats->astats.metadata_edata; + sdstats->astats.metadata_rtree += + astats->astats.metadata_rtree; sdstats->astats.resident += astats->astats.resident; - sdstats->astats.metadata_thp += astats->astats.metadata_thp; + sdstats->astats.metadata_thp += + astats->astats.metadata_thp; ctl_accum_atomic_zu(&sdstats->astats.internal, &astats->astats.internal); } else { assert(atomic_load_zu( - &astats->astats.internal, ATOMIC_RELAXED) == 0); + &astats->astats.internal, ATOMIC_RELAXED) + == 0); } if (!destroyed) { @@ -1283,8 +1181,8 @@ MUTEX_PROF_ARENA_MUTEXES } sdstats->astats.nmalloc_large += astats->astats.nmalloc_large; sdstats->astats.ndalloc_large += astats->astats.ndalloc_large; - sdstats->astats.nrequests_large - += astats->astats.nrequests_large; + sdstats->astats.nrequests_large += + astats->astats.nrequests_large; sdstats->astats.nflushes_large += astats->astats.nflushes_large; ctl_accum_atomic_zu( &sdstats->astats.pa_shard_stats.pac_stats.abandoned_vm, @@ -1322,14 +1220,12 @@ MUTEX_PROF_ARENA_MUTEXES assert(bstats->nonfull_slabs == 0); } - merged->batch_pops - += bstats->batch_pops; - merged->batch_failed_pushes - += bstats->batch_failed_pushes; - merged->batch_pushes - += bstats->batch_pushes; - merged->batch_pushed_elems - += 
bstats->batch_pushed_elems; + merged->batch_pops += bstats->batch_pops; + merged->batch_failed_pushes += + bstats->batch_failed_pushes; + merged->batch_pushes += bstats->batch_pushes; + merged->batch_pushed_elems += + bstats->batch_pushed_elems; malloc_mutex_prof_merge(&sdstats->bstats[i].mutex_data, &astats->bstats[i].mutex_data); @@ -1355,14 +1251,14 @@ MUTEX_PROF_ARENA_MUTEXES for (i = 0; i < SC_NPSIZES; i++) { sdstats->estats[i].ndirty += astats->estats[i].ndirty; sdstats->estats[i].nmuzzy += astats->estats[i].nmuzzy; - sdstats->estats[i].nretained - += astats->estats[i].nretained; - sdstats->estats[i].dirty_bytes - += astats->estats[i].dirty_bytes; - sdstats->estats[i].muzzy_bytes - += astats->estats[i].muzzy_bytes; - sdstats->estats[i].retained_bytes - += astats->estats[i].retained_bytes; + sdstats->estats[i].nretained += + astats->estats[i].nretained; + sdstats->estats[i].dirty_bytes += + astats->estats[i].dirty_bytes; + sdstats->estats[i].muzzy_bytes += + astats->estats[i].muzzy_bytes; + sdstats->estats[i].retained_bytes += + astats->estats[i].retained_bytes; } /* Merge HPA stats. 
*/ @@ -1384,11 +1280,11 @@ ctl_arena_refresh(tsdn_t *tsdn, arena_t *arena, ctl_arena_t *ctl_sdarena, static unsigned ctl_arena_init(tsd_t *tsd, const arena_config_t *config) { - unsigned arena_ind; + unsigned arena_ind; ctl_arena_t *ctl_arena; - if ((ctl_arena = ql_last(&ctl_arenas->destroyed, destroyed_link)) != - NULL) { + if ((ctl_arena = ql_last(&ctl_arenas->destroyed, destroyed_link)) + != NULL) { ql_remove(&ctl_arenas->destroyed, ctl_arena, destroyed_link); arena_ind = ctl_arena->arena_ind; } else { @@ -1415,8 +1311,8 @@ ctl_arena_init(tsd_t *tsd, const arena_config_t *config) { static void ctl_background_thread_stats_read(tsdn_t *tsdn) { background_thread_stats_t *stats = &ctl_stats->background_thread; - if (!have_background_thread || - background_thread_stats_read(tsdn, stats)) { + if (!have_background_thread + || background_thread_stats_read(tsdn, stats)) { memset(stats, 0, sizeof(background_thread_stats_t)); nstime_init_zero(&stats->run_interval); } @@ -1452,39 +1348,39 @@ ctl_refresh(tsdn_t *tsdn) { for (unsigned i = 0; i < narenas; i++) { ctl_arena_t *ctl_arena = arenas_i(i); - bool initialized = (tarenas[i] != NULL); + bool initialized = (tarenas[i] != NULL); ctl_arena->initialized = initialized; if (initialized) { - ctl_arena_refresh(tsdn, tarenas[i], ctl_sarena, i, - false); + ctl_arena_refresh( + tsdn, tarenas[i], ctl_sarena, i, false); } } if (config_stats) { - ctl_stats->allocated = ctl_sarena->astats->allocated_small + - ctl_sarena->astats->astats.allocated_large; + ctl_stats->allocated = ctl_sarena->astats->allocated_small + + ctl_sarena->astats->astats.allocated_large; ctl_stats->active = (ctl_sarena->pactive << LG_PAGE); - ctl_stats->metadata = ctl_sarena->astats->astats.base + - atomic_load_zu(&ctl_sarena->astats->astats.internal, - ATOMIC_RELAXED); - ctl_stats->metadata_edata = ctl_sarena->astats->astats - .metadata_edata; - ctl_stats->metadata_rtree = ctl_sarena->astats->astats - .metadata_rtree; + ctl_stats->metadata = 
ctl_sarena->astats->astats.base + + atomic_load_zu( + &ctl_sarena->astats->astats.internal, ATOMIC_RELAXED); + ctl_stats->metadata_edata = + ctl_sarena->astats->astats.metadata_edata; + ctl_stats->metadata_rtree = + ctl_sarena->astats->astats.metadata_rtree; ctl_stats->resident = ctl_sarena->astats->astats.resident; ctl_stats->metadata_thp = ctl_sarena->astats->astats.metadata_thp; ctl_stats->mapped = ctl_sarena->astats->astats.mapped; - ctl_stats->retained = ctl_sarena->astats->astats - .pa_shard_stats.pac_stats.retained; + ctl_stats->retained = ctl_sarena->astats->astats.pa_shard_stats + .pac_stats.retained; ctl_background_thread_stats_read(tsdn); -#define READ_GLOBAL_MUTEX_PROF_DATA(i, mtx) \ - malloc_mutex_lock(tsdn, &mtx); \ - malloc_mutex_prof_read(tsdn, &ctl_stats->mutex_prof_data[i], &mtx); \ - malloc_mutex_unlock(tsdn, &mtx); +#define READ_GLOBAL_MUTEX_PROF_DATA(i, mtx) \ + malloc_mutex_lock(tsdn, &mtx); \ + malloc_mutex_prof_read(tsdn, &ctl_stats->mutex_prof_data[i], &mtx); \ + malloc_mutex_unlock(tsdn, &mtx); if (config_prof && opt_prof) { READ_GLOBAL_MUTEX_PROF_DATA( @@ -1507,9 +1403,9 @@ ctl_refresh(tsdn_t *tsdn) { global_prof_mutex_background_thread, background_thread_lock); } else { - memset(&ctl_stats->mutex_prof_data[ - global_prof_mutex_background_thread], 0, - sizeof(mutex_prof_data_t)); + memset(&ctl_stats->mutex_prof_data + [global_prof_mutex_background_thread], + 0, sizeof(mutex_prof_data_t)); } /* We own ctl mutex already. */ malloc_mutex_prof_read(tsdn, @@ -1522,21 +1418,21 @@ ctl_refresh(tsdn_t *tsdn) { static bool ctl_init(tsd_t *tsd) { - bool ret; + bool ret; tsdn_t *tsdn = tsd_tsdn(tsd); malloc_mutex_lock(tsdn, &ctl_mtx); if (!ctl_initialized) { ctl_arena_t *ctl_sarena, *ctl_darena; - unsigned i; + unsigned i; /* * Allocate demand-zeroed space for pointers to the full * range of supported arena indices. 
*/ if (ctl_arenas == NULL) { - ctl_arenas = (ctl_arenas_t *)base_alloc(tsdn, - b0get(), sizeof(ctl_arenas_t), QUANTUM); + ctl_arenas = (ctl_arenas_t *)base_alloc( + tsdn, b0get(), sizeof(ctl_arenas_t), QUANTUM); if (ctl_arenas == NULL) { ret = true; goto label_return; @@ -1544,8 +1440,8 @@ ctl_init(tsd_t *tsd) { } if (config_stats && ctl_stats == NULL) { - ctl_stats = (ctl_stats_t *)base_alloc(tsdn, b0get(), - sizeof(ctl_stats_t), QUANTUM); + ctl_stats = (ctl_stats_t *)base_alloc( + tsdn, b0get(), sizeof(ctl_stats_t), QUANTUM); if (ctl_stats == NULL) { ret = true; goto label_return; @@ -1557,15 +1453,17 @@ ctl_init(tsd_t *tsd) { * here rather than doing it lazily elsewhere, in order * to limit when OOM-caused errors can occur. */ - if ((ctl_sarena = arenas_i_impl(tsd, MALLCTL_ARENAS_ALL, false, - true)) == NULL) { + if ((ctl_sarena = arenas_i_impl( + tsd, MALLCTL_ARENAS_ALL, false, true)) + == NULL) { ret = true; goto label_return; } ctl_sarena->initialized = true; - if ((ctl_darena = arenas_i_impl(tsd, MALLCTL_ARENAS_DESTROYED, - false, true)) == NULL) { + if ((ctl_darena = arenas_i_impl( + tsd, MALLCTL_ARENAS_DESTROYED, false, true)) + == NULL) { ret = true; goto label_return; } @@ -1600,9 +1498,9 @@ static int ctl_lookup(tsdn_t *tsdn, const ctl_named_node_t *starting_node, const char *name, const ctl_named_node_t **ending_nodep, size_t *mibp, size_t *depthp) { - int ret; - const char *elm, *tdot, *dot; - size_t elen, i, j; + int ret; + const char *elm, *tdot, *dot; + size_t elen, i, j; const ctl_named_node_t *node; elm = name; @@ -1624,8 +1522,8 @@ ctl_lookup(tsdn_t *tsdn, const ctl_named_node_t *starting_node, for (j = 0; j < node->nchildren; j++) { const ctl_named_node_t *child = ctl_named_children(node, j); - if (strlen(child->name) == elen && - strncmp(elm, child->name, elen) == 0) { + if (strlen(child->name) == elen + && strncmp(elm, child->name, elen) == 0) { node = child; mibp[i] = j; break; @@ -1636,7 +1534,7 @@ ctl_lookup(tsdn_t *tsdn, const 
ctl_named_node_t *starting_node, goto label_return; } } else { - uintmax_t index; + uintmax_t index; const ctl_indexed_node_t *inode; /* Children are indexed. */ @@ -1674,8 +1572,8 @@ ctl_lookup(tsdn_t *tsdn, const ctl_named_node_t *starting_node, /* Update elm. */ elm = &dot[1]; - dot = ((tdot = strchr(elm, '.')) != NULL) ? tdot : - strchr(elm, '\0'); + dot = ((tdot = strchr(elm, '.')) != NULL) ? tdot + : strchr(elm, '\0'); elen = (size_t)((uintptr_t)dot - (uintptr_t)elm); } if (ending_nodep != NULL) { @@ -1690,9 +1588,9 @@ label_return: int ctl_byname(tsd_t *tsd, const char *name, void *oldp, size_t *oldlenp, void *newp, size_t newlen) { - int ret; - size_t depth; - size_t mib[CTL_MAX_DEPTH]; + int ret; + size_t depth; + size_t mib[CTL_MAX_DEPTH]; const ctl_named_node_t *node; if (!ctl_initialized && ctl_init(tsd)) { @@ -1701,8 +1599,8 @@ ctl_byname(tsd_t *tsd, const char *name, void *oldp, size_t *oldlenp, } depth = CTL_MAX_DEPTH; - ret = ctl_lookup(tsd_tsdn(tsd), super_root_node, name, &node, mib, - &depth); + ret = ctl_lookup( + tsd_tsdn(tsd), super_root_node, name, &node, mib, &depth); if (ret != 0) { goto label_return; } @@ -1715,7 +1613,7 @@ ctl_byname(tsd_t *tsd, const char *name, void *oldp, size_t *oldlenp, } label_return: - return(ret); + return (ret); } int @@ -1727,10 +1625,10 @@ ctl_nametomib(tsd_t *tsd, const char *name, size_t *mibp, size_t *miblenp) { goto label_return; } - ret = ctl_lookup(tsd_tsdn(tsd), super_root_node, name, NULL, mibp, - miblenp); + ret = ctl_lookup( + tsd_tsdn(tsd), super_root_node, name, NULL, mibp, miblenp); label_return: - return(ret); + return (ret); } static int @@ -1766,13 +1664,13 @@ ctl_lookupbymib(tsdn_t *tsdn, const ctl_named_node_t **ending_nodep, ret = 0; label_return: - return(ret); + return (ret); } int ctl_bymib(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp, void *newp, size_t newlen) { - int ret; + int ret; const ctl_named_node_t *node; if (!ctl_initialized && ctl_init(tsd)) { @@ 
-1794,13 +1692,13 @@ ctl_bymib(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp, } label_return: - return(ret); + return (ret); } int -ctl_mibnametomib(tsd_t *tsd, size_t *mib, size_t miblen, const char *name, - size_t *miblenp) { - int ret; +ctl_mibnametomib( + tsd_t *tsd, size_t *mib, size_t miblen, const char *name, size_t *miblenp) { + int ret; const ctl_named_node_t *node; if (!ctl_initialized && ctl_init(tsd)) { @@ -1820,17 +1718,17 @@ ctl_mibnametomib(tsd_t *tsd, size_t *mib, size_t miblen, const char *name, assert(miblenp != NULL); assert(*miblenp >= miblen); *miblenp -= miblen; - ret = ctl_lookup(tsd_tsdn(tsd), node, name, NULL, mib + miblen, - miblenp); + ret = ctl_lookup( + tsd_tsdn(tsd), node, name, NULL, mib + miblen, miblenp); *miblenp += miblen; label_return: - return(ret); + return (ret); } int ctl_bymibname(tsd_t *tsd, size_t *mib, size_t miblen, const char *name, size_t *miblenp, void *oldp, size_t *oldlenp, void *newp, size_t newlen) { - int ret; + int ret; const ctl_named_node_t *node; if (!ctl_initialized && ctl_init(tsd)) { @@ -1853,29 +1751,29 @@ ctl_bymibname(tsd_t *tsd, size_t *mib, size_t miblen, const char *name, /* * The same node supplies the starting node and stores the ending node. */ - ret = ctl_lookup(tsd_tsdn(tsd), node, name, &node, mib + miblen, - miblenp); + ret = ctl_lookup( + tsd_tsdn(tsd), node, name, &node, mib + miblen, miblenp); *miblenp += miblen; if (ret != 0) { goto label_return; } if (node != NULL && node->ctl) { - ret = node->ctl(tsd, mib, *miblenp, oldp, oldlenp, newp, - newlen); + ret = node->ctl( + tsd, mib, *miblenp, oldp, oldlenp, newp, newlen); } else { /* The name refers to a partial path through the ctl tree. 
*/ ret = ENOENT; } label_return: - return(ret); + return (ret); } bool ctl_boot(void) { if (malloc_mutex_init(&ctl_mtx, "ctl", WITNESS_RANK_CTL, - malloc_mutex_rank_exclusive)) { + malloc_mutex_rank_exclusive)) { return true; } @@ -1907,195 +1805,201 @@ ctl_mtx_assert_held(tsdn_t *tsdn) { /******************************************************************************/ /* *_ctl() functions. */ -#define READONLY() do { \ - if (newp != NULL || newlen != 0) { \ - ret = EPERM; \ - goto label_return; \ - } \ -} while (0) +#define READONLY() \ + do { \ + if (newp != NULL || newlen != 0) { \ + ret = EPERM; \ + goto label_return; \ + } \ + } while (0) -#define WRITEONLY() do { \ - if (oldp != NULL || oldlenp != NULL) { \ - ret = EPERM; \ - goto label_return; \ - } \ -} while (0) +#define WRITEONLY() \ + do { \ + if (oldp != NULL || oldlenp != NULL) { \ + ret = EPERM; \ + goto label_return; \ + } \ + } while (0) /* Can read or write, but not both. */ -#define READ_XOR_WRITE() do { \ - if ((oldp != NULL && oldlenp != NULL) && (newp != NULL || \ - newlen != 0)) { \ - ret = EPERM; \ - goto label_return; \ - } \ -} while (0) +#define READ_XOR_WRITE() \ + do { \ + if ((oldp != NULL && oldlenp != NULL) \ + && (newp != NULL || newlen != 0)) { \ + ret = EPERM; \ + goto label_return; \ + } \ + } while (0) /* Can neither read nor write. */ -#define NEITHER_READ_NOR_WRITE() do { \ - if (oldp != NULL || oldlenp != NULL || newp != NULL || \ - newlen != 0) { \ - ret = EPERM; \ - goto label_return; \ - } \ -} while (0) +#define NEITHER_READ_NOR_WRITE() \ + do { \ + if (oldp != NULL || oldlenp != NULL || newp != NULL \ + || newlen != 0) { \ + ret = EPERM; \ + goto label_return; \ + } \ + } while (0) /* Verify that the space provided is enough. 
*/ -#define VERIFY_READ(t) do { \ - if (oldp == NULL || oldlenp == NULL || *oldlenp != sizeof(t)) { \ - if (oldlenp != NULL) { \ - *oldlenp = 0; \ - } \ - ret = EINVAL; \ - goto label_return; \ - } \ -} while (0) +#define VERIFY_READ(t) \ + do { \ + if (oldp == NULL || oldlenp == NULL \ + || *oldlenp != sizeof(t)) { \ + if (oldlenp != NULL) { \ + *oldlenp = 0; \ + } \ + ret = EINVAL; \ + goto label_return; \ + } \ + } while (0) -#define READ(v, t) do { \ - if (oldp != NULL && oldlenp != NULL) { \ - if (*oldlenp != sizeof(t)) { \ - size_t copylen = (sizeof(t) <= *oldlenp) \ - ? sizeof(t) : *oldlenp; \ - memcpy(oldp, (void *)&(v), copylen); \ - *oldlenp = copylen; \ - ret = EINVAL; \ - goto label_return; \ - } \ - *(t *)oldp = (v); \ - } \ -} while (0) +#define READ(v, t) \ + do { \ + if (oldp != NULL && oldlenp != NULL) { \ + if (*oldlenp != sizeof(t)) { \ + size_t copylen = (sizeof(t) <= *oldlenp) \ + ? sizeof(t) \ + : *oldlenp; \ + memcpy(oldp, (void *)&(v), copylen); \ + *oldlenp = copylen; \ + ret = EINVAL; \ + goto label_return; \ + } \ + *(t *)oldp = (v); \ + } \ + } while (0) -#define WRITE(v, t) do { \ - if (newp != NULL) { \ - if (newlen != sizeof(t)) { \ - ret = EINVAL; \ - goto label_return; \ - } \ - (v) = *(t *)newp; \ - } \ -} while (0) +#define WRITE(v, t) \ + do { \ + if (newp != NULL) { \ + if (newlen != sizeof(t)) { \ + ret = EINVAL; \ + goto label_return; \ + } \ + (v) = *(t *)newp; \ + } \ + } while (0) -#define ASSURED_WRITE(v, t) do { \ - if (newp == NULL || newlen != sizeof(t)) { \ - ret = EINVAL; \ - goto label_return; \ - } \ - (v) = *(t *)newp; \ -} while (0) +#define ASSURED_WRITE(v, t) \ + do { \ + if (newp == NULL || newlen != sizeof(t)) { \ + ret = EINVAL; \ + goto label_return; \ + } \ + (v) = *(t *)newp; \ + } while (0) -#define MIB_UNSIGNED(v, i) do { \ - if (mib[i] > UINT_MAX) { \ - ret = EFAULT; \ - goto label_return; \ - } \ - v = (unsigned)mib[i]; \ -} while (0) +#define MIB_UNSIGNED(v, i) \ + do { \ + if (mib[i] > UINT_MAX) { \ 
+ ret = EFAULT; \ + goto label_return; \ + } \ + v = (unsigned)mib[i]; \ + } while (0) /* * There's a lot of code duplication in the following macros due to limitations * in how nested cpp macros are expanded. */ -#define CTL_RO_CGEN(c, n, v, t) \ -static int \ -n##_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, \ - void *oldp, size_t *oldlenp, void *newp, size_t newlen) { \ - int ret; \ - t oldval; \ - \ - if (!(c)) { \ - return ENOENT; \ - } \ - malloc_mutex_lock(tsd_tsdn(tsd), &ctl_mtx); \ - READONLY(); \ - oldval = (v); \ - READ(oldval, t); \ - \ - ret = 0; \ -label_return: \ - malloc_mutex_unlock(tsd_tsdn(tsd), &ctl_mtx); \ - return ret; \ -} +#define CTL_RO_CGEN(c, n, v, t) \ + static int n##_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, \ + void *oldp, size_t *oldlenp, void *newp, size_t newlen) { \ + int ret; \ + t oldval; \ + \ + if (!(c)) { \ + return ENOENT; \ + } \ + malloc_mutex_lock(tsd_tsdn(tsd), &ctl_mtx); \ + READONLY(); \ + oldval = (v); \ + READ(oldval, t); \ + \ + ret = 0; \ + label_return: \ + malloc_mutex_unlock(tsd_tsdn(tsd), &ctl_mtx); \ + return ret; \ + } -#define CTL_RO_GEN(n, v, t) \ -static int \ -n##_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp, \ - size_t *oldlenp, void *newp, size_t newlen) { \ - int ret; \ - t oldval; \ - \ - malloc_mutex_lock(tsd_tsdn(tsd), &ctl_mtx); \ - READONLY(); \ - oldval = (v); \ - READ(oldval, t); \ - \ - ret = 0; \ -label_return: \ - malloc_mutex_unlock(tsd_tsdn(tsd), &ctl_mtx); \ - return ret; \ -} +#define CTL_RO_GEN(n, v, t) \ + static int n##_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, \ + void *oldp, size_t *oldlenp, void *newp, size_t newlen) { \ + int ret; \ + t oldval; \ + \ + malloc_mutex_lock(tsd_tsdn(tsd), &ctl_mtx); \ + READONLY(); \ + oldval = (v); \ + READ(oldval, t); \ + \ + ret = 0; \ + label_return: \ + malloc_mutex_unlock(tsd_tsdn(tsd), &ctl_mtx); \ + return ret; \ + } /* * ctl_mtx is not acquired, under the assumption that no pertinent data will * mutate 
during the call. */ -#define CTL_RO_NL_CGEN(c, n, v, t) \ -static int \ -n##_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, \ - void *oldp, size_t *oldlenp, void *newp, size_t newlen) { \ - int ret; \ - t oldval; \ - \ - if (!(c)) { \ - return ENOENT; \ - } \ - READONLY(); \ - oldval = (v); \ - READ(oldval, t); \ - \ - ret = 0; \ -label_return: \ - return ret; \ -} +#define CTL_RO_NL_CGEN(c, n, v, t) \ + static int n##_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, \ + void *oldp, size_t *oldlenp, void *newp, size_t newlen) { \ + int ret; \ + t oldval; \ + \ + if (!(c)) { \ + return ENOENT; \ + } \ + READONLY(); \ + oldval = (v); \ + READ(oldval, t); \ + \ + ret = 0; \ + label_return: \ + return ret; \ + } -#define CTL_RO_NL_GEN(n, v, t) \ -static int \ -n##_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, \ - void *oldp, size_t *oldlenp, void *newp, size_t newlen) { \ - int ret; \ - t oldval; \ - \ - READONLY(); \ - oldval = (v); \ - READ(oldval, t); \ - \ - ret = 0; \ -label_return: \ - return ret; \ -} +#define CTL_RO_NL_GEN(n, v, t) \ + static int n##_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, \ + void *oldp, size_t *oldlenp, void *newp, size_t newlen) { \ + int ret; \ + t oldval; \ + \ + READONLY(); \ + oldval = (v); \ + READ(oldval, t); \ + \ + ret = 0; \ + label_return: \ + return ret; \ + } -#define CTL_RO_CONFIG_GEN(n, t) \ -static int \ -n##_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, \ - void *oldp, size_t *oldlenp, void *newp, size_t newlen) { \ - int ret; \ - t oldval; \ - \ - READONLY(); \ - oldval = n; \ - READ(oldval, t); \ - \ - ret = 0; \ -label_return: \ - return ret; \ -} +#define CTL_RO_CONFIG_GEN(n, t) \ + static int n##_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, \ + void *oldp, size_t *oldlenp, void *newp, size_t newlen) { \ + int ret; \ + t oldval; \ + \ + READONLY(); \ + oldval = n; \ + READ(oldval, t); \ + \ + ret = 0; \ + label_return: \ + return ret; \ + } 
/******************************************************************************/ CTL_RO_NL_GEN(version, JEMALLOC_VERSION, const char *) static int -epoch_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, - void *oldp, size_t *oldlenp, void *newp, size_t newlen) { - int ret; +epoch_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp, + size_t *oldlenp, void *newp, size_t newlen) { + int ret; UNUSED uint64_t newval; malloc_mutex_lock(tsd_tsdn(tsd), &ctl_mtx); @@ -2112,10 +2016,9 @@ label_return: } static int -background_thread_ctl(tsd_t *tsd, const size_t *mib, - size_t miblen, void *oldp, size_t *oldlenp, - void *newp, size_t newlen) { - int ret; +background_thread_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp, + size_t *oldlenp, void *newp, size_t newlen) { + int ret; bool oldval; if (!have_background_thread) { @@ -2164,10 +2067,9 @@ label_return: } static int -max_background_threads_ctl(tsd_t *tsd, const size_t *mib, - size_t miblen, void *oldp, size_t *oldlenp, void *newp, - size_t newlen) { - int ret; +max_background_threads_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, + void *oldp, size_t *oldlenp, void *newp, size_t newlen) { + int ret; size_t oldval; if (!have_background_thread) { @@ -2193,8 +2095,7 @@ max_background_threads_ctl(tsd_t *tsd, const size_t *mib, ret = 0; goto label_return; } - if (newval > opt_max_background_threads || - newval == 0) { + if (newval > opt_max_background_threads || newval == 0) { ret = EINVAL; goto label_return; } @@ -2244,19 +2145,19 @@ CTL_RO_CONFIG_GEN(config_xmalloc, bool) CTL_RO_NL_GEN(opt_abort, opt_abort, bool) CTL_RO_NL_GEN(opt_abort_conf, opt_abort_conf, bool) CTL_RO_NL_GEN(opt_cache_oblivious, opt_cache_oblivious, bool) -CTL_RO_NL_GEN(opt_debug_double_free_max_scan, - opt_debug_double_free_max_scan, unsigned) +CTL_RO_NL_GEN( + opt_debug_double_free_max_scan, opt_debug_double_free_max_scan, unsigned) CTL_RO_NL_GEN(opt_trust_madvise, opt_trust_madvise, bool) CTL_RO_NL_GEN(opt_confirm_conf, 
opt_confirm_conf, bool) /* HPA options. */ CTL_RO_NL_GEN(opt_hpa, opt_hpa, bool) -CTL_RO_NL_GEN(opt_hpa_hugification_threshold, - opt_hpa_opts.hugification_threshold, size_t) +CTL_RO_NL_GEN( + opt_hpa_hugification_threshold, opt_hpa_opts.hugification_threshold, size_t) CTL_RO_NL_GEN(opt_hpa_hugify_delay_ms, opt_hpa_opts.hugify_delay_ms, uint64_t) CTL_RO_NL_GEN(opt_hpa_hugify_sync, opt_hpa_opts.hugify_sync, bool) -CTL_RO_NL_GEN(opt_hpa_min_purge_interval_ms, opt_hpa_opts.min_purge_interval_ms, - uint64_t) +CTL_RO_NL_GEN( + opt_hpa_min_purge_interval_ms, opt_hpa_opts.min_purge_interval_ms, uint64_t) CTL_RO_NL_GEN(opt_experimental_hpa_max_purge_nhp, opt_hpa_opts.experimental_max_purge_nhp, ssize_t) @@ -2271,19 +2172,19 @@ CTL_RO_NL_GEN(opt_hpa_slab_max_alloc, opt_hpa_opts.slab_max_alloc, size_t) CTL_RO_NL_GEN(opt_hpa_sec_nshards, opt_hpa_sec_opts.nshards, size_t) CTL_RO_NL_GEN(opt_hpa_sec_max_alloc, opt_hpa_sec_opts.max_alloc, size_t) CTL_RO_NL_GEN(opt_hpa_sec_max_bytes, opt_hpa_sec_opts.max_bytes, size_t) -CTL_RO_NL_GEN(opt_hpa_sec_bytes_after_flush, opt_hpa_sec_opts.bytes_after_flush, - size_t) -CTL_RO_NL_GEN(opt_hpa_sec_batch_fill_extra, opt_hpa_sec_opts.batch_fill_extra, - size_t) +CTL_RO_NL_GEN( + opt_hpa_sec_bytes_after_flush, opt_hpa_sec_opts.bytes_after_flush, size_t) +CTL_RO_NL_GEN( + opt_hpa_sec_batch_fill_extra, opt_hpa_sec_opts.batch_fill_extra, size_t) CTL_RO_NL_GEN(opt_huge_arena_pac_thp, opt_huge_arena_pac_thp, bool) -CTL_RO_NL_GEN(opt_metadata_thp, metadata_thp_mode_names[opt_metadata_thp], - const char *) +CTL_RO_NL_GEN( + opt_metadata_thp, metadata_thp_mode_names[opt_metadata_thp], const char *) CTL_RO_NL_GEN(opt_retain, opt_retain, bool) CTL_RO_NL_GEN(opt_dss, opt_dss, const char *) CTL_RO_NL_GEN(opt_narenas, opt_narenas, unsigned) -CTL_RO_NL_GEN(opt_percpu_arena, percpu_arena_mode_names[opt_percpu_arena], - const char *) +CTL_RO_NL_GEN( + opt_percpu_arena, percpu_arena_mode_names[opt_percpu_arena], const char *) CTL_RO_NL_GEN(opt_mutex_max_spin, 
opt_mutex_max_spin, int64_t) CTL_RO_NL_GEN(opt_oversize_threshold, opt_oversize_threshold, size_t) CTL_RO_NL_GEN(opt_background_thread, opt_background_thread, bool) @@ -2302,65 +2203,66 @@ CTL_RO_NL_CGEN(config_enable_cxx, opt_experimental_infallible_new, opt_experimental_infallible_new, bool) CTL_RO_NL_GEN(opt_experimental_tcache_gc, opt_experimental_tcache_gc, bool) CTL_RO_NL_GEN(opt_max_batched_size, opt_bin_info_max_batched_size, size_t) -CTL_RO_NL_GEN(opt_remote_free_max, opt_bin_info_remote_free_max, - size_t) -CTL_RO_NL_GEN(opt_remote_free_max_batch, opt_bin_info_remote_free_max_batch, - size_t) +CTL_RO_NL_GEN(opt_remote_free_max, opt_bin_info_remote_free_max, size_t) +CTL_RO_NL_GEN( + opt_remote_free_max_batch, opt_bin_info_remote_free_max_batch, size_t) CTL_RO_NL_GEN(opt_tcache, opt_tcache, bool) CTL_RO_NL_GEN(opt_tcache_max, opt_tcache_max, size_t) -CTL_RO_NL_GEN(opt_tcache_nslots_small_min, opt_tcache_nslots_small_min, - unsigned) -CTL_RO_NL_GEN(opt_tcache_nslots_small_max, opt_tcache_nslots_small_max, - unsigned) +CTL_RO_NL_GEN( + opt_tcache_nslots_small_min, opt_tcache_nslots_small_min, unsigned) +CTL_RO_NL_GEN( + opt_tcache_nslots_small_max, opt_tcache_nslots_small_max, unsigned) CTL_RO_NL_GEN(opt_tcache_nslots_large, opt_tcache_nslots_large, unsigned) CTL_RO_NL_GEN(opt_lg_tcache_nslots_mul, opt_lg_tcache_nslots_mul, ssize_t) CTL_RO_NL_GEN(opt_tcache_gc_incr_bytes, opt_tcache_gc_incr_bytes, size_t) CTL_RO_NL_GEN(opt_tcache_gc_delay_bytes, opt_tcache_gc_delay_bytes, size_t) -CTL_RO_NL_GEN(opt_lg_tcache_flush_small_div, opt_lg_tcache_flush_small_div, - unsigned) -CTL_RO_NL_GEN(opt_lg_tcache_flush_large_div, opt_lg_tcache_flush_large_div, - unsigned) +CTL_RO_NL_GEN( + opt_lg_tcache_flush_small_div, opt_lg_tcache_flush_small_div, unsigned) +CTL_RO_NL_GEN( + opt_lg_tcache_flush_large_div, opt_lg_tcache_flush_large_div, unsigned) CTL_RO_NL_GEN(opt_thp, thp_mode_names[opt_thp], const char *) -CTL_RO_NL_GEN(opt_lg_extent_max_active_fit, 
opt_lg_extent_max_active_fit, - size_t) -CTL_RO_NL_GEN(opt_process_madvise_max_batch, opt_process_madvise_max_batch, - size_t) +CTL_RO_NL_GEN( + opt_lg_extent_max_active_fit, opt_lg_extent_max_active_fit, size_t) +CTL_RO_NL_GEN( + opt_process_madvise_max_batch, opt_process_madvise_max_batch, size_t) CTL_RO_NL_CGEN(config_prof, opt_prof, opt_prof, bool) CTL_RO_NL_CGEN(config_prof, opt_prof_prefix, opt_prof_prefix, const char *) CTL_RO_NL_CGEN(config_prof, opt_prof_active, opt_prof_active, bool) -CTL_RO_NL_CGEN(config_prof, opt_prof_thread_active_init, - opt_prof_thread_active_init, bool) +CTL_RO_NL_CGEN( + config_prof, opt_prof_thread_active_init, opt_prof_thread_active_init, bool) CTL_RO_NL_CGEN(config_prof, opt_prof_bt_max, opt_prof_bt_max, unsigned) CTL_RO_NL_CGEN(config_prof, opt_lg_prof_sample, opt_lg_prof_sample, size_t) -CTL_RO_NL_CGEN(config_prof, opt_experimental_lg_prof_threshold, opt_experimental_lg_prof_threshold, size_t) +CTL_RO_NL_CGEN(config_prof, opt_experimental_lg_prof_threshold, + opt_experimental_lg_prof_threshold, size_t) CTL_RO_NL_CGEN(config_prof, opt_prof_accum, opt_prof_accum, bool) -CTL_RO_NL_CGEN(config_prof, opt_prof_pid_namespace, opt_prof_pid_namespace, - bool) +CTL_RO_NL_CGEN( + config_prof, opt_prof_pid_namespace, opt_prof_pid_namespace, bool) CTL_RO_NL_CGEN(config_prof, opt_lg_prof_interval, opt_lg_prof_interval, ssize_t) CTL_RO_NL_CGEN(config_prof, opt_prof_gdump, opt_prof_gdump, bool) CTL_RO_NL_CGEN(config_prof, opt_prof_final, opt_prof_final, bool) CTL_RO_NL_CGEN(config_prof, opt_prof_leak, opt_prof_leak, bool) CTL_RO_NL_CGEN(config_prof, opt_prof_leak_error, opt_prof_leak_error, bool) -CTL_RO_NL_CGEN(config_prof, opt_prof_recent_alloc_max, - opt_prof_recent_alloc_max, ssize_t) +CTL_RO_NL_CGEN( + config_prof, opt_prof_recent_alloc_max, opt_prof_recent_alloc_max, ssize_t) CTL_RO_NL_CGEN(config_prof, opt_prof_stats, opt_prof_stats, bool) -CTL_RO_NL_CGEN(config_prof, opt_prof_sys_thread_name, opt_prof_sys_thread_name, - bool) 
+CTL_RO_NL_CGEN( + config_prof, opt_prof_sys_thread_name, opt_prof_sys_thread_name, bool) CTL_RO_NL_CGEN(config_prof, opt_prof_time_res, prof_time_res_mode_names[opt_prof_time_res], const char *) -CTL_RO_NL_CGEN(config_uaf_detection, opt_lg_san_uaf_align, - opt_lg_san_uaf_align, ssize_t) +CTL_RO_NL_CGEN( + config_uaf_detection, opt_lg_san_uaf_align, opt_lg_san_uaf_align, ssize_t) CTL_RO_NL_GEN(opt_zero_realloc, zero_realloc_mode_names[opt_zero_realloc_action], const char *) -CTL_RO_NL_GEN(opt_disable_large_size_classes, opt_disable_large_size_classes, bool) +CTL_RO_NL_GEN( + opt_disable_large_size_classes, opt_disable_large_size_classes, bool) /* malloc_conf options */ CTL_RO_NL_CGEN(opt_malloc_conf_symlink, opt_malloc_conf_symlink, opt_malloc_conf_symlink, const char *) CTL_RO_NL_CGEN(opt_malloc_conf_env_var, opt_malloc_conf_env_var, opt_malloc_conf_env_var, const char *) -CTL_RO_NL_CGEN(je_malloc_conf, opt_malloc_conf_global_var, je_malloc_conf, - const char *) +CTL_RO_NL_CGEN( + je_malloc_conf, opt_malloc_conf_global_var, je_malloc_conf, const char *) CTL_RO_NL_CGEN(je_malloc_conf_2_conf_harder, opt_malloc_conf_global_var_2_conf_harder, je_malloc_conf_2_conf_harder, const char *) @@ -2368,9 +2270,9 @@ CTL_RO_NL_CGEN(je_malloc_conf_2_conf_harder, /******************************************************************************/ static int -thread_arena_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, - void *oldp, size_t *oldlenp, void *newp, size_t newlen) { - int ret; +thread_arena_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp, + size_t *oldlenp, void *newp, size_t newlen) { + int ret; arena_t *oldarena; unsigned newind, oldind; @@ -2391,8 +2293,8 @@ thread_arena_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, goto label_return; } - if (have_percpu_arena && - PERCPU_ARENA_ENABLED(opt_percpu_arena)) { + if (have_percpu_arena + && PERCPU_ARENA_ENABLED(opt_percpu_arena)) { if (newind < percpu_arena_ind_limit(opt_percpu_arena)) { /* * If perCPU 
arena is enabled, thread_arena @@ -2429,9 +2331,8 @@ CTL_RO_NL_GEN(thread_allocatedp, tsd_thread_allocatedp_get(tsd), uint64_t *) static int thread_tcache_ncached_max_read_sizeclass_ctl(tsd_t *tsd, const size_t *mib, - size_t miblen, void *oldp, size_t *oldlenp, void *newp, - size_t newlen) { - int ret; + size_t miblen, void *oldp, size_t *oldlenp, void *newp, size_t newlen) { + int ret; size_t bin_size = 0; /* Read the bin size from newp. */ @@ -2455,8 +2356,7 @@ label_return: static int thread_tcache_ncached_max_write_ctl(tsd_t *tsd, const size_t *mib, - size_t miblen, void *oldp, size_t *oldlenp, void *newp, - size_t newlen) { + size_t miblen, void *oldp, size_t *oldlenp, void *newp, size_t newlen) { int ret; WRITEONLY(); if (newp != NULL) { @@ -2471,8 +2371,8 @@ thread_tcache_ncached_max_write_ctl(tsd_t *tsd, const size_t *mib, goto label_return; } /* Get the length of the setting string safely. */ - char *end = (char *)memchr(settings, '\0', - CTL_MULTI_SETTING_MAX_LEN); + char *end = (char *)memchr( + settings, '\0', CTL_MULTI_SETTING_MAX_LEN); if (end == NULL) { ret = EINVAL; goto label_return; @@ -2502,10 +2402,9 @@ CTL_RO_NL_GEN(thread_deallocated, tsd_thread_deallocated_get(tsd), uint64_t) CTL_RO_NL_GEN(thread_deallocatedp, tsd_thread_deallocatedp_get(tsd), uint64_t *) static int -thread_tcache_enabled_ctl(tsd_t *tsd, const size_t *mib, - size_t miblen, void *oldp, size_t *oldlenp, void *newp, - size_t newlen) { - int ret; +thread_tcache_enabled_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, + void *oldp, size_t *oldlenp, void *newp, size_t newlen) { + int ret; bool oldval; oldval = tcache_enabled_get(tsd); @@ -2524,10 +2423,9 @@ label_return: } static int -thread_tcache_max_ctl(tsd_t *tsd, const size_t *mib, - size_t miblen, void *oldp, size_t *oldlenp, void *newp, - size_t newlen) { - int ret; +thread_tcache_max_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp, + size_t *oldlenp, void *newp, size_t newlen) { + int ret; size_t oldval; /* 
pointer to tcache_t always exists even with tcache disabled. */ @@ -2547,7 +2445,7 @@ thread_tcache_max_ctl(tsd_t *tsd, const size_t *mib, new_tcache_max = TCACHE_MAXCLASS_LIMIT; } new_tcache_max = sz_s2u(new_tcache_max); - if(new_tcache_max != oldval) { + if (new_tcache_max != oldval) { thread_tcache_max_set(tsd, new_tcache_max); } } @@ -2558,9 +2456,8 @@ label_return: } static int -thread_tcache_flush_ctl(tsd_t *tsd, const size_t *mib, - size_t miblen, void *oldp, size_t *oldlenp, void *newp, - size_t newlen) { +thread_tcache_flush_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, + void *oldp, size_t *oldlenp, void *newp, size_t newlen) { int ret; if (!tcache_available(tsd)) { @@ -2578,9 +2475,8 @@ label_return: } static int -thread_peak_read_ctl(tsd_t *tsd, const size_t *mib, - size_t miblen, void *oldp, size_t *oldlenp, void *newp, - size_t newlen) { +thread_peak_read_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp, + size_t *oldlenp, void *newp, size_t newlen) { int ret; if (!config_stats) { return ENOENT; @@ -2595,9 +2491,8 @@ label_return: } static int -thread_peak_reset_ctl(tsd_t *tsd, const size_t *mib, - size_t miblen, void *oldp, size_t *oldlenp, void *newp, - size_t newlen) { +thread_peak_reset_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp, + size_t *oldlenp, void *newp, size_t newlen) { int ret; if (!config_stats) { return ENOENT; @@ -2610,9 +2505,8 @@ label_return: } static int -thread_prof_name_ctl(tsd_t *tsd, const size_t *mib, - size_t miblen, void *oldp, size_t *oldlenp, void *newp, - size_t newlen) { +thread_prof_name_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp, + size_t *oldlenp, void *newp, size_t newlen) { int ret; if (!config_prof || !opt_prof) { @@ -2642,10 +2536,9 @@ label_return: } static int -thread_prof_active_ctl(tsd_t *tsd, const size_t *mib, - size_t miblen, void *oldp, size_t *oldlenp, void *newp, - size_t newlen) { - int ret; +thread_prof_active_ctl(tsd_t *tsd, const size_t *mib, size_t 
miblen, void *oldp, + size_t *oldlenp, void *newp, size_t newlen) { + int ret; bool oldval; if (!config_prof) { @@ -2675,9 +2568,8 @@ label_return: } static int -thread_idle_ctl(tsd_t *tsd, const size_t *mib, - size_t miblen, void *oldp, size_t *oldlenp, void *newp, - size_t newlen) { +thread_idle_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp, + size_t *oldlenp, void *newp, size_t newlen) { int ret; NEITHER_READ_NOR_WRITE(); @@ -2710,9 +2602,9 @@ label_return: /******************************************************************************/ static int -tcache_create_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, - void *oldp, size_t *oldlenp, void *newp, size_t newlen) { - int ret; +tcache_create_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp, + size_t *oldlenp, void *newp, size_t newlen) { + int ret; unsigned tcache_ind; READONLY(); @@ -2729,9 +2621,9 @@ label_return: } static int -tcache_flush_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, - void *oldp, size_t *oldlenp, void *newp, size_t newlen) { - int ret; +tcache_flush_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp, + size_t *oldlenp, void *newp, size_t newlen) { + int ret; unsigned tcache_ind; WRITEONLY(); @@ -2744,9 +2636,9 @@ label_return: } static int -tcache_destroy_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, - void *oldp, size_t *oldlenp, void *newp, size_t newlen) { - int ret; +tcache_destroy_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp, + size_t *oldlenp, void *newp, size_t newlen) { + int ret; unsigned tcache_ind; WRITEONLY(); @@ -2763,10 +2655,10 @@ label_return: static int arena_i_initialized_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp, void *newp, size_t newlen) { - int ret; - tsdn_t *tsdn = tsd_tsdn(tsd); + int ret; + tsdn_t *tsdn = tsd_tsdn(tsd); unsigned arena_ind; - bool initialized; + bool initialized; READONLY(); MIB_UNSIGNED(arena_ind, 1); @@ -2808,8 +2700,8 @@ arena_i_decay(tsdn_t *tsdn, 
unsigned arena_ind, bool all) { for (i = 0; i < narenas; i++) { if (tarenas[i] != NULL) { - arena_decay(tsdn, tarenas[i], false, - all); + arena_decay( + tsdn, tarenas[i], false, all); } } } else { @@ -2832,7 +2724,7 @@ arena_i_decay(tsdn_t *tsdn, unsigned arena_ind, bool all) { static int arena_i_decay_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp, void *newp, size_t newlen) { - int ret; + int ret; unsigned arena_ind; NEITHER_READ_NOR_WRITE(); @@ -2847,7 +2739,7 @@ label_return: static int arena_i_purge_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp, void *newp, size_t newlen) { - int ret; + int ret; unsigned arena_ind; NEITHER_READ_NOR_WRITE(); @@ -2913,12 +2805,12 @@ arena_reset_finish_background_thread(tsd_t *tsd, unsigned arena_ind) { static int arena_i_reset_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp, void *newp, size_t newlen) { - int ret; + int ret; unsigned arena_ind; arena_t *arena; - ret = arena_i_reset_destroy_helper(tsd, mib, miblen, oldp, oldlenp, - newp, newlen, &arena_ind, &arena); + ret = arena_i_reset_destroy_helper( + tsd, mib, miblen, oldp, oldlenp, newp, newlen, &arena_ind, &arena); if (ret != 0) { return ret; } @@ -2933,21 +2825,21 @@ arena_i_reset_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp, static int arena_i_destroy_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp, void *newp, size_t newlen) { - int ret; - unsigned arena_ind; - arena_t *arena; + int ret; + unsigned arena_ind; + arena_t *arena; ctl_arena_t *ctl_darena, *ctl_arena; malloc_mutex_lock(tsd_tsdn(tsd), &ctl_mtx); - ret = arena_i_reset_destroy_helper(tsd, mib, miblen, oldp, oldlenp, - newp, newlen, &arena_ind, &arena); + ret = arena_i_reset_destroy_helper( + tsd, mib, miblen, oldp, oldlenp, newp, newlen, &arena_ind, &arena); if (ret != 0) { goto label_return; } - if (arena_nthreads_get(arena, false) != 0 || arena_nthreads_get(arena, - true) 
!= 0) { + if (arena_nthreads_get(arena, false) != 0 + || arena_nthreads_get(arena, true) != 0) { ret = EFAULT; goto label_return; } @@ -2978,16 +2870,16 @@ label_return: static int arena_i_dss_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp, void *newp, size_t newlen) { - int ret; + int ret; const char *dss = NULL; - unsigned arena_ind; - dss_prec_t dss_prec = dss_prec_limit; + unsigned arena_ind; + dss_prec_t dss_prec = dss_prec_limit; malloc_mutex_lock(tsd_tsdn(tsd), &ctl_mtx); WRITE(dss, const char *); MIB_UNSIGNED(arena_ind, 1); if (dss != NULL) { - int i; + int i; bool match = false; for (i = 0; i < dss_prec_limit; i++) { @@ -3009,18 +2901,19 @@ arena_i_dss_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp, * 6.0.0. */ dss_prec_t dss_prec_old; - if (arena_ind == MALLCTL_ARENAS_ALL || arena_ind == - ctl_arenas->narenas) { - if (dss_prec != dss_prec_limit && - extent_dss_prec_set(dss_prec)) { + if (arena_ind == MALLCTL_ARENAS_ALL + || arena_ind == ctl_arenas->narenas) { + if (dss_prec != dss_prec_limit + && extent_dss_prec_set(dss_prec)) { ret = EFAULT; goto label_return; } dss_prec_old = extent_dss_prec_get(); } else { arena_t *arena = arena_get(tsd_tsdn(tsd), arena_ind, false); - if (arena == NULL || (dss_prec != dss_prec_limit && - arena_dss_prec_set(arena, dss_prec))) { + if (arena == NULL + || (dss_prec != dss_prec_limit + && arena_dss_prec_set(arena, dss_prec))) { ret = EFAULT; goto label_return; } @@ -3071,7 +2964,7 @@ label_return: static int arena_i_decay_ms_ctl_impl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp, void *newp, size_t newlen, bool dirty) { - int ret; + int ret; unsigned arena_ind; arena_t *arena; @@ -3093,8 +2986,8 @@ arena_i_decay_ms_ctl_impl(tsd_t *tsd, const size_t *mib, size_t miblen, goto label_return; } - if (arena_decay_ms_set(tsd_tsdn(tsd), arena, state, - *(ssize_t *)newp)) { + if (arena_decay_ms_set( + tsd_tsdn(tsd), arena, state, *(ssize_t *)newp)) { ret = 
EFAULT; goto label_return; } @@ -3108,21 +3001,21 @@ label_return: static int arena_i_dirty_decay_ms_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp, void *newp, size_t newlen) { - return arena_i_decay_ms_ctl_impl(tsd, mib, miblen, oldp, oldlenp, newp, - newlen, true); + return arena_i_decay_ms_ctl_impl( + tsd, mib, miblen, oldp, oldlenp, newp, newlen, true); } static int arena_i_muzzy_decay_ms_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp, void *newp, size_t newlen) { - return arena_i_decay_ms_ctl_impl(tsd, mib, miblen, oldp, oldlenp, newp, - newlen, false); + return arena_i_decay_ms_ctl_impl( + tsd, mib, miblen, oldp, oldlenp, newp, newlen, false); } static int arena_i_extent_hooks_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp, void *newp, size_t newlen) { - int ret; + int ret; unsigned arena_ind; arena_t *arena; @@ -3147,8 +3040,8 @@ arena_i_extent_hooks_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, arena_config_t config = arena_config_default; config.extent_hooks = new_extent_hooks; - arena = arena_init(tsd_tsdn(tsd), arena_ind, - &config); + arena = arena_init( + tsd_tsdn(tsd), arena_ind, &config); if (arena == NULL) { ret = EFAULT; goto label_return; @@ -3159,13 +3052,12 @@ arena_i_extent_hooks_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, extent_hooks_t *new_extent_hooks JEMALLOC_CC_SILENCE_INIT(NULL); WRITE(new_extent_hooks, extent_hooks_t *); - old_extent_hooks = arena_set_extent_hooks(tsd, - arena, new_extent_hooks); + old_extent_hooks = arena_set_extent_hooks( + tsd, arena, new_extent_hooks); READ(old_extent_hooks, extent_hooks_t *); } else { - old_extent_hooks = - ehooks_get_extent_hooks_ptr( - arena_get_ehooks(arena)); + old_extent_hooks = ehooks_get_extent_hooks_ptr( + arena_get_ehooks(arena)); READ(old_extent_hooks, extent_hooks_t *); } } @@ -3180,10 +3072,9 @@ label_return: } static int -arena_i_retain_grow_limit_ctl(tsd_t *tsd, const size_t *mib, - 
size_t miblen, void *oldp, size_t *oldlenp, void *newp, - size_t newlen) { - int ret; +arena_i_retain_grow_limit_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, + void *oldp, size_t *oldlenp, void *newp, size_t newlen) { + int ret; unsigned arena_ind; arena_t *arena; @@ -3194,14 +3085,14 @@ arena_i_retain_grow_limit_ctl(tsd_t *tsd, const size_t *mib, malloc_mutex_lock(tsd_tsdn(tsd), &ctl_mtx); MIB_UNSIGNED(arena_ind, 1); - if (arena_ind < narenas_total_get() && (arena = - arena_get(tsd_tsdn(tsd), arena_ind, false)) != NULL) { + if (arena_ind < narenas_total_get() + && (arena = arena_get(tsd_tsdn(tsd), arena_ind, false)) != NULL) { size_t old_limit, new_limit; if (newp != NULL) { WRITE(new_limit, size_t); } - bool err = arena_retain_grow_limit_get_set(tsd, arena, - &old_limit, newp != NULL ? &new_limit : NULL); + bool err = arena_retain_grow_limit_get_set( + tsd, arena, &old_limit, newp != NULL ? &new_limit : NULL); if (!err) { READ(old_limit, size_t); ret = 0; @@ -3223,16 +3114,16 @@ label_return: * ARENA_NAME_LEN or the length of the name when it was set. 
*/ static int -arena_i_name_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, - void *oldp, size_t *oldlenp, void *newp, size_t newlen) { - int ret; - unsigned arena_ind; +arena_i_name_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp, + size_t *oldlenp, void *newp, size_t newlen) { + int ret; + unsigned arena_ind; char *name JEMALLOC_CLANG_ANALYZER_SILENCE_INIT(NULL); malloc_mutex_lock(tsd_tsdn(tsd), &ctl_mtx); MIB_UNSIGNED(arena_ind, 1); - if (arena_ind == MALLCTL_ARENAS_ALL || arena_ind >= - ctl_arenas->narenas) { + if (arena_ind == MALLCTL_ARENAS_ALL + || arena_ind >= ctl_arenas->narenas) { ret = EINVAL; goto label_return; } @@ -3272,8 +3163,7 @@ label_return: } static const ctl_named_node_t * -arena_i_index(tsdn_t *tsdn, const size_t *mib, size_t miblen, - size_t i) { +arena_i_index(tsdn_t *tsdn, const size_t *mib, size_t miblen, size_t i) { const ctl_named_node_t *ret; malloc_mutex_lock(tsdn, &ctl_mtx); @@ -3298,9 +3188,9 @@ label_return: /******************************************************************************/ static int -arenas_narenas_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, - void *oldp, size_t *oldlenp, void *newp, size_t newlen) { - int ret; +arenas_narenas_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp, + size_t *oldlenp, void *newp, size_t newlen) { + int ret; unsigned narenas; malloc_mutex_lock(tsd_tsdn(tsd), &ctl_mtx); @@ -3315,14 +3205,13 @@ label_return: } static int -arenas_decay_ms_ctl_impl(tsd_t *tsd, const size_t *mib, - size_t miblen, void *oldp, size_t *oldlenp, void *newp, - size_t newlen, bool dirty) { +arenas_decay_ms_ctl_impl(tsd_t *tsd, const size_t *mib, size_t miblen, + void *oldp, size_t *oldlenp, void *newp, size_t newlen, bool dirty) { int ret; if (oldp != NULL && oldlenp != NULL) { - size_t oldval = (dirty ? arena_dirty_decay_ms_default_get() : - arena_muzzy_decay_ms_default_get()); + size_t oldval = (dirty ? 
arena_dirty_decay_ms_default_get() + : arena_muzzy_decay_ms_default_get()); READ(oldval, ssize_t); } if (newp != NULL) { @@ -3330,8 +3219,9 @@ arenas_decay_ms_ctl_impl(tsd_t *tsd, const size_t *mib, ret = EINVAL; goto label_return; } - if (dirty ? arena_dirty_decay_ms_default_set(*(ssize_t *)newp) - : arena_muzzy_decay_ms_default_set(*(ssize_t *)newp)) { + if (dirty + ? arena_dirty_decay_ms_default_set(*(ssize_t *)newp) + : arena_muzzy_decay_ms_default_set(*(ssize_t *)newp)) { ret = EFAULT; goto label_return; } @@ -3345,15 +3235,15 @@ label_return: static int arenas_dirty_decay_ms_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp, void *newp, size_t newlen) { - return arenas_decay_ms_ctl_impl(tsd, mib, miblen, oldp, oldlenp, newp, - newlen, true); + return arenas_decay_ms_ctl_impl( + tsd, mib, miblen, oldp, oldlenp, newp, newlen, true); } static int arenas_muzzy_decay_ms_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp, void *newp, size_t newlen) { - return arenas_decay_ms_ctl_impl(tsd, mib, miblen, oldp, oldlenp, newp, - newlen, false); + return arenas_decay_ms_ctl_impl( + tsd, mib, miblen, oldp, oldlenp, newp, newlen, false); } CTL_RO_NL_GEN(arenas_quantum, QUANTUM, size_t) @@ -3367,8 +3257,7 @@ CTL_RO_NL_GEN(arenas_bin_i_nregs, bin_infos[mib[2]].nregs, uint32_t) CTL_RO_NL_GEN(arenas_bin_i_slab_size, bin_infos[mib[2]].slab_size, size_t) CTL_RO_NL_GEN(arenas_bin_i_nshards, bin_infos[mib[2]].n_shards, uint32_t) static const ctl_named_node_t * -arenas_bin_i_index(tsdn_t *tsdn, const size_t *mib, - size_t miblen, size_t i) { +arenas_bin_i_index(tsdn_t *tsdn, const size_t *mib, size_t miblen, size_t i) { if (i > SC_NBINS) { return NULL; } @@ -3377,10 +3266,10 @@ arenas_bin_i_index(tsdn_t *tsdn, const size_t *mib, CTL_RO_NL_GEN(arenas_nlextents, SC_NSIZES - SC_NBINS, unsigned) CTL_RO_NL_GEN(arenas_lextent_i_size, - sz_index2size_unsafe(SC_NBINS+(szind_t)mib[2]), size_t) + sz_index2size_unsafe(SC_NBINS + 
(szind_t)mib[2]), size_t) static const ctl_named_node_t * -arenas_lextent_i_index(tsdn_t *tsdn, const size_t *mib, - size_t miblen, size_t i) { +arenas_lextent_i_index( + tsdn_t *tsdn, const size_t *mib, size_t miblen, size_t i) { if (i > SC_NSIZES - SC_NBINS) { return NULL; } @@ -3388,9 +3277,9 @@ arenas_lextent_i_index(tsdn_t *tsdn, const size_t *mib, } static int -arenas_create_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, - void *oldp, size_t *oldlenp, void *newp, size_t newlen) { - int ret; +arenas_create_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp, + size_t *oldlenp, void *newp, size_t newlen) { + int ret; unsigned arena_ind; malloc_mutex_lock(tsd_tsdn(tsd), &ctl_mtx); @@ -3411,10 +3300,9 @@ label_return: } static int -experimental_arenas_create_ext_ctl(tsd_t *tsd, - const size_t *mib, size_t miblen, +experimental_arenas_create_ext_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp, void *newp, size_t newlen) { - int ret; + int ret; unsigned arena_ind; malloc_mutex_lock(tsd_tsdn(tsd), &ctl_mtx); @@ -3435,22 +3323,21 @@ label_return: } static int -arenas_lookup_ctl(tsd_t *tsd, const size_t *mib, - size_t miblen, void *oldp, size_t *oldlenp, void *newp, - size_t newlen) { - int ret; - unsigned arena_ind; - void *ptr; +arenas_lookup_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp, + size_t *oldlenp, void *newp, size_t newlen) { + int ret; + unsigned arena_ind; + void *ptr; emap_full_alloc_ctx_t alloc_ctx; - bool ptr_not_present; - arena_t *arena; + bool ptr_not_present; + arena_t *arena; ptr = NULL; ret = EINVAL; malloc_mutex_lock(tsd_tsdn(tsd), &ctl_mtx); WRITE(ptr, void *); - ptr_not_present = emap_full_alloc_ctx_try_lookup(tsd_tsdn(tsd), &arena_emap_global, ptr, - &alloc_ctx); + ptr_not_present = emap_full_alloc_ctx_try_lookup( + tsd_tsdn(tsd), &arena_emap_global, ptr, &alloc_ctx); if (ptr_not_present || alloc_ctx.edata == NULL) { goto label_return; } @@ -3472,10 +3359,9 @@ label_return: 
/******************************************************************************/ static int -prof_thread_active_init_ctl(tsd_t *tsd, const size_t *mib, - size_t miblen, void *oldp, size_t *oldlenp, void *newp, - size_t newlen) { - int ret; +prof_thread_active_init_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, + void *oldp, size_t *oldlenp, void *newp, size_t newlen) { + int ret; bool oldval; if (!config_prof) { @@ -3491,11 +3377,11 @@ prof_thread_active_init_ctl(tsd_t *tsd, const size_t *mib, ret = EINVAL; goto label_return; } - oldval = prof_thread_active_init_set(tsd_tsdn(tsd), - *(bool *)newp); + oldval = prof_thread_active_init_set( + tsd_tsdn(tsd), *(bool *)newp); } else { - oldval = opt_prof ? prof_thread_active_init_get(tsd_tsdn(tsd)) : - false; + oldval = opt_prof ? prof_thread_active_init_get(tsd_tsdn(tsd)) + : false; } READ(oldval, bool); @@ -3505,9 +3391,9 @@ label_return: } static int -prof_active_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, - void *oldp, size_t *oldlenp, void *newp, size_t newlen) { - int ret; +prof_active_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp, + size_t *oldlenp, void *newp, size_t newlen) { + int ret; bool oldval; if (!config_prof) { @@ -3543,9 +3429,9 @@ label_return: } static int -prof_dump_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, - void *oldp, size_t *oldlenp, void *newp, size_t newlen) { - int ret; +prof_dump_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp, + size_t *oldlenp, void *newp, size_t newlen) { + int ret; const char *filename = NULL; if (!config_prof || !opt_prof) { @@ -3566,9 +3452,9 @@ label_return: } static int -prof_gdump_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, - void *oldp, size_t *oldlenp, void *newp, size_t newlen) { - int ret; +prof_gdump_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp, + size_t *oldlenp, void *newp, size_t newlen) { + int ret; bool oldval; if (!config_prof) { @@ -3596,9 +3482,9 @@ label_return: } static int 
-prof_prefix_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, - void *oldp, size_t *oldlenp, void *newp, size_t newlen) { - int ret; +prof_prefix_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp, + size_t *oldlenp, void *newp, size_t newlen) { + int ret; const char *prefix = NULL; if (!config_prof || !opt_prof) { @@ -3616,9 +3502,9 @@ label_return: } static int -prof_reset_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, - void *oldp, size_t *oldlenp, void *newp, size_t newlen) { - int ret; +prof_reset_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp, + size_t *oldlenp, void *newp, size_t newlen) { + int ret; size_t lg_sample = lg_prof_sample; if (!config_prof || !opt_prof) { @@ -3689,8 +3575,7 @@ experimental_hooks_prof_backtrace_ctl(tsd_t *tsd, const size_t *mib, goto label_return; } if (oldp != NULL) { - prof_backtrace_hook_t old_hook = - prof_backtrace_hook_get(); + prof_backtrace_hook_t old_hook = prof_backtrace_hook_get(); READ(old_hook, prof_backtrace_hook_t); } if (newp != NULL) { @@ -3712,8 +3597,8 @@ label_return: } static int -experimental_hooks_prof_dump_ctl(tsd_t *tsd, const size_t *mib, - size_t miblen, void *oldp, size_t *oldlenp, void *newp, size_t newlen) { +experimental_hooks_prof_dump_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, + void *oldp, size_t *oldlenp, void *newp, size_t newlen) { int ret; if (oldp == NULL && newp == NULL) { @@ -3721,8 +3606,7 @@ experimental_hooks_prof_dump_ctl(tsd_t *tsd, const size_t *mib, goto label_return; } if (oldp != NULL) { - prof_dump_hook_t old_hook = - prof_dump_hook_get(); + prof_dump_hook_t old_hook = prof_dump_hook_get(); READ(old_hook, prof_dump_hook_t); } if (newp != NULL) { @@ -3740,8 +3624,8 @@ label_return: } static int -experimental_hooks_prof_sample_ctl(tsd_t *tsd, const size_t *mib, - size_t miblen, void *oldp, size_t *oldlenp, void *newp, size_t newlen) { +experimental_hooks_prof_sample_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, + void *oldp, size_t *oldlenp, 
void *newp, size_t newlen) { int ret; if (oldp == NULL && newp == NULL) { @@ -3749,8 +3633,7 @@ experimental_hooks_prof_sample_ctl(tsd_t *tsd, const size_t *mib, goto label_return; } if (oldp != NULL) { - prof_sample_hook_t old_hook = - prof_sample_hook_get(); + prof_sample_hook_t old_hook = prof_sample_hook_get(); READ(old_hook, prof_sample_hook_t); } if (newp != NULL) { @@ -3777,8 +3660,7 @@ experimental_hooks_prof_sample_free_ctl(tsd_t *tsd, const size_t *mib, goto label_return; } if (oldp != NULL) { - prof_sample_free_hook_t old_hook = - prof_sample_free_hook_get(); + prof_sample_free_hook_t old_hook = prof_sample_free_hook_get(); READ(old_hook, prof_sample_free_hook_t); } if (newp != NULL) { @@ -3795,7 +3677,6 @@ label_return: return ret; } - static int experimental_hooks_prof_threshold_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp, void *newp, size_t newlen) { @@ -3806,8 +3687,7 @@ experimental_hooks_prof_threshold_ctl(tsd_t *tsd, const size_t *mib, goto label_return; } if (oldp != NULL) { - prof_threshold_hook_t old_hook = - prof_threshold_hook_get(); + prof_threshold_hook_t old_hook = prof_threshold_hook_get(); READ(old_hook, prof_threshold_hook_t); } if (newp != NULL) { @@ -3822,7 +3702,7 @@ label_return: static int experimental_hooks_thread_event_ctl(tsd_t *tsd, const size_t *mib, - size_t miblen, void *oldp, size_t *oldlenp, void *newp, size_t newlen) { + size_t miblen, void *oldp, size_t *oldlenp, void *newp, size_t newlen) { int ret; if (newp == NULL) { @@ -3864,10 +3744,10 @@ label_return: CTL_RO_CGEN(config_stats, stats_allocated, ctl_stats->allocated, size_t) CTL_RO_CGEN(config_stats, stats_active, ctl_stats->active, size_t) CTL_RO_CGEN(config_stats, stats_metadata, ctl_stats->metadata, size_t) -CTL_RO_CGEN(config_stats, stats_metadata_edata, ctl_stats->metadata_edata, - size_t) -CTL_RO_CGEN(config_stats, stats_metadata_rtree, ctl_stats->metadata_rtree, - size_t) +CTL_RO_CGEN( + config_stats, stats_metadata_edata, 
ctl_stats->metadata_edata, size_t) +CTL_RO_CGEN( + config_stats, stats_metadata_rtree, ctl_stats->metadata_rtree, size_t) CTL_RO_CGEN(config_stats, stats_metadata_thp, ctl_stats->metadata_thp, size_t) CTL_RO_CGEN(config_stats, stats_resident, ctl_stats->resident, size_t) CTL_RO_CGEN(config_stats, stats_mapped, ctl_stats->mapped, size_t) @@ -3884,10 +3764,10 @@ CTL_RO_CGEN(config_stats, stats_zero_reallocs, atomic_load_zu(&zero_realloc_count, ATOMIC_RELAXED), size_t) CTL_RO_GEN(stats_arenas_i_dss, arenas_i(mib[2])->dss, const char *) -CTL_RO_GEN(stats_arenas_i_dirty_decay_ms, arenas_i(mib[2])->dirty_decay_ms, - ssize_t) -CTL_RO_GEN(stats_arenas_i_muzzy_decay_ms, arenas_i(mib[2])->muzzy_decay_ms, - ssize_t) +CTL_RO_GEN( + stats_arenas_i_dirty_decay_ms, arenas_i(mib[2])->dirty_decay_ms, ssize_t) +CTL_RO_GEN( + stats_arenas_i_muzzy_decay_ms, arenas_i(mib[2])->muzzy_decay_ms, ssize_t) CTL_RO_GEN(stats_arenas_i_nthreads, arenas_i(mib[2])->nthreads, unsigned) CTL_RO_GEN(stats_arenas_i_uptime, nstime_ns(&arenas_i(mib[2])->astats->astats.uptime), uint64_t) @@ -3903,33 +3783,38 @@ CTL_RO_CGEN(config_stats, stats_arenas_i_extent_avail, CTL_RO_CGEN(config_stats, stats_arenas_i_dirty_npurge, locked_read_u64_unsynchronized( - &arenas_i(mib[2])->astats->astats.pa_shard_stats.pac_stats.decay_dirty.npurge), + &arenas_i(mib[2]) + ->astats->astats.pa_shard_stats.pac_stats.decay_dirty.npurge), uint64_t) CTL_RO_CGEN(config_stats, stats_arenas_i_dirty_nmadvise, locked_read_u64_unsynchronized( - &arenas_i(mib[2])->astats->astats.pa_shard_stats.pac_stats.decay_dirty.nmadvise), + &arenas_i(mib[2]) + ->astats->astats.pa_shard_stats.pac_stats.decay_dirty.nmadvise), uint64_t) CTL_RO_CGEN(config_stats, stats_arenas_i_dirty_purged, locked_read_u64_unsynchronized( - &arenas_i(mib[2])->astats->astats.pa_shard_stats.pac_stats.decay_dirty.purged), + &arenas_i(mib[2]) + ->astats->astats.pa_shard_stats.pac_stats.decay_dirty.purged), uint64_t) CTL_RO_CGEN(config_stats, stats_arenas_i_muzzy_npurge, 
locked_read_u64_unsynchronized( - &arenas_i(mib[2])->astats->astats.pa_shard_stats.pac_stats.decay_muzzy.npurge), + &arenas_i(mib[2]) + ->astats->astats.pa_shard_stats.pac_stats.decay_muzzy.npurge), uint64_t) CTL_RO_CGEN(config_stats, stats_arenas_i_muzzy_nmadvise, locked_read_u64_unsynchronized( - &arenas_i(mib[2])->astats->astats.pa_shard_stats.pac_stats.decay_muzzy.nmadvise), + &arenas_i(mib[2]) + ->astats->astats.pa_shard_stats.pac_stats.decay_muzzy.nmadvise), uint64_t) CTL_RO_CGEN(config_stats, stats_arenas_i_muzzy_purged, locked_read_u64_unsynchronized( - &arenas_i(mib[2])->astats->astats.pa_shard_stats.pac_stats.decay_muzzy.purged), + &arenas_i(mib[2]) + ->astats->astats.pa_shard_stats.pac_stats.decay_muzzy.purged), uint64_t) CTL_RO_CGEN(config_stats, stats_arenas_i_base, - arenas_i(mib[2])->astats->astats.base, - size_t) + arenas_i(mib[2])->astats->astats.base, size_t) CTL_RO_CGEN(config_stats, stats_arenas_i_internal, atomic_load_zu(&arenas_i(mib[2])->astats->astats.internal, ATOMIC_RELAXED), size_t) @@ -3944,12 +3829,12 @@ CTL_RO_CGEN(config_stats, stats_arenas_i_tcache_bytes, CTL_RO_CGEN(config_stats, stats_arenas_i_tcache_stashed_bytes, arenas_i(mib[2])->astats->astats.tcache_stashed_bytes, size_t) CTL_RO_CGEN(config_stats, stats_arenas_i_resident, - arenas_i(mib[2])->astats->astats.resident, - size_t) + arenas_i(mib[2])->astats->astats.resident, size_t) CTL_RO_CGEN(config_stats, stats_arenas_i_abandoned_vm, atomic_load_zu( - &arenas_i(mib[2])->astats->astats.pa_shard_stats.pac_stats.abandoned_vm, - ATOMIC_RELAXED), size_t) + &arenas_i(mib[2])->astats->astats.pa_shard_stats.pac_stats.abandoned_vm, + ATOMIC_RELAXED), + size_t) CTL_RO_CGEN(config_stats, stats_arenas_i_hpa_sec_bytes, arenas_i(mib[2])->astats->secstats.bytes, size_t) @@ -3984,55 +3869,55 @@ CTL_RO_CGEN(config_stats, stats_arenas_i_large_nflushes, arenas_i(mib[2])->astats->astats.nflushes_large, uint64_t) /* Lock profiling related APIs below. 
*/ -#define RO_MUTEX_CTL_GEN(n, l) \ -CTL_RO_CGEN(config_stats, stats_##n##_num_ops, \ - l.n_lock_ops, uint64_t) \ -CTL_RO_CGEN(config_stats, stats_##n##_num_wait, \ - l.n_wait_times, uint64_t) \ -CTL_RO_CGEN(config_stats, stats_##n##_num_spin_acq, \ - l.n_spin_acquired, uint64_t) \ -CTL_RO_CGEN(config_stats, stats_##n##_num_owner_switch, \ - l.n_owner_switches, uint64_t) \ -CTL_RO_CGEN(config_stats, stats_##n##_total_wait_time, \ - nstime_ns(&l.tot_wait_time), uint64_t) \ -CTL_RO_CGEN(config_stats, stats_##n##_max_wait_time, \ - nstime_ns(&l.max_wait_time), uint64_t) \ -CTL_RO_CGEN(config_stats, stats_##n##_max_num_thds, \ - l.max_n_thds, uint32_t) +#define RO_MUTEX_CTL_GEN(n, l) \ + CTL_RO_CGEN(config_stats, stats_##n##_num_ops, l.n_lock_ops, uint64_t) \ + CTL_RO_CGEN( \ + config_stats, stats_##n##_num_wait, l.n_wait_times, uint64_t) \ + CTL_RO_CGEN(config_stats, stats_##n##_num_spin_acq, l.n_spin_acquired, \ + uint64_t) \ + CTL_RO_CGEN(config_stats, stats_##n##_num_owner_switch, \ + l.n_owner_switches, uint64_t) \ + CTL_RO_CGEN(config_stats, stats_##n##_total_wait_time, \ + nstime_ns(&l.tot_wait_time), uint64_t) \ + CTL_RO_CGEN(config_stats, stats_##n##_max_wait_time, \ + nstime_ns(&l.max_wait_time), uint64_t) \ + CTL_RO_CGEN( \ + config_stats, stats_##n##_max_num_thds, l.max_n_thds, uint32_t) /* Global mutexes. 
*/ -#define OP(mtx) \ - RO_MUTEX_CTL_GEN(mutexes_##mtx, \ - ctl_stats->mutex_prof_data[global_prof_mutex_##mtx]) +#define OP(mtx) \ + RO_MUTEX_CTL_GEN(mutexes_##mtx, \ + ctl_stats->mutex_prof_data[global_prof_mutex_##mtx]) MUTEX_PROF_GLOBAL_MUTEXES #undef OP /* Per arena mutexes */ -#define OP(mtx) RO_MUTEX_CTL_GEN(arenas_i_mutexes_##mtx, \ - arenas_i(mib[2])->astats->astats.mutex_prof_data[arena_prof_mutex_##mtx]) +#define OP(mtx) \ + RO_MUTEX_CTL_GEN(arenas_i_mutexes_##mtx, \ + arenas_i(mib[2]) \ + ->astats->astats.mutex_prof_data[arena_prof_mutex_##mtx]) MUTEX_PROF_ARENA_MUTEXES #undef OP /* tcache bin mutex */ -RO_MUTEX_CTL_GEN(arenas_i_bins_j_mutex, - arenas_i(mib[2])->astats->bstats[mib[4]].mutex_data) +RO_MUTEX_CTL_GEN( + arenas_i_bins_j_mutex, arenas_i(mib[2])->astats->bstats[mib[4]].mutex_data) #undef RO_MUTEX_CTL_GEN /* Resets all mutex stats, including global, arena and bin mutexes. */ static int -stats_mutexes_reset_ctl(tsd_t *tsd, const size_t *mib, - size_t miblen, void *oldp, size_t *oldlenp, - void *newp, size_t newlen) { +stats_mutexes_reset_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, + void *oldp, size_t *oldlenp, void *newp, size_t newlen) { if (!config_stats) { return ENOENT; } tsdn_t *tsdn = tsd_tsdn(tsd); -#define MUTEX_PROF_RESET(mtx) \ - malloc_mutex_lock(tsdn, &mtx); \ - malloc_mutex_prof_data_reset(tsdn, &mtx); \ - malloc_mutex_unlock(tsdn, &mtx); +#define MUTEX_PROF_RESET(mtx) \ + malloc_mutex_lock(tsdn, &mtx); \ + malloc_mutex_prof_data_reset(tsdn, &mtx); \ + malloc_mutex_unlock(tsdn, &mtx); /* Global mutexes: ctl and prof. 
*/ MUTEX_PROF_RESET(ctl_mtx); @@ -4100,15 +3985,17 @@ CTL_RO_CGEN(config_stats, stats_arenas_i_bins_j_nonfull_slabs, CTL_RO_CGEN(config_stats, stats_arenas_i_bins_j_batch_pops, arenas_i(mib[2])->astats->bstats[mib[4]].stats_data.batch_pops, uint64_t) CTL_RO_CGEN(config_stats, stats_arenas_i_bins_j_batch_failed_pushes, - arenas_i(mib[2])->astats->bstats[mib[4]].stats_data.batch_failed_pushes, uint64_t) + arenas_i(mib[2])->astats->bstats[mib[4]].stats_data.batch_failed_pushes, + uint64_t) CTL_RO_CGEN(config_stats, stats_arenas_i_bins_j_batch_pushes, arenas_i(mib[2])->astats->bstats[mib[4]].stats_data.batch_pushes, uint64_t) CTL_RO_CGEN(config_stats, stats_arenas_i_bins_j_batch_pushed_elems, - arenas_i(mib[2])->astats->bstats[mib[4]].stats_data.batch_pushed_elems, uint64_t) + arenas_i(mib[2])->astats->bstats[mib[4]].stats_data.batch_pushed_elems, + uint64_t) static const ctl_named_node_t * -stats_arenas_i_bins_j_index(tsdn_t *tsdn, const size_t *mib, - size_t miblen, size_t j) { +stats_arenas_i_bins_j_index( + tsdn_t *tsdn, const size_t *mib, size_t miblen, size_t j) { if (j > SC_NBINS) { return NULL; } @@ -4117,19 +4004,22 @@ stats_arenas_i_bins_j_index(tsdn_t *tsdn, const size_t *mib, CTL_RO_CGEN(config_stats, stats_arenas_i_lextents_j_nmalloc, locked_read_u64_unsynchronized( - &arenas_i(mib[2])->astats->lstats[mib[4]].nmalloc), uint64_t) + &arenas_i(mib[2])->astats->lstats[mib[4]].nmalloc), + uint64_t) CTL_RO_CGEN(config_stats, stats_arenas_i_lextents_j_ndalloc, locked_read_u64_unsynchronized( - &arenas_i(mib[2])->astats->lstats[mib[4]].ndalloc), uint64_t) + &arenas_i(mib[2])->astats->lstats[mib[4]].ndalloc), + uint64_t) CTL_RO_CGEN(config_stats, stats_arenas_i_lextents_j_nrequests, locked_read_u64_unsynchronized( - &arenas_i(mib[2])->astats->lstats[mib[4]].nrequests), uint64_t) + &arenas_i(mib[2])->astats->lstats[mib[4]].nrequests), + uint64_t) CTL_RO_CGEN(config_stats, stats_arenas_i_lextents_j_curlextents, arenas_i(mib[2])->astats->lstats[mib[4]].curlextents, 
size_t) static const ctl_named_node_t * -stats_arenas_i_lextents_j_index(tsdn_t *tsdn, const size_t *mib, - size_t miblen, size_t j) { +stats_arenas_i_lextents_j_index( + tsdn_t *tsdn, const size_t *mib, size_t miblen, size_t j) { if (j > SC_NSIZES - SC_NBINS) { return NULL; } @@ -4137,21 +4027,21 @@ stats_arenas_i_lextents_j_index(tsdn_t *tsdn, const size_t *mib, } CTL_RO_CGEN(config_stats, stats_arenas_i_extents_j_ndirty, - arenas_i(mib[2])->astats->estats[mib[4]].ndirty, size_t); + arenas_i(mib[2])->astats->estats[mib[4]].ndirty, size_t); CTL_RO_CGEN(config_stats, stats_arenas_i_extents_j_nmuzzy, - arenas_i(mib[2])->astats->estats[mib[4]].nmuzzy, size_t); + arenas_i(mib[2])->astats->estats[mib[4]].nmuzzy, size_t); CTL_RO_CGEN(config_stats, stats_arenas_i_extents_j_nretained, - arenas_i(mib[2])->astats->estats[mib[4]].nretained, size_t); + arenas_i(mib[2])->astats->estats[mib[4]].nretained, size_t); CTL_RO_CGEN(config_stats, stats_arenas_i_extents_j_dirty_bytes, - arenas_i(mib[2])->astats->estats[mib[4]].dirty_bytes, size_t); + arenas_i(mib[2])->astats->estats[mib[4]].dirty_bytes, size_t); CTL_RO_CGEN(config_stats, stats_arenas_i_extents_j_muzzy_bytes, - arenas_i(mib[2])->astats->estats[mib[4]].muzzy_bytes, size_t); + arenas_i(mib[2])->astats->estats[mib[4]].muzzy_bytes, size_t); CTL_RO_CGEN(config_stats, stats_arenas_i_extents_j_retained_bytes, - arenas_i(mib[2])->astats->estats[mib[4]].retained_bytes, size_t); + arenas_i(mib[2])->astats->estats[mib[4]].retained_bytes, size_t); static const ctl_named_node_t * -stats_arenas_i_extents_j_index(tsdn_t *tsdn, const size_t *mib, - size_t miblen, size_t j) { +stats_arenas_i_extents_j_index( + tsdn_t *tsdn, const size_t *mib, size_t miblen, size_t j) { if (j >= SC_NPSIZES) { return NULL; } @@ -4182,7 +4072,8 @@ CTL_RO_CGEN(config_stats, stats_arenas_i_hpa_shard_slabs_ndirty_huge, arenas_i(mib[2])->astats->hpastats.psset_stats.slabs[1].ndirty, size_t); CTL_RO_CGEN(config_stats, stats_arenas_i_hpa_shard_npurge_passes, - 
arenas_i(mib[2])->astats->hpastats.nonderived_stats.npurge_passes, uint64_t); + arenas_i(mib[2])->astats->hpastats.nonderived_stats.npurge_passes, + uint64_t); CTL_RO_CGEN(config_stats, stats_arenas_i_hpa_shard_npurges, arenas_i(mib[2])->astats->hpastats.nonderived_stats.npurges, uint64_t); CTL_RO_CGEN(config_stats, stats_arenas_i_hpa_shard_nhugifies, @@ -4194,66 +4085,92 @@ CTL_RO_CGEN(config_stats, stats_arenas_i_hpa_shard_ndehugifies, arenas_i(mib[2])->astats->hpastats.nonderived_stats.ndehugifies, uint64_t); /* Full, nonhuge */ -CTL_RO_CGEN(config_stats, stats_arenas_i_hpa_shard_full_slabs_npageslabs_nonhuge, +CTL_RO_CGEN(config_stats, + stats_arenas_i_hpa_shard_full_slabs_npageslabs_nonhuge, arenas_i(mib[2])->astats->hpastats.psset_stats.full_slabs[0].npageslabs, size_t); CTL_RO_CGEN(config_stats, stats_arenas_i_hpa_shard_full_slabs_nactive_nonhuge, - arenas_i(mib[2])->astats->hpastats.psset_stats.full_slabs[0].nactive, size_t); + arenas_i(mib[2])->astats->hpastats.psset_stats.full_slabs[0].nactive, + size_t); CTL_RO_CGEN(config_stats, stats_arenas_i_hpa_shard_full_slabs_ndirty_nonhuge, - arenas_i(mib[2])->astats->hpastats.psset_stats.full_slabs[0].ndirty, size_t); + arenas_i(mib[2])->astats->hpastats.psset_stats.full_slabs[0].ndirty, + size_t); /* Full, huge */ CTL_RO_CGEN(config_stats, stats_arenas_i_hpa_shard_full_slabs_npageslabs_huge, arenas_i(mib[2])->astats->hpastats.psset_stats.full_slabs[1].npageslabs, size_t); CTL_RO_CGEN(config_stats, stats_arenas_i_hpa_shard_full_slabs_nactive_huge, - arenas_i(mib[2])->astats->hpastats.psset_stats.full_slabs[1].nactive, size_t); + arenas_i(mib[2])->astats->hpastats.psset_stats.full_slabs[1].nactive, + size_t); CTL_RO_CGEN(config_stats, stats_arenas_i_hpa_shard_full_slabs_ndirty_huge, - arenas_i(mib[2])->astats->hpastats.psset_stats.full_slabs[1].ndirty, size_t); + arenas_i(mib[2])->astats->hpastats.psset_stats.full_slabs[1].ndirty, + size_t); /* Empty, nonhuge */ -CTL_RO_CGEN(config_stats, 
stats_arenas_i_hpa_shard_empty_slabs_npageslabs_nonhuge, +CTL_RO_CGEN(config_stats, + stats_arenas_i_hpa_shard_empty_slabs_npageslabs_nonhuge, arenas_i(mib[2])->astats->hpastats.psset_stats.empty_slabs[0].npageslabs, size_t); CTL_RO_CGEN(config_stats, stats_arenas_i_hpa_shard_empty_slabs_nactive_nonhuge, - arenas_i(mib[2])->astats->hpastats.psset_stats.empty_slabs[0].nactive, size_t); + arenas_i(mib[2])->astats->hpastats.psset_stats.empty_slabs[0].nactive, + size_t); CTL_RO_CGEN(config_stats, stats_arenas_i_hpa_shard_empty_slabs_ndirty_nonhuge, - arenas_i(mib[2])->astats->hpastats.psset_stats.empty_slabs[0].ndirty, size_t); + arenas_i(mib[2])->astats->hpastats.psset_stats.empty_slabs[0].ndirty, + size_t); /* Empty, huge */ CTL_RO_CGEN(config_stats, stats_arenas_i_hpa_shard_empty_slabs_npageslabs_huge, arenas_i(mib[2])->astats->hpastats.psset_stats.empty_slabs[1].npageslabs, size_t); CTL_RO_CGEN(config_stats, stats_arenas_i_hpa_shard_empty_slabs_nactive_huge, - arenas_i(mib[2])->astats->hpastats.psset_stats.empty_slabs[1].nactive, size_t); + arenas_i(mib[2])->astats->hpastats.psset_stats.empty_slabs[1].nactive, + size_t); CTL_RO_CGEN(config_stats, stats_arenas_i_hpa_shard_empty_slabs_ndirty_huge, - arenas_i(mib[2])->astats->hpastats.psset_stats.empty_slabs[1].ndirty, size_t); + arenas_i(mib[2])->astats->hpastats.psset_stats.empty_slabs[1].ndirty, + size_t); /* Nonfull, nonhuge */ -CTL_RO_CGEN(config_stats, stats_arenas_i_hpa_shard_nonfull_slabs_j_npageslabs_nonhuge, - arenas_i(mib[2])->astats->hpastats.psset_stats.nonfull_slabs[mib[5]][0].npageslabs, +CTL_RO_CGEN(config_stats, + stats_arenas_i_hpa_shard_nonfull_slabs_j_npageslabs_nonhuge, + arenas_i(mib[2]) + ->astats->hpastats.psset_stats.nonfull_slabs[mib[5]][0] + .npageslabs, size_t); -CTL_RO_CGEN(config_stats, stats_arenas_i_hpa_shard_nonfull_slabs_j_nactive_nonhuge, - arenas_i(mib[2])->astats->hpastats.psset_stats.nonfull_slabs[mib[5]][0].nactive, +CTL_RO_CGEN(config_stats, + 
stats_arenas_i_hpa_shard_nonfull_slabs_j_nactive_nonhuge, + arenas_i(mib[2]) + ->astats->hpastats.psset_stats.nonfull_slabs[mib[5]][0] + .nactive, size_t); -CTL_RO_CGEN(config_stats, stats_arenas_i_hpa_shard_nonfull_slabs_j_ndirty_nonhuge, - arenas_i(mib[2])->astats->hpastats.psset_stats.nonfull_slabs[mib[5]][0].ndirty, +CTL_RO_CGEN(config_stats, + stats_arenas_i_hpa_shard_nonfull_slabs_j_ndirty_nonhuge, + arenas_i(mib[2]) + ->astats->hpastats.psset_stats.nonfull_slabs[mib[5]][0] + .ndirty, size_t); /* Nonfull, huge */ -CTL_RO_CGEN(config_stats, stats_arenas_i_hpa_shard_nonfull_slabs_j_npageslabs_huge, - arenas_i(mib[2])->astats->hpastats.psset_stats.nonfull_slabs[mib[5]][1].npageslabs, +CTL_RO_CGEN(config_stats, + stats_arenas_i_hpa_shard_nonfull_slabs_j_npageslabs_huge, + arenas_i(mib[2]) + ->astats->hpastats.psset_stats.nonfull_slabs[mib[5]][1] + .npageslabs, size_t); CTL_RO_CGEN(config_stats, stats_arenas_i_hpa_shard_nonfull_slabs_j_nactive_huge, - arenas_i(mib[2])->astats->hpastats.psset_stats.nonfull_slabs[mib[5]][1].nactive, + arenas_i(mib[2]) + ->astats->hpastats.psset_stats.nonfull_slabs[mib[5]][1] + .nactive, size_t); CTL_RO_CGEN(config_stats, stats_arenas_i_hpa_shard_nonfull_slabs_j_ndirty_huge, - arenas_i(mib[2])->astats->hpastats.psset_stats.nonfull_slabs[mib[5]][1].ndirty, + arenas_i(mib[2]) + ->astats->hpastats.psset_stats.nonfull_slabs[mib[5]][1] + .ndirty, size_t); static const ctl_named_node_t * -stats_arenas_i_hpa_shard_nonfull_slabs_j_index(tsdn_t *tsdn, const size_t *mib, - size_t miblen, size_t j) { +stats_arenas_i_hpa_shard_nonfull_slabs_j_index( + tsdn_t *tsdn, const size_t *mib, size_t miblen, size_t j) { if (j >= PSSET_NPSIZES) { return NULL; } @@ -4271,8 +4188,7 @@ ctl_arenas_i_verify(size_t i) { } static const ctl_named_node_t * -stats_arenas_i_index(tsdn_t *tsdn, const size_t *mib, - size_t miblen, size_t i) { +stats_arenas_i_index(tsdn_t *tsdn, const size_t *mib, size_t miblen, size_t i) { const ctl_named_node_t *ret; 
malloc_mutex_lock(tsdn, &ctl_mtx); @@ -4291,7 +4207,7 @@ static int experimental_hooks_install_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp, void *newp, size_t newlen) { int ret; - if (oldp == NULL || oldlenp == NULL|| newp == NULL) { + if (oldp == NULL || oldlenp == NULL || newp == NULL) { ret = EINVAL; goto label_return; } @@ -4426,8 +4342,8 @@ label_return: * motivation from C++. */ static int -experimental_utilization_query_ctl(tsd_t *tsd, const size_t *mib, - size_t miblen, void *oldp, size_t *oldlenp, void *newp, size_t newlen) { +experimental_utilization_query_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, + void *oldp, size_t *oldlenp, void *newp, size_t newlen) { int ret; assert(sizeof(inspect_extent_util_stats_verbose_t) @@ -4442,8 +4358,8 @@ experimental_utilization_query_ctl(tsd_t *tsd, const size_t *mib, void *ptr = NULL; WRITE(ptr, void *); - inspect_extent_util_stats_verbose_t *util_stats - = (inspect_extent_util_stats_verbose_t *)oldp; + inspect_extent_util_stats_verbose_t *util_stats = + (inspect_extent_util_stats_verbose_t *)oldp; inspect_extent_util_stats_verbose_get(tsd_tsdn(tsd), ptr, &util_stats->nfree, &util_stats->nregs, &util_stats->size, &util_stats->bin_nfree, &util_stats->bin_nregs, @@ -4565,7 +4481,7 @@ experimental_utilization_batch_query_ctl(tsd_t *tsd, const size_t *mib, goto label_return; } - void **ptrs = (void **)newp; + void **ptrs = (void **)newp; inspect_extent_util_stats_t *util_stats = (inspect_extent_util_stats_t *)oldp; size_t i; @@ -4581,8 +4497,8 @@ label_return: } static const ctl_named_node_t * -experimental_arenas_i_index(tsdn_t *tsdn, const size_t *mib, - size_t miblen, size_t i) { +experimental_arenas_i_index( + tsdn_t *tsdn, const size_t *mib, size_t miblen, size_t i) { const ctl_named_node_t *ret; malloc_mutex_lock(tsdn, &ctl_mtx); @@ -4597,8 +4513,8 @@ label_return: } static int -experimental_arenas_i_pactivep_ctl(tsd_t *tsd, const size_t *mib, - size_t miblen, void *oldp, 
size_t *oldlenp, void *newp, size_t newlen) { +experimental_arenas_i_pactivep_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, + void *oldp, size_t *oldlenp, void *newp, size_t newlen) { if (!config_stats) { return ENOENT; } @@ -4608,16 +4524,16 @@ experimental_arenas_i_pactivep_ctl(tsd_t *tsd, const size_t *mib, unsigned arena_ind; arena_t *arena; - int ret; - size_t *pactivep; + int ret; + size_t *pactivep; malloc_mutex_lock(tsd_tsdn(tsd), &ctl_mtx); READONLY(); MIB_UNSIGNED(arena_ind, 2); - if (arena_ind < narenas_total_get() && (arena = - arena_get(tsd_tsdn(tsd), arena_ind, false)) != NULL) { -#if defined(JEMALLOC_GCC_ATOMIC_ATOMICS) || \ - defined(JEMALLOC_GCC_SYNC_ATOMICS) || defined(_MSC_VER) + if (arena_ind < narenas_total_get() + && (arena = arena_get(tsd_tsdn(tsd), arena_ind, false)) != NULL) { +#if defined(JEMALLOC_GCC_ATOMIC_ATOMICS) || defined(JEMALLOC_GCC_SYNC_ATOMICS) \ + || defined(_MSC_VER) /* Expose the underlying counter for fast read. */ pactivep = (size_t *)&(arena->pa_shard.nactive.repr); READ(pactivep, size_t *); @@ -4669,7 +4585,7 @@ label_return: typedef struct write_cb_packet_s write_cb_packet_t; struct write_cb_packet_s { write_cb_t *write_cb; - void *cbopaque; + void *cbopaque; }; static int @@ -4688,8 +4604,8 @@ experimental_prof_recent_alloc_dump_ctl(tsd_t *tsd, const size_t *mib, write_cb_packet_t write_cb_packet; ASSURED_WRITE(write_cb_packet, write_cb_packet_t); - prof_recent_alloc_dump(tsd, write_cb_packet.write_cb, - write_cb_packet.cbopaque); + prof_recent_alloc_dump( + tsd, write_cb_packet.write_cb, write_cb_packet.cbopaque); ret = 0; @@ -4702,12 +4618,12 @@ struct batch_alloc_packet_s { void **ptrs; size_t num; size_t size; - int flags; + int flags; }; static int -experimental_batch_alloc_ctl(tsd_t *tsd, const size_t *mib, - size_t miblen, void *oldp, size_t *oldlenp, void *newp, size_t newlen) { +experimental_batch_alloc_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, + void *oldp, size_t *oldlenp, void *newp, size_t 
newlen) { int ret; VERIFY_READ(size_t); @@ -4728,8 +4644,8 @@ label_return: static int prof_stats_bins_i_live_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp, void *newp, size_t newlen) { - int ret; - unsigned binind; + int ret; + unsigned binind; prof_stats_t stats; if (!(config_prof && opt_prof && opt_prof_stats)) { @@ -4754,8 +4670,8 @@ label_return: static int prof_stats_bins_i_accum_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp, void *newp, size_t newlen) { - int ret; - unsigned binind; + int ret; + unsigned binind; prof_stats_t stats; if (!(config_prof && opt_prof && opt_prof_stats)) { @@ -4778,8 +4694,8 @@ label_return: } static const ctl_named_node_t * -prof_stats_bins_i_index(tsdn_t *tsdn, const size_t *mib, size_t miblen, - size_t i) { +prof_stats_bins_i_index( + tsdn_t *tsdn, const size_t *mib, size_t miblen, size_t i) { if (!(config_prof && opt_prof && opt_prof_stats)) { return NULL; } @@ -4792,8 +4708,8 @@ prof_stats_bins_i_index(tsdn_t *tsdn, const size_t *mib, size_t miblen, static int prof_stats_lextents_i_live_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp, void *newp, size_t newlen) { - int ret; - unsigned lextent_ind; + int ret; + unsigned lextent_ind; prof_stats_t stats; if (!(config_prof && opt_prof && opt_prof_stats)) { @@ -4818,8 +4734,8 @@ label_return: static int prof_stats_lextents_i_accum_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp, void *newp, size_t newlen) { - int ret; - unsigned lextent_ind; + int ret; + unsigned lextent_ind; prof_stats_t stats; if (!(config_prof && opt_prof && opt_prof_stats)) { @@ -4842,8 +4758,8 @@ label_return: } static const ctl_named_node_t * -prof_stats_lextents_i_index(tsdn_t *tsdn, const size_t *mib, size_t miblen, - size_t i) { +prof_stats_lextents_i_index( + tsdn_t *tsdn, const size_t *mib, size_t miblen, size_t i) { if (!(config_prof && opt_prof && opt_prof_stats)) { return NULL; } 
diff --git a/src/decay.c b/src/decay.c index f75696dd..7bbce2a6 100644 --- a/src/decay.c +++ b/src/decay.c @@ -4,9 +4,8 @@ #include "jemalloc/internal/decay.h" static const uint64_t h_steps[SMOOTHSTEP_NSTEPS] = { -#define STEP(step, h, x, y) \ - h, - SMOOTHSTEP +#define STEP(step, h, x, y) h, + SMOOTHSTEP #undef STEP }; @@ -21,8 +20,9 @@ decay_deadline_init(decay_t *decay) { if (decay_ms_read(decay) > 0) { nstime_t jitter; - nstime_init(&jitter, prng_range_u64(&decay->jitter_state, - nstime_ns(&decay->interval))); + nstime_init(&jitter, + prng_range_u64( + &decay->jitter_state, nstime_ns(&decay->interval))); nstime_add(&decay->deadline, &jitter); } } @@ -31,8 +31,8 @@ void decay_reinit(decay_t *decay, nstime_t *cur_time, ssize_t decay_ms) { atomic_store_zd(&decay->time_ms, decay_ms, ATOMIC_RELAXED); if (decay_ms > 0) { - nstime_init(&decay->interval, (uint64_t)decay_ms * - KQU(1000000)); + nstime_init( + &decay->interval, (uint64_t)decay_ms * KQU(1000000)); nstime_idivide(&decay->interval, SMOOTHSTEP_NSTEPS); } @@ -52,7 +52,7 @@ decay_init(decay_t *decay, nstime_t *cur_time, ssize_t decay_ms) { decay->ceil_npages = 0; } if (malloc_mutex_init(&decay->mtx, "decay", WITNESS_RANK_DECAY, - malloc_mutex_rank_exclusive)) { + malloc_mutex_rank_exclusive)) { return true; } decay->purging = false; @@ -65,8 +65,8 @@ decay_ms_valid(ssize_t decay_ms) { if (decay_ms < -1) { return false; } - if (decay_ms == -1 || (uint64_t)decay_ms <= NSTIME_SEC_MAX * - KQU(1000)) { + if (decay_ms == -1 + || (uint64_t)decay_ms <= NSTIME_SEC_MAX * KQU(1000)) { return true; } return false; @@ -74,8 +74,8 @@ decay_ms_valid(ssize_t decay_ms) { static void decay_maybe_update_time(decay_t *decay, nstime_t *new_time) { - if (unlikely(!nstime_monotonic() && nstime_compare(&decay->epoch, - new_time) > 0)) { + if (unlikely(!nstime_monotonic() + && nstime_compare(&decay->epoch, new_time) > 0)) { /* * Time went backwards. 
Move the epoch back in time and * generate a new deadline, with the expectation that time @@ -115,11 +115,11 @@ decay_backlog_npages_limit(const decay_t *decay) { * placed as the newest record. */ static void -decay_backlog_update(decay_t *decay, uint64_t nadvance_u64, - size_t current_npages) { +decay_backlog_update( + decay_t *decay, uint64_t nadvance_u64, size_t current_npages) { if (nadvance_u64 >= SMOOTHSTEP_NSTEPS) { - memset(decay->backlog, 0, (SMOOTHSTEP_NSTEPS-1) * - sizeof(size_t)); + memset(decay->backlog, 0, + (SMOOTHSTEP_NSTEPS - 1) * sizeof(size_t)); } else { size_t nadvance_z = (size_t)nadvance_u64; @@ -128,14 +128,15 @@ decay_backlog_update(decay_t *decay, uint64_t nadvance_u64, memmove(decay->backlog, &decay->backlog[nadvance_z], (SMOOTHSTEP_NSTEPS - nadvance_z) * sizeof(size_t)); if (nadvance_z > 1) { - memset(&decay->backlog[SMOOTHSTEP_NSTEPS - - nadvance_z], 0, (nadvance_z-1) * sizeof(size_t)); + memset(&decay->backlog[SMOOTHSTEP_NSTEPS - nadvance_z], + 0, (nadvance_z - 1) * sizeof(size_t)); } } - size_t npages_delta = (current_npages > decay->nunpurged) ? - current_npages - decay->nunpurged : 0; - decay->backlog[SMOOTHSTEP_NSTEPS-1] = npages_delta; + size_t npages_delta = (current_npages > decay->nunpurged) + ? 
current_npages - decay->nunpurged + : 0; + decay->backlog[SMOOTHSTEP_NSTEPS - 1] = npages_delta; if (config_debug) { if (current_npages > decay->ceil_npages) { @@ -165,18 +166,17 @@ decay_npages_purge_in(decay_t *decay, nstime_t *time, size_t npages_new) { npages_purge = npages_new; } else { uint64_t h_steps_max = h_steps[SMOOTHSTEP_NSTEPS - 1]; - assert(h_steps_max >= - h_steps[SMOOTHSTEP_NSTEPS - 1 - n_epoch]); - npages_purge = npages_new * (h_steps_max - - h_steps[SMOOTHSTEP_NSTEPS - 1 - n_epoch]); + assert(h_steps_max >= h_steps[SMOOTHSTEP_NSTEPS - 1 - n_epoch]); + npages_purge = npages_new + * (h_steps_max - h_steps[SMOOTHSTEP_NSTEPS - 1 - n_epoch]); npages_purge >>= SMOOTHSTEP_BFP; } return npages_purge; } bool -decay_maybe_advance_epoch(decay_t *decay, nstime_t *new_time, - size_t npages_current) { +decay_maybe_advance_epoch( + decay_t *decay, nstime_t *new_time, size_t npages_current) { /* Handle possible non-monotonicity of time. */ decay_maybe_update_time(decay, new_time); @@ -202,8 +202,9 @@ decay_maybe_advance_epoch(decay_t *decay, nstime_t *new_time, decay_backlog_update(decay, nadvance_u64, npages_current); decay->npages_limit = decay_backlog_npages_limit(decay); - decay->nunpurged = (decay->npages_limit > npages_current) ? - decay->npages_limit : npages_current; + decay->nunpurged = (decay->npages_limit > npages_current) + ? 
decay->npages_limit + : npages_current; return true; } @@ -226,21 +227,21 @@ decay_maybe_advance_epoch(decay_t *decay, nstime_t *new_time, */ static inline size_t decay_npurge_after_interval(decay_t *decay, size_t interval) { - size_t i; + size_t i; uint64_t sum = 0; for (i = 0; i < interval; i++) { sum += decay->backlog[i] * h_steps[i]; } for (; i < SMOOTHSTEP_NSTEPS; i++) { - sum += decay->backlog[i] * - (h_steps[i] - h_steps[i - interval]); + sum += decay->backlog[i] * (h_steps[i] - h_steps[i - interval]); } return (size_t)(sum >> SMOOTHSTEP_BFP); } -uint64_t decay_ns_until_purge(decay_t *decay, size_t npages_current, - uint64_t npages_threshold) { +uint64_t +decay_ns_until_purge( + decay_t *decay, size_t npages_current, uint64_t npages_threshold) { if (!decay_gradually(decay)) { return DECAY_UNBOUNDED_TIME_TO_PURGE; } @@ -278,7 +279,7 @@ uint64_t decay_ns_until_purge(decay_t *decay, size_t npages_current, } unsigned n_search = 0; - size_t target, npurge; + size_t target, npurge; while ((npurge_lb + npages_threshold < npurge_ub) && (lb + 2 < ub)) { target = (lb + ub) / 2; npurge = decay_npurge_after_interval(decay, target); diff --git a/src/ecache.c b/src/ecache.c index a242227d..20fcee9e 100644 --- a/src/ecache.c +++ b/src/ecache.c @@ -7,7 +7,7 @@ bool ecache_init(tsdn_t *tsdn, ecache_t *ecache, extent_state_t state, unsigned ind, bool delay_coalesce) { if (malloc_mutex_init(&ecache->mtx, "extents", WITNESS_RANK_EXTENTS, - malloc_mutex_rank_exclusive)) { + malloc_mutex_rank_exclusive)) { return true; } ecache->state = state; diff --git a/src/edata.c b/src/edata.c index 82b6f565..d71d1679 100644 --- a/src/edata.c +++ b/src/edata.c @@ -1,6 +1,5 @@ #include "jemalloc/internal/jemalloc_preamble.h" #include "jemalloc/internal/jemalloc_internal_includes.h" -ph_gen(, edata_avail, edata_t, avail_link, - edata_esnead_comp) -ph_gen(, edata_heap, edata_t, heap_link, edata_snad_comp) +ph_gen(, edata_avail, edata_t, avail_link, edata_esnead_comp) + ph_gen(, edata_heap, 
edata_t, heap_link, edata_snad_comp) diff --git a/src/edata_cache.c b/src/edata_cache.c index 6bc1848c..3ac8273a 100644 --- a/src/edata_cache.c +++ b/src/edata_cache.c @@ -11,7 +11,7 @@ edata_cache_init(edata_cache_t *edata_cache, base_t *base) { */ atomic_store_zu(&edata_cache->count, 0, ATOMIC_RELAXED); if (malloc_mutex_init(&edata_cache->mtx, "edata_cache", - WITNESS_RANK_EDATA_CACHE, malloc_mutex_rank_exclusive)) { + WITNESS_RANK_EDATA_CACHE, malloc_mutex_rank_exclusive)) { return true; } edata_cache->base = base; @@ -63,8 +63,7 @@ edata_cache_fast_init(edata_cache_fast_t *ecs, edata_cache_t *fallback) { } static void -edata_cache_fast_try_fill_from_fallback(tsdn_t *tsdn, - edata_cache_fast_t *ecs) { +edata_cache_fast_try_fill_from_fallback(tsdn_t *tsdn, edata_cache_fast_t *ecs) { edata_t *edata; malloc_mutex_lock(tsdn, &ecs->fallback->mtx); for (int i = 0; i < EDATA_CACHE_FAST_FILL; i++) { @@ -80,8 +79,8 @@ edata_cache_fast_try_fill_from_fallback(tsdn_t *tsdn, edata_t * edata_cache_fast_get(tsdn_t *tsdn, edata_cache_fast_t *ecs) { - witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn), - WITNESS_RANK_EDATA_CACHE, 0); + witness_assert_depth_to_rank( + tsdn_witness_tsdp_get(tsdn), WITNESS_RANK_EDATA_CACHE, 0); if (ecs->disabled) { assert(edata_list_inactive_first(&ecs->list) == NULL); @@ -118,7 +117,7 @@ edata_cache_fast_flush_all(tsdn_t *tsdn, edata_cache_fast_t *ecs) { * flush and disable pathways. 
*/ edata_t *edata; - size_t nflushed = 0; + size_t nflushed = 0; malloc_mutex_lock(tsdn, &ecs->fallback->mtx); while ((edata = edata_list_inactive_first(&ecs->list)) != NULL) { edata_list_inactive_remove(&ecs->list, edata); @@ -131,8 +130,8 @@ edata_cache_fast_flush_all(tsdn_t *tsdn, edata_cache_fast_t *ecs) { void edata_cache_fast_put(tsdn_t *tsdn, edata_cache_fast_t *ecs, edata_t *edata) { - witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn), - WITNESS_RANK_EDATA_CACHE, 0); + witness_assert_depth_to_rank( + tsdn_witness_tsdp_get(tsdn), WITNESS_RANK_EDATA_CACHE, 0); if (ecs->disabled) { assert(edata_list_inactive_first(&ecs->list) == NULL); diff --git a/src/ehooks.c b/src/ehooks.c index 89e30409..d7abb960 100644 --- a/src/ehooks.c +++ b/src/ehooks.c @@ -27,9 +27,10 @@ extent_alloc_core(tsdn_t *tsdn, arena_t *arena, void *new_addr, size_t size, assert(alignment != 0); /* "primary" dss. */ - if (have_dss && dss_prec == dss_prec_primary && (ret = - extent_alloc_dss(tsdn, arena, new_addr, size, alignment, zero, - commit)) != NULL) { + if (have_dss && dss_prec == dss_prec_primary + && (ret = extent_alloc_dss( + tsdn, arena, new_addr, size, alignment, zero, commit)) + != NULL) { return ret; } /* mmap. */ @@ -38,9 +39,10 @@ extent_alloc_core(tsdn_t *tsdn, arena_t *arena, void *new_addr, size_t size, return ret; } /* "secondary" dss. */ - if (have_dss && dss_prec == dss_prec_secondary && (ret = - extent_alloc_dss(tsdn, arena, new_addr, size, alignment, zero, - commit)) != NULL) { + if (have_dss && dss_prec == dss_prec_secondary + && (ret = extent_alloc_dss( + tsdn, arena, new_addr, size, alignment, zero, commit)) + != NULL) { return ret; } @@ -54,10 +56,11 @@ ehooks_default_alloc_impl(tsdn_t *tsdn, void *new_addr, size_t size, arena_t *arena = arena_get(tsdn, arena_ind, false); /* NULL arena indicates arena_create. */ assert(arena != NULL || alignment == BASE_BLOCK_MIN_ALIGN); - dss_prec_t dss = (arena == NULL) ? 
dss_prec_disabled : - (dss_prec_t)atomic_load_u(&arena->dss_prec, ATOMIC_RELAXED); - void *ret = extent_alloc_core(tsdn, arena, new_addr, size, alignment, - zero, commit, dss); + dss_prec_t dss = (arena == NULL) + ? dss_prec_disabled + : (dss_prec_t)atomic_load_u(&arena->dss_prec, ATOMIC_RELAXED); + void *ret = extent_alloc_core( + tsdn, arena, new_addr, size, alignment, zero, commit, dss); if (have_madvise_huge && ret) { pages_set_thp_state(ret, size); } @@ -100,8 +103,8 @@ ehooks_default_destroy(extent_hooks_t *extent_hooks, void *addr, size_t size, bool ehooks_default_commit_impl(void *addr, size_t offset, size_t length) { - return pages_commit((void *)((byte_t *)addr + (uintptr_t)offset), - length); + return pages_commit( + (void *)((byte_t *)addr + (uintptr_t)offset), length); } static bool @@ -112,8 +115,8 @@ ehooks_default_commit(extent_hooks_t *extent_hooks, void *addr, size_t size, bool ehooks_default_decommit_impl(void *addr, size_t offset, size_t length) { - return pages_decommit((void *)((byte_t *)addr + (uintptr_t)offset), - length); + return pages_decommit( + (void *)((byte_t *)addr + (uintptr_t)offset), length); } static bool @@ -125,8 +128,8 @@ ehooks_default_decommit(extent_hooks_t *extent_hooks, void *addr, size_t size, #ifdef PAGES_CAN_PURGE_LAZY bool ehooks_default_purge_lazy_impl(void *addr, size_t offset, size_t length) { - return pages_purge_lazy((void *)((byte_t *)addr + (uintptr_t)offset), - length); + return pages_purge_lazy( + (void *)((byte_t *)addr + (uintptr_t)offset), length); } static bool @@ -143,8 +146,8 @@ ehooks_default_purge_lazy(extent_hooks_t *extent_hooks, void *addr, size_t size, #ifdef PAGES_CAN_PURGE_FORCED bool ehooks_default_purge_forced_impl(void *addr, size_t offset, size_t length) { - return pages_purge_forced((void *)((byte_t *)addr + - (uintptr_t)offset), length); + return pages_purge_forced( + (void *)((byte_t *)addr + (uintptr_t)offset), length); } static bool @@ -201,11 +204,11 @@ ehooks_default_merge_impl(tsdn_t 
*tsdn, void *addr_a, void *addr_b) { return true; } if (config_debug) { - edata_t *a = emap_edata_lookup(tsdn, &arena_emap_global, - addr_a); - bool head_a = edata_is_head_get(a); - edata_t *b = emap_edata_lookup(tsdn, &arena_emap_global, - addr_b); + edata_t *a = emap_edata_lookup( + tsdn, &arena_emap_global, addr_a); + bool head_a = edata_is_head_get(a); + edata_t *b = emap_edata_lookup( + tsdn, &arena_emap_global, addr_b); bool head_b = edata_is_head_get(b); emap_assert_mapped(tsdn, &arena_emap_global, a); emap_assert_mapped(tsdn, &arena_emap_global, b); @@ -254,22 +257,17 @@ ehooks_default_unguard_impl(void *guard1, void *guard2) { pages_unmark_guards(guard1, guard2); } -const extent_hooks_t ehooks_default_extent_hooks = { - ehooks_default_alloc, - ehooks_default_dalloc, - ehooks_default_destroy, - ehooks_default_commit, - ehooks_default_decommit, +const extent_hooks_t ehooks_default_extent_hooks = {ehooks_default_alloc, + ehooks_default_dalloc, ehooks_default_destroy, ehooks_default_commit, + ehooks_default_decommit, #ifdef PAGES_CAN_PURGE_LAZY - ehooks_default_purge_lazy, + ehooks_default_purge_lazy, #else - NULL, + NULL, #endif #ifdef PAGES_CAN_PURGE_FORCED - ehooks_default_purge_forced, + ehooks_default_purge_forced, #else - NULL, + NULL, #endif - ehooks_default_split, - ehooks_default_merge -}; + ehooks_default_split, ehooks_default_merge}; diff --git a/src/emap.c b/src/emap.c index f7d5c25a..54bfabab 100644 --- a/src/emap.c +++ b/src/emap.c @@ -16,10 +16,10 @@ emap_init(emap_t *emap, base_t *base, bool zeroed) { } void -emap_update_edata_state(tsdn_t *tsdn, emap_t *emap, edata_t *edata, - extent_state_t state) { - witness_assert_positive_depth_to_rank(tsdn_witness_tsdp_get(tsdn), - WITNESS_RANK_CORE); +emap_update_edata_state( + tsdn_t *tsdn, emap_t *emap, edata_t *edata, extent_state_t state) { + witness_assert_positive_depth_to_rank( + tsdn_witness_tsdp_get(tsdn), WITNESS_RANK_CORE); edata_state_set(edata, state); @@ -28,10 +28,11 @@ 
emap_update_edata_state(tsdn_t *tsdn, emap_t *emap, edata_t *edata, rtree_ctx, (uintptr_t)edata_base_get(edata), /* dependent */ true, /* init_missing */ false); assert(elm1 != NULL); - rtree_leaf_elm_t *elm2 = edata_size_get(edata) == PAGE ? NULL : - rtree_leaf_elm_lookup(tsdn, &emap->rtree, rtree_ctx, - (uintptr_t)edata_last_get(edata), /* dependent */ true, - /* init_missing */ false); + rtree_leaf_elm_t *elm2 = edata_size_get(edata) == PAGE + ? NULL + : rtree_leaf_elm_lookup(tsdn, &emap->rtree, rtree_ctx, + (uintptr_t)edata_last_get(edata), /* dependent */ true, + /* init_missing */ false); rtree_leaf_elm_state_update(tsdn, &emap->rtree, elm1, elm2, state); @@ -42,17 +43,17 @@ static inline edata_t * emap_try_acquire_edata_neighbor_impl(tsdn_t *tsdn, emap_t *emap, edata_t *edata, extent_pai_t pai, extent_state_t expected_state, bool forward, bool expanding) { - witness_assert_positive_depth_to_rank(tsdn_witness_tsdp_get(tsdn), - WITNESS_RANK_CORE); + witness_assert_positive_depth_to_rank( + tsdn_witness_tsdp_get(tsdn), WITNESS_RANK_CORE); assert(!edata_guarded_get(edata)); assert(!expanding || forward); assert(!edata_state_in_transition(expected_state)); - assert(expected_state == extent_state_dirty || - expected_state == extent_state_muzzy || - expected_state == extent_state_retained); + assert(expected_state == extent_state_dirty + || expected_state == extent_state_muzzy + || expected_state == extent_state_retained); - void *neighbor_addr = forward ? edata_past_get(edata) : - edata_before_get(edata); + void *neighbor_addr = forward ? edata_past_get(edata) + : edata_before_get(edata); /* * This is subtle; the rtree code asserts that its input pointer is * non-NULL, and this is a useful thing to check. 
But it's possible @@ -73,10 +74,10 @@ emap_try_acquire_edata_neighbor_impl(tsdn_t *tsdn, emap_t *emap, edata_t *edata, return NULL; } - rtree_contents_t neighbor_contents = rtree_leaf_elm_read(tsdn, - &emap->rtree, elm, /* dependent */ false); + rtree_contents_t neighbor_contents = rtree_leaf_elm_read( + tsdn, &emap->rtree, elm, /* dependent */ false); if (!extent_can_acquire_neighbor(edata, neighbor_contents, pai, - expected_state, forward, expanding)) { + expected_state, forward, expanding)) { return NULL; } @@ -109,8 +110,8 @@ emap_try_acquire_edata_neighbor_expand(tsdn_t *tsdn, emap_t *emap, } void -emap_release_edata(tsdn_t *tsdn, emap_t *emap, edata_t *edata, - extent_state_t new_state) { +emap_release_edata( + tsdn_t *tsdn, emap_t *emap, edata_t *edata, extent_state_t new_state) { assert(emap_edata_in_transition(tsdn, emap, edata)); assert(emap_edata_is_acquired(tsdn, emap, edata)); @@ -145,8 +146,8 @@ emap_rtree_write_acquired(tsdn_t *tsdn, emap_t *emap, rtree_leaf_elm_t *elm_a, contents.edata = edata; contents.metadata.szind = szind; contents.metadata.slab = slab; - contents.metadata.is_head = (edata == NULL) ? false : - edata_is_head_get(edata); + contents.metadata.is_head = (edata == NULL) ? false + : edata_is_head_get(edata); contents.metadata.state = (edata == NULL) ? 
0 : edata_state_get(edata); rtree_leaf_elm_write(tsdn, &emap->rtree, elm_a, contents); if (elm_b != NULL) { @@ -155,29 +156,33 @@ emap_rtree_write_acquired(tsdn_t *tsdn, emap_t *emap, rtree_leaf_elm_t *elm_a, } bool -emap_register_boundary(tsdn_t *tsdn, emap_t *emap, edata_t *edata, - szind_t szind, bool slab) { +emap_register_boundary( + tsdn_t *tsdn, emap_t *emap, edata_t *edata, szind_t szind, bool slab) { assert(edata_state_get(edata) == extent_state_active); EMAP_DECLARE_RTREE_CTX; rtree_leaf_elm_t *elm_a, *elm_b; - bool err = emap_rtree_leaf_elms_lookup(tsdn, emap, rtree_ctx, edata, - false, true, &elm_a, &elm_b); + bool err = emap_rtree_leaf_elms_lookup( + tsdn, emap, rtree_ctx, edata, false, true, &elm_a, &elm_b); if (err) { return true; } assert(rtree_leaf_elm_read(tsdn, &emap->rtree, elm_a, - /* dependent */ false).edata == NULL); + /* dependent */ false) + .edata + == NULL); assert(rtree_leaf_elm_read(tsdn, &emap->rtree, elm_b, - /* dependent */ false).edata == NULL); + /* dependent */ false) + .edata + == NULL); emap_rtree_write_acquired(tsdn, emap, elm_a, elm_b, edata, szind, slab); return false; } /* Invoked *after* emap_register_boundary. 
*/ void -emap_register_interior(tsdn_t *tsdn, emap_t *emap, edata_t *edata, - szind_t szind) { +emap_register_interior( + tsdn_t *tsdn, emap_t *emap, edata_t *edata, szind_t szind) { EMAP_DECLARE_RTREE_CTX; assert(edata_slab_get(edata)); @@ -226,10 +231,10 @@ emap_deregister_boundary(tsdn_t *tsdn, emap_t *emap, edata_t *edata) { EMAP_DECLARE_RTREE_CTX; rtree_leaf_elm_t *elm_a, *elm_b; - emap_rtree_leaf_elms_lookup(tsdn, emap, rtree_ctx, edata, - true, false, &elm_a, &elm_b); - emap_rtree_write_acquired(tsdn, emap, elm_a, elm_b, NULL, SC_NSIZES, - false); + emap_rtree_leaf_elms_lookup( + tsdn, emap, rtree_ctx, edata, true, false, &elm_a, &elm_b); + emap_rtree_write_acquired( + tsdn, emap, elm_a, elm_b, NULL, SC_NSIZES, false); } void @@ -245,8 +250,8 @@ emap_deregister_interior(tsdn_t *tsdn, emap_t *emap, edata_t *edata) { } void -emap_remap(tsdn_t *tsdn, emap_t *emap, edata_t *edata, szind_t szind, - bool slab) { +emap_remap( + tsdn_t *tsdn, emap_t *emap, edata_t *edata, szind_t szind, bool slab) { EMAP_DECLARE_RTREE_CTX; if (szind != SC_NSIZES) { @@ -274,8 +279,8 @@ emap_remap(tsdn_t *tsdn, emap_t *emap, edata_t *edata, szind_t szind, if (slab && edata_size_get(edata) > PAGE) { uintptr_t key = (uintptr_t)edata_past_get(edata) - (uintptr_t)PAGE; - rtree_write(tsdn, &emap->rtree, rtree_ctx, key, - contents); + rtree_write( + tsdn, &emap->rtree, rtree_ctx, key, contents); } } } @@ -344,29 +349,29 @@ emap_merge_commit(tsdn_t *tsdn, emap_t *emap, emap_prepare_t *prepare, clear_contents.metadata.state = (extent_state_t)0; if (prepare->lead_elm_b != NULL) { - rtree_leaf_elm_write(tsdn, &emap->rtree, - prepare->lead_elm_b, clear_contents); + rtree_leaf_elm_write( + tsdn, &emap->rtree, prepare->lead_elm_b, clear_contents); } rtree_leaf_elm_t *merged_b; if (prepare->trail_elm_b != NULL) { - rtree_leaf_elm_write(tsdn, &emap->rtree, - prepare->trail_elm_a, clear_contents); + rtree_leaf_elm_write( + tsdn, &emap->rtree, prepare->trail_elm_a, clear_contents); merged_b = 
prepare->trail_elm_b; } else { merged_b = prepare->trail_elm_a; } - emap_rtree_write_acquired(tsdn, emap, prepare->lead_elm_a, merged_b, - lead, SC_NSIZES, false); + emap_rtree_write_acquired( + tsdn, emap, prepare->lead_elm_a, merged_b, lead, SC_NSIZES, false); } void emap_do_assert_mapped(tsdn_t *tsdn, emap_t *emap, edata_t *edata) { EMAP_DECLARE_RTREE_CTX; - rtree_contents_t contents = rtree_read(tsdn, &emap->rtree, rtree_ctx, - (uintptr_t)edata_base_get(edata)); + rtree_contents_t contents = rtree_read( + tsdn, &emap->rtree, rtree_ctx, (uintptr_t)edata_base_get(edata)); assert(contents.edata == edata); assert(contents.metadata.is_head == edata_is_head_get(edata)); assert(contents.metadata.state == edata_state_get(edata)); @@ -375,12 +380,12 @@ emap_do_assert_mapped(tsdn_t *tsdn, emap_t *emap, edata_t *edata) { void emap_do_assert_not_mapped(tsdn_t *tsdn, emap_t *emap, edata_t *edata) { emap_full_alloc_ctx_t context1 = {0}; - emap_full_alloc_ctx_try_lookup(tsdn, emap, edata_base_get(edata), - &context1); + emap_full_alloc_ctx_try_lookup( + tsdn, emap, edata_base_get(edata), &context1); assert(context1.edata == NULL); emap_full_alloc_ctx_t context2 = {0}; - emap_full_alloc_ctx_try_lookup(tsdn, emap, edata_last_get(edata), - &context2); + emap_full_alloc_ctx_try_lookup( + tsdn, emap, edata_last_get(edata), &context2); assert(context2.edata == NULL); } diff --git a/src/eset.c b/src/eset.c index b4666e2c..4a427d78 100644 --- a/src/eset.c +++ b/src/eset.c @@ -48,32 +48,32 @@ eset_nbytes_get(eset_t *eset, pszind_t pind) { static void eset_stats_add(eset_t *eset, pszind_t pind, size_t sz) { - size_t cur = atomic_load_zu(&eset->bin_stats[pind].nextents, - ATOMIC_RELAXED); - atomic_store_zu(&eset->bin_stats[pind].nextents, cur + 1, - ATOMIC_RELAXED); + size_t cur = atomic_load_zu( + &eset->bin_stats[pind].nextents, ATOMIC_RELAXED); + atomic_store_zu( + &eset->bin_stats[pind].nextents, cur + 1, ATOMIC_RELAXED); cur = atomic_load_zu(&eset->bin_stats[pind].nbytes, 
ATOMIC_RELAXED); - atomic_store_zu(&eset->bin_stats[pind].nbytes, cur + sz, - ATOMIC_RELAXED); + atomic_store_zu( + &eset->bin_stats[pind].nbytes, cur + sz, ATOMIC_RELAXED); } static void eset_stats_sub(eset_t *eset, pszind_t pind, size_t sz) { - size_t cur = atomic_load_zu(&eset->bin_stats[pind].nextents, - ATOMIC_RELAXED); - atomic_store_zu(&eset->bin_stats[pind].nextents, cur - 1, - ATOMIC_RELAXED); + size_t cur = atomic_load_zu( + &eset->bin_stats[pind].nextents, ATOMIC_RELAXED); + atomic_store_zu( + &eset->bin_stats[pind].nextents, cur - 1, ATOMIC_RELAXED); cur = atomic_load_zu(&eset->bin_stats[pind].nbytes, ATOMIC_RELAXED); - atomic_store_zu(&eset->bin_stats[pind].nbytes, cur - sz, - ATOMIC_RELAXED); + atomic_store_zu( + &eset->bin_stats[pind].nbytes, cur - sz, ATOMIC_RELAXED); } void eset_insert(eset_t *eset, edata_t *edata) { assert(edata_state_get(edata) == eset->state); - size_t size = edata_size_get(edata); - size_t psz = sz_psz_quantize_floor(size); + size_t size = edata_size_get(edata); + size_t psz = sz_psz_quantize_floor(size); pszind_t pind = sz_psz2ind(psz); edata_cmp_summary_t edata_cmp_summary = edata_cmp_summary_get(edata); @@ -86,8 +86,9 @@ eset_insert(eset_t *eset, edata_t *edata) { * There's already a min element; update the summary if we're * about to insert a lower one. */ - if (edata_cmp_summary_comp(edata_cmp_summary, - eset->bins[pind].heap_min) < 0) { + if (edata_cmp_summary_comp( + edata_cmp_summary, eset->bins[pind].heap_min) + < 0) { eset->bins[pind].heap_min = edata_cmp_summary; } } @@ -104,19 +105,18 @@ eset_insert(eset_t *eset, edata_t *edata) { * don't need an atomic fetch-add; we can get by with a load followed by * a store. 
*/ - size_t cur_eset_npages = - atomic_load_zu(&eset->npages, ATOMIC_RELAXED); - atomic_store_zu(&eset->npages, cur_eset_npages + npages, - ATOMIC_RELAXED); + size_t cur_eset_npages = atomic_load_zu(&eset->npages, ATOMIC_RELAXED); + atomic_store_zu( + &eset->npages, cur_eset_npages + npages, ATOMIC_RELAXED); } void eset_remove(eset_t *eset, edata_t *edata) { - assert(edata_state_get(edata) == eset->state || - edata_state_in_transition(edata_state_get(edata))); + assert(edata_state_get(edata) == eset->state + || edata_state_in_transition(edata_state_get(edata))); - size_t size = edata_size_get(edata); - size_t psz = sz_psz_quantize_floor(size); + size_t size = edata_size_get(edata); + size_t psz = sz_psz_quantize_floor(size); pszind_t pind = sz_psz2ind(psz); if (config_stats) { eset_stats_sub(eset, pind, size); @@ -136,8 +136,9 @@ eset_remove(eset_t *eset, edata_t *edata) { * summaries of the removed element and the min element should * compare equal. */ - if (edata_cmp_summary_comp(edata_cmp_summary, - eset->bins[pind].heap_min) == 0) { + if (edata_cmp_summary_comp( + edata_cmp_summary, eset->bins[pind].heap_min) + == 0) { eset->bins[pind].heap_min = edata_cmp_summary_get( edata_heap_first(&eset->bins[pind].heap)); } @@ -148,35 +149,35 @@ eset_remove(eset_t *eset, edata_t *edata) { * As in eset_insert, we hold eset->mtx and so don't need atomic * operations for updating eset->npages. 
*/ - size_t cur_extents_npages = - atomic_load_zu(&eset->npages, ATOMIC_RELAXED); + size_t cur_extents_npages = atomic_load_zu( + &eset->npages, ATOMIC_RELAXED); assert(cur_extents_npages >= npages); - atomic_store_zu(&eset->npages, - cur_extents_npages - (size >> LG_PAGE), ATOMIC_RELAXED); + atomic_store_zu(&eset->npages, cur_extents_npages - (size >> LG_PAGE), + ATOMIC_RELAXED); } static edata_t * -eset_enumerate_alignment_search(eset_t *eset, size_t size, pszind_t bin_ind, - size_t alignment) { +eset_enumerate_alignment_search( + eset_t *eset, size_t size, pszind_t bin_ind, size_t alignment) { if (edata_heap_empty(&eset->bins[bin_ind].heap)) { return NULL; } - edata_t *edata = NULL; + edata_t *edata = NULL; edata_heap_enumerate_helper_t helper; edata_heap_enumerate_prepare(&eset->bins[bin_ind].heap, &helper, - ESET_ENUMERATE_MAX_NUM, sizeof(helper.bfs_queue)/sizeof(void *)); - while ((edata = - edata_heap_enumerate_next(&eset->bins[bin_ind].heap, &helper)) != - NULL) { + ESET_ENUMERATE_MAX_NUM, sizeof(helper.bfs_queue) / sizeof(void *)); + while ((edata = edata_heap_enumerate_next( + &eset->bins[bin_ind].heap, &helper)) + != NULL) { uintptr_t base = (uintptr_t)edata_base_get(edata); - size_t candidate_size = edata_size_get(edata); + size_t candidate_size = edata_size_get(edata); if (candidate_size < size) { continue; } - uintptr_t next_align = ALIGNMENT_CEILING((uintptr_t)base, - PAGE_CEILING(alignment)); + uintptr_t next_align = ALIGNMENT_CEILING( + (uintptr_t)base, PAGE_CEILING(alignment)); if (base > next_align || base + candidate_size <= next_align) { /* Overflow or not crossing the next alignment. 
*/ continue; @@ -198,19 +199,20 @@ eset_enumerate_search(eset_t *eset, size_t size, pszind_t bin_ind, return NULL; } - edata_t *ret = NULL, *edata = NULL; + edata_t *ret = NULL, *edata = NULL; edata_heap_enumerate_helper_t helper; edata_heap_enumerate_prepare(&eset->bins[bin_ind].heap, &helper, - ESET_ENUMERATE_MAX_NUM, sizeof(helper.bfs_queue)/sizeof(void *)); - while ((edata = - edata_heap_enumerate_next(&eset->bins[bin_ind].heap, &helper)) != - NULL) { - if ((!exact_only && edata_size_get(edata) >= size) || - (exact_only && edata_size_get(edata) == size)) { - edata_cmp_summary_t temp_summ = - edata_cmp_summary_get(edata); - if (ret == NULL || edata_cmp_summary_comp(temp_summ, - *ret_summ) < 0) { + ESET_ENUMERATE_MAX_NUM, sizeof(helper.bfs_queue) / sizeof(void *)); + while ((edata = edata_heap_enumerate_next( + &eset->bins[bin_ind].heap, &helper)) + != NULL) { + if ((!exact_only && edata_size_get(edata) >= size) + || (exact_only && edata_size_get(edata) == size)) { + edata_cmp_summary_t temp_summ = edata_cmp_summary_get( + edata); + if (ret == NULL + || edata_cmp_summary_comp(temp_summ, *ret_summ) + < 0) { ret = edata; *ret_summ = temp_summ; } @@ -225,8 +227,8 @@ eset_enumerate_search(eset_t *eset, size_t size, pszind_t bin_ind, * requirement. For each size, try only the first extent in the heap. 
*/ static edata_t * -eset_fit_alignment(eset_t *eset, size_t min_size, size_t max_size, - size_t alignment) { +eset_fit_alignment( + eset_t *eset, size_t min_size, size_t max_size, size_t alignment) { pszind_t pind = sz_psz2ind(sz_psz_quantize_ceil(min_size)); pszind_t pind_max = sz_psz2ind(sz_psz_quantize_ceil(max_size)); @@ -234,26 +236,26 @@ eset_fit_alignment(eset_t *eset, size_t min_size, size_t max_size, pszind_t pind_prev = sz_psz2ind(sz_psz_quantize_floor(min_size)); if (sz_large_size_classes_disabled() && pind != pind_prev) { edata_t *ret = NULL; - ret = eset_enumerate_alignment_search(eset, min_size, pind_prev, - alignment); + ret = eset_enumerate_alignment_search( + eset, min_size, pind_prev, alignment); if (ret != NULL) { return ret; } } for (pszind_t i = - (pszind_t)fb_ffs(eset->bitmap, ESET_NPSIZES, (size_t)pind); - i < pind_max; - i = (pszind_t)fb_ffs(eset->bitmap, ESET_NPSIZES, (size_t)i + 1)) { + (pszind_t)fb_ffs(eset->bitmap, ESET_NPSIZES, (size_t)pind); + i < pind_max; + i = (pszind_t)fb_ffs(eset->bitmap, ESET_NPSIZES, (size_t)i + 1)) { assert(i < SC_NPSIZES); assert(!edata_heap_empty(&eset->bins[i].heap)); - edata_t *edata = edata_heap_first(&eset->bins[i].heap); + edata_t *edata = edata_heap_first(&eset->bins[i].heap); uintptr_t base = (uintptr_t)edata_base_get(edata); - size_t candidate_size = edata_size_get(edata); + size_t candidate_size = edata_size_get(edata); assert(candidate_size >= min_size); - uintptr_t next_align = ALIGNMENT_CEILING((uintptr_t)base, - PAGE_CEILING(alignment)); + uintptr_t next_align = ALIGNMENT_CEILING( + (uintptr_t)base, PAGE_CEILING(alignment)); if (base > next_align || base + candidate_size <= next_align) { /* Overflow or not crossing the next alignment. */ continue; @@ -279,22 +281,23 @@ eset_fit_alignment(eset_t *eset, size_t min_size, size_t max_size, * for others. 
*/ static edata_t * -eset_first_fit(eset_t *eset, size_t size, bool exact_only, - unsigned lg_max_fit) { - edata_t *ret = NULL; +eset_first_fit( + eset_t *eset, size_t size, bool exact_only, unsigned lg_max_fit) { + edata_t *ret = NULL; edata_cmp_summary_t ret_summ JEMALLOC_CC_SILENCE_INIT({0}); pszind_t pind = sz_psz2ind(sz_psz_quantize_ceil(size)); if (exact_only) { if (sz_large_size_classes_disabled()) { - pszind_t pind_prev = - sz_psz2ind(sz_psz_quantize_floor(size)); + pszind_t pind_prev = sz_psz2ind( + sz_psz_quantize_floor(size)); return eset_enumerate_search(eset, size, pind_prev, /* exact_only */ true, &ret_summ); } else { - return edata_heap_empty(&eset->bins[pind].heap) ? NULL: - edata_heap_first(&eset->bins[pind].heap); + return edata_heap_empty(&eset->bins[pind].heap) + ? NULL + : edata_heap_first(&eset->bins[pind].heap); } } @@ -321,15 +324,15 @@ eset_first_fit(eset_t *eset, size_t size, bool exact_only, * usize and thus should be enumerated. */ pszind_t pind_prev = sz_psz2ind(sz_psz_quantize_floor(size)); - if (sz_large_size_classes_disabled() && pind != pind_prev){ + if (sz_large_size_classes_disabled() && pind != pind_prev) { ret = eset_enumerate_search(eset, size, pind_prev, /* exact_only */ false, &ret_summ); } for (pszind_t i = - (pszind_t)fb_ffs(eset->bitmap, ESET_NPSIZES, (size_t)pind); - i < ESET_NPSIZES; - i = (pszind_t)fb_ffs(eset->bitmap, ESET_NPSIZES, (size_t)i + 1)) { + (pszind_t)fb_ffs(eset->bitmap, ESET_NPSIZES, (size_t)pind); + i < ESET_NPSIZES; + i = (pszind_t)fb_ffs(eset->bitmap, ESET_NPSIZES, (size_t)i + 1)) { assert(!edata_heap_empty(&eset->bins[i].heap)); if (lg_max_fit == SC_PTR_BITS) { /* @@ -342,8 +345,9 @@ eset_first_fit(eset_t *eset, size_t size, bool exact_only, if ((sz_pind2sz(i) >> lg_max_fit) > size) { break; } - if (ret == NULL || edata_cmp_summary_comp( - eset->bins[i].heap_min, ret_summ) < 0) { + if (ret == NULL + || edata_cmp_summary_comp(eset->bins[i].heap_min, ret_summ) + < 0) { /* * We grab the edata as early as 
possible, even though * we might change it later. Practically, a large @@ -354,9 +358,10 @@ eset_first_fit(eset_t *eset, size_t size, bool exact_only, edata_t *edata = edata_heap_first(&eset->bins[i].heap); assert(edata_size_get(edata) >= size); assert(ret == NULL || edata_snad_comp(edata, ret) < 0); - assert(ret == NULL || edata_cmp_summary_comp( - eset->bins[i].heap_min, - edata_cmp_summary_get(edata)) == 0); + assert(ret == NULL + || edata_cmp_summary_comp(eset->bins[i].heap_min, + edata_cmp_summary_get(edata)) + == 0); ret = edata; ret_summ = eset->bins[i].heap_min; } diff --git a/src/extent.c b/src/extent.c index 03a3fdd8..0a23bbd9 100644 --- a/src/extent.c +++ b/src/extent.c @@ -19,7 +19,7 @@ size_t opt_process_madvise_max_batch = #else 0 #endif - ; +; static bool extent_commit_impl(tsdn_t *tsdn, ehooks_t *ehooks, edata_t *edata, size_t offset, size_t length, bool growing_retained); @@ -29,8 +29,8 @@ static bool extent_purge_forced_impl(tsdn_t *tsdn, ehooks_t *ehooks, edata_t *edata, size_t offset, size_t length, bool growing_retained); static edata_t *extent_split_impl(tsdn_t *tsdn, pac_t *pac, ehooks_t *ehooks, edata_t *edata, size_t size_a, size_t size_b, bool holding_core_locks); -static bool extent_merge_impl(tsdn_t *tsdn, pac_t *pac, ehooks_t *ehooks, - edata_t *a, edata_t *b, bool holding_core_locks); +static bool extent_merge_impl(tsdn_t *tsdn, pac_t *pac, ehooks_t *ehooks, + edata_t *a, edata_t *b, bool holding_core_locks); /* Used exclusively for gdump triggering. */ static atomic_zu_t curpages; @@ -42,7 +42,7 @@ static atomic_zu_t highpages; * definition. 
*/ -static void extent_deregister(tsdn_t *tsdn, pac_t *pac, edata_t *edata); +static void extent_deregister(tsdn_t *tsdn, pac_t *pac, edata_t *edata); static edata_t *extent_recycle(tsdn_t *tsdn, pac_t *pac, ehooks_t *ehooks, ecache_t *ecache, edata_t *expand_edata, size_t usize, size_t alignment, bool zero, bool *commit, bool growing_retained, bool guarded); @@ -51,8 +51,8 @@ static edata_t *extent_try_coalesce(tsdn_t *tsdn, pac_t *pac, ehooks_t *ehooks, static edata_t *extent_alloc_retained(tsdn_t *tsdn, pac_t *pac, ehooks_t *ehooks, edata_t *expand_edata, size_t size, size_t alignment, bool zero, bool *commit, bool guarded); -static bool extent_decommit_wrapper(tsdn_t *tsdn, ehooks_t *ehooks, - edata_t *edata, size_t offset, size_t length); +static bool extent_decommit_wrapper(tsdn_t *tsdn, ehooks_t *ehooks, + edata_t *edata, size_t offset, size_t length); /******************************************************************************/ @@ -73,8 +73,8 @@ extent_try_delayed_coalesce(tsdn_t *tsdn, pac_t *pac, ehooks_t *ehooks, emap_update_edata_state(tsdn, pac->emap, edata, extent_state_active); bool coalesced; - edata = extent_try_coalesce(tsdn, pac, ehooks, ecache, - edata, &coalesced); + edata = extent_try_coalesce( + tsdn, pac, ehooks, ecache, edata, &coalesced); emap_update_edata_state(tsdn, pac->emap, edata, ecache->state); if (!coalesced) { @@ -90,10 +90,10 @@ ecache_alloc(tsdn_t *tsdn, pac_t *pac, ehooks_t *ehooks, ecache_t *ecache, bool guarded) { assert(size != 0); assert(alignment != 0); - witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn), - WITNESS_RANK_CORE, 0); + witness_assert_depth_to_rank( + tsdn_witness_tsdp_get(tsdn), WITNESS_RANK_CORE, 0); - bool commit = true; + bool commit = true; edata_t *edata = extent_recycle(tsdn, pac, ehooks, ecache, expand_edata, size, alignment, zero, &commit, false, guarded); assert(edata == NULL || edata_pai_get(edata) == EXTENT_PAI_PAC); @@ -107,10 +107,10 @@ ecache_alloc_grow(tsdn_t *tsdn, pac_t *pac, 
ehooks_t *ehooks, ecache_t *ecache, bool guarded) { assert(size != 0); assert(alignment != 0); - witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn), - WITNESS_RANK_CORE, 0); + witness_assert_depth_to_rank( + tsdn_witness_tsdp_get(tsdn), WITNESS_RANK_CORE, 0); - bool commit = true; + bool commit = true; edata_t *edata = extent_alloc_retained(tsdn, pac, ehooks, expand_edata, size, alignment, zero, &commit, guarded); if (edata == NULL) { @@ -131,10 +131,11 @@ ecache_alloc_grow(tsdn_t *tsdn, pac_t *pac, ehooks_t *ehooks, ecache_t *ecache, */ return NULL; } - void *new_addr = (expand_edata == NULL) ? NULL : - edata_past_get(expand_edata); - edata = extent_alloc_wrapper(tsdn, pac, ehooks, new_addr, - size, alignment, zero, &commit, + void *new_addr = (expand_edata == NULL) + ? NULL + : edata_past_get(expand_edata); + edata = extent_alloc_wrapper(tsdn, pac, ehooks, new_addr, size, + alignment, zero, &commit, /* growing_retained */ false); } @@ -148,8 +149,8 @@ ecache_dalloc(tsdn_t *tsdn, pac_t *pac, ehooks_t *ehooks, ecache_t *ecache, assert(edata_base_get(edata) != NULL); assert(edata_size_get(edata) != 0); assert(edata_pai_get(edata) == EXTENT_PAI_PAC); - witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn), - WITNESS_RANK_CORE, 0); + witness_assert_depth_to_rank( + tsdn_witness_tsdp_get(tsdn), WITNESS_RANK_CORE, 0); edata_addr_set(edata, edata_base_get(edata)); edata_zeroed_set(edata, false); @@ -158,8 +159,8 @@ ecache_dalloc(tsdn_t *tsdn, pac_t *pac, ehooks_t *ehooks, ecache_t *ecache, } edata_t * -ecache_evict(tsdn_t *tsdn, pac_t *pac, ehooks_t *ehooks, - ecache_t *ecache, size_t npages_min) { +ecache_evict(tsdn_t *tsdn, pac_t *pac, ehooks_t *ehooks, ecache_t *ecache, + size_t npages_min) { malloc_mutex_lock(tsdn, &ecache->mtx); /* @@ -194,8 +195,8 @@ ecache_evict(tsdn_t *tsdn, pac_t *pac, ehooks_t *ehooks, break; } /* Try to coalesce. 
*/ - if (extent_try_delayed_coalesce(tsdn, pac, ehooks, ecache, - edata)) { + if (extent_try_delayed_coalesce( + tsdn, pac, ehooks, ecache, edata)) { break; } /* @@ -211,8 +212,8 @@ ecache_evict(tsdn_t *tsdn, pac_t *pac, ehooks_t *ehooks, switch (ecache->state) { case extent_state_dirty: case extent_state_muzzy: - emap_update_edata_state(tsdn, pac->emap, edata, - extent_state_active); + emap_update_edata_state( + tsdn, pac->emap, edata, extent_state_active); break; case extent_state_retained: extent_deregister(tsdn, pac, edata); @@ -238,16 +239,16 @@ extents_abandon_vm(tsdn_t *tsdn, pac_t *pac, ehooks_t *ehooks, ecache_t *ecache, edata_t *edata, bool growing_retained) { size_t sz = edata_size_get(edata); if (config_stats) { - atomic_fetch_add_zu(&pac->stats->abandoned_vm, sz, - ATOMIC_RELAXED); + atomic_fetch_add_zu( + &pac->stats->abandoned_vm, sz, ATOMIC_RELAXED); } /* * Leak extent after making sure its pages have already been purged, so * that this is only a virtual memory leak. */ if (ecache->state == extent_state_dirty) { - if (extent_purge_lazy_impl(tsdn, ehooks, edata, 0, sz, - growing_retained)) { + if (extent_purge_lazy_impl( + tsdn, ehooks, edata, 0, sz, growing_retained)) { extent_purge_forced_impl(tsdn, ehooks, edata, 0, edata_size_get(edata), growing_retained); } @@ -256,20 +257,20 @@ extents_abandon_vm(tsdn_t *tsdn, pac_t *pac, ehooks_t *ehooks, ecache_t *ecache, } static void -extent_deactivate_locked_impl(tsdn_t *tsdn, pac_t *pac, ecache_t *ecache, - edata_t *edata) { +extent_deactivate_locked_impl( + tsdn_t *tsdn, pac_t *pac, ecache_t *ecache, edata_t *edata) { malloc_mutex_assert_owner(tsdn, &ecache->mtx); assert(edata_arena_ind_get(edata) == ecache_ind_get(ecache)); emap_update_edata_state(tsdn, pac->emap, edata, ecache->state); - eset_t *eset = edata_guarded_get(edata) ? &ecache->guarded_eset : - &ecache->eset; + eset_t *eset = edata_guarded_get(edata) ? 
&ecache->guarded_eset + : &ecache->eset; eset_insert(eset, edata); } static void -extent_deactivate_locked(tsdn_t *tsdn, pac_t *pac, ecache_t *ecache, - edata_t *edata) { +extent_deactivate_locked( + tsdn_t *tsdn, pac_t *pac, ecache_t *ecache, edata_t *edata) { assert(edata_state_get(edata) == extent_state_active); extent_deactivate_locked_impl(tsdn, pac, ecache, edata); } @@ -282,11 +283,11 @@ extent_deactivate_check_state_locked(tsdn_t *tsdn, pac_t *pac, ecache_t *ecache, } static void -extent_activate_locked(tsdn_t *tsdn, pac_t *pac, ecache_t *ecache, eset_t *eset, - edata_t *edata) { +extent_activate_locked( + tsdn_t *tsdn, pac_t *pac, ecache_t *ecache, eset_t *eset, edata_t *edata) { assert(edata_arena_ind_get(edata) == ecache_ind_get(ecache)); - assert(edata_state_get(edata) == ecache->state || - edata_state_get(edata) == extent_state_merging); + assert(edata_state_get(edata) == ecache->state + || edata_state_get(edata) == extent_state_merging); eset_remove(eset, edata); emap_update_edata_state(tsdn, pac->emap, edata, extent_state_active); @@ -296,16 +297,18 @@ void extent_gdump_add(tsdn_t *tsdn, const edata_t *edata) { cassert(config_prof); /* prof_gdump() requirement. 
*/ - witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn), - WITNESS_RANK_CORE, 0); + witness_assert_depth_to_rank( + tsdn_witness_tsdp_get(tsdn), WITNESS_RANK_CORE, 0); if (opt_prof && edata_state_get(edata) == extent_state_active) { size_t nadd = edata_size_get(edata) >> LG_PAGE; - size_t cur = atomic_fetch_add_zu(&curpages, nadd, - ATOMIC_RELAXED) + nadd; + size_t cur = atomic_fetch_add_zu( + &curpages, nadd, ATOMIC_RELAXED) + + nadd; size_t high = atomic_load_zu(&highpages, ATOMIC_RELAXED); - while (cur > high && !atomic_compare_exchange_weak_zu( - &highpages, &high, cur, ATOMIC_RELAXED, ATOMIC_RELAXED)) { + while (cur > high + && !atomic_compare_exchange_weak_zu(&highpages, &high, cur, + ATOMIC_RELAXED, ATOMIC_RELAXED)) { /* * Don't refresh cur, because it may have decreased * since this thread lost the highpages update race. @@ -337,7 +340,7 @@ extent_register_impl(tsdn_t *tsdn, pac_t *pac, edata_t *edata, bool gdump_add) { * prevents other threads from accessing the edata. */ if (emap_register_boundary(tsdn, pac->emap, edata, SC_NSIZES, - /* slab */ false)) { + /* slab */ false)) { return true; } @@ -368,8 +371,7 @@ extent_reregister(tsdn_t *tsdn, pac_t *pac, edata_t *edata) { * Removes all pointers to the given extent from the global rtree. */ static void -extent_deregister_impl(tsdn_t *tsdn, pac_t *pac, edata_t *edata, - bool gdump) { +extent_deregister_impl(tsdn_t *tsdn, pac_t *pac, edata_t *edata, bool gdump) { emap_deregister_boundary(tsdn, pac->emap, edata); if (config_prof && gdump) { @@ -383,8 +385,7 @@ extent_deregister(tsdn_t *tsdn, pac_t *pac, edata_t *edata) { } static void -extent_deregister_no_gdump_sub(tsdn_t *tsdn, pac_t *pac, - edata_t *edata) { +extent_deregister_no_gdump_sub(tsdn_t *tsdn, pac_t *pac, edata_t *edata) { extent_deregister_impl(tsdn, pac, edata, false); } @@ -411,7 +412,7 @@ extent_recycle_extract(tsdn_t *tsdn, pac_t *pac, ehooks_t *ehooks, } edata_t *edata; - eset_t *eset = guarded ? 
&ecache->guarded_eset : &ecache->eset; + eset_t *eset = guarded ? &ecache->guarded_eset : &ecache->eset; if (expand_edata != NULL) { edata = emap_try_acquire_edata_neighbor_expand(tsdn, pac->emap, expand_edata, EXTENT_PAI_PAC, ecache->state); @@ -419,8 +420,8 @@ extent_recycle_extract(tsdn_t *tsdn, pac_t *pac, ehooks_t *ehooks, /* NOLINTNEXTLINE(readability-suspicious-call-argument) */ extent_assert_can_expand(expand_edata, edata); if (edata_size_get(edata) < size) { - emap_release_edata(tsdn, pac->emap, edata, - ecache->state); + emap_release_edata( + tsdn, pac->emap, edata, ecache->state); edata = NULL; } } @@ -435,7 +436,8 @@ extent_recycle_extract(tsdn_t *tsdn, pac_t *pac, ehooks_t *ehooks, * put a cap on how big an extent we can split for a request. */ unsigned lg_max_fit = ecache->delay_coalesce - ? (unsigned)opt_lg_extent_max_active_fit : SC_PTR_BITS; + ? (unsigned)opt_lg_extent_max_active_fit + : SC_PTR_BITS; /* * If split and merge are not allowed (Windows w/o retain), try @@ -446,8 +448,7 @@ extent_recycle_extract(tsdn_t *tsdn, pac_t *pac, ehooks_t *ehooks, * allocations. */ bool exact_only = (!maps_coalesce && !opt_retain) || guarded; - edata = eset_fit(eset, size, alignment, exact_only, - lg_max_fit); + edata = eset_fit(eset, size, alignment, exact_only, lg_max_fit); } if (edata == NULL) { return NULL; @@ -489,10 +490,11 @@ extent_split_interior(tsdn_t *tsdn, pac_t *pac, ehooks_t *ehooks, /* The result of splitting, in case of success. */ edata_t **edata, edata_t **lead, edata_t **trail, /* The mess to clean up, in case of error. 
*/ - edata_t **to_leak, edata_t **to_salvage, - edata_t *expand_edata, size_t size, size_t alignment) { + edata_t **to_leak, edata_t **to_salvage, edata_t *expand_edata, size_t size, + size_t alignment) { size_t leadsize = ALIGNMENT_CEILING((uintptr_t)edata_base_get(*edata), - PAGE_CEILING(alignment)) - (uintptr_t)edata_base_get(*edata); + PAGE_CEILING(alignment)) + - (uintptr_t)edata_base_get(*edata); assert(expand_edata == NULL || leadsize == 0); if (edata_size_get(*edata) < leadsize + size) { return extent_split_interior_cant_alloc; @@ -547,14 +549,14 @@ extent_recycle_split(tsdn_t *tsdn, pac_t *pac, ehooks_t *ehooks, assert(!edata_guarded_get(edata) || size == edata_size_get(edata)); malloc_mutex_assert_owner(tsdn, &ecache->mtx); - edata_t *lead; - edata_t *trail; - edata_t *to_leak JEMALLOC_CC_SILENCE_INIT(NULL); + edata_t *lead; + edata_t *trail; + edata_t *to_leak JEMALLOC_CC_SILENCE_INIT(NULL); edata_t *to_salvage JEMALLOC_CC_SILENCE_INIT(NULL); - extent_split_interior_result_t result = extent_split_interior( - tsdn, pac, ehooks, &edata, &lead, &trail, &to_leak, &to_salvage, - expand_edata, size, alignment); + extent_split_interior_result_t result = extent_split_interior(tsdn, pac, + ehooks, &edata, &lead, &trail, &to_leak, &to_salvage, expand_edata, + size, alignment); if (!maps_coalesce && result != extent_split_interior_ok && !opt_retain) { @@ -615,8 +617,8 @@ extent_recycle(tsdn_t *tsdn, pac_t *pac, ehooks_t *ehooks, ecache_t *ecache, malloc_mutex_lock(tsdn, &ecache->mtx); - edata_t *edata = extent_recycle_extract(tsdn, pac, ehooks, ecache, - expand_edata, size, alignment, guarded); + edata_t *edata = extent_recycle_extract( + tsdn, pac, ehooks, ecache, expand_edata, size, alignment, guarded); if (edata == NULL) { malloc_mutex_unlock(tsdn, &ecache->mtx); return NULL; @@ -630,8 +632,8 @@ extent_recycle(tsdn_t *tsdn, pac_t *pac, ehooks_t *ehooks, ecache_t *ecache, } assert(edata_state_get(edata) == extent_state_active); - if (extent_commit_zero(tsdn, 
ehooks, edata, *commit, zero, - growing_retained)) { + if (extent_commit_zero( + tsdn, ehooks, edata, *commit, zero, growing_retained)) { extent_record(tsdn, pac, ehooks, ecache, edata); return NULL; } @@ -660,16 +662,16 @@ extent_handle_huge_arena_thp(tsdn_t *tsdn, pac_thp_t *pac_thp, * be within the range of [0, 2 * (HUGEPAGE - 1)]. */ void *huge_addr = HUGEPAGE_ADDR2BASE(addr); - void *huge_end = HUGEPAGE_ADDR2BASE((void *)((byte_t *)addr + - (uintptr_t)(size + HUGEPAGE - 1))); + void *huge_end = HUGEPAGE_ADDR2BASE( + (void *)((byte_t *)addr + (uintptr_t)(size + HUGEPAGE - 1))); assert((uintptr_t)huge_end > (uintptr_t)huge_addr); size_t huge_size = (uintptr_t)huge_end - (uintptr_t)huge_addr; - assert(huge_size <= (size + ((HUGEPAGE - 1) << 1)) && - huge_size >= size); + assert( + huge_size <= (size + ((HUGEPAGE - 1) << 1)) && huge_size >= size); - if (opt_metadata_thp == metadata_thp_always || - pac_thp->auto_thp_switched) { + if (opt_metadata_thp == metadata_thp_always + || pac_thp->auto_thp_switched) { pages_huge(huge_addr, huge_size); } else { assert(opt_metadata_thp == metadata_thp_auto); @@ -687,8 +689,10 @@ extent_handle_huge_arena_thp(tsdn_t *tsdn, pac_thp_t *pac_thp, if (edata != NULL) { edata_addr_set(edata, huge_addr); edata_size_set(edata, huge_size); - edata_list_active_append(&pac_thp->thp_lazy_list, edata); - atomic_fetch_add_u(&pac_thp->n_thp_lazy, 1, ATOMIC_RELAXED); + edata_list_active_append( + &pac_thp->thp_lazy_list, edata); + atomic_fetch_add_u( + &pac_thp->n_thp_lazy, 1, ATOMIC_RELAXED); } malloc_mutex_unlock(tsdn, &pac_thp->lock); } @@ -702,8 +706,8 @@ extent_handle_huge_arena_thp(tsdn_t *tsdn, pac_thp_t *pac_thp, * virtual memory ranges retained by each shard. 
*/ static edata_t * -extent_grow_retained(tsdn_t *tsdn, pac_t *pac, ehooks_t *ehooks, - size_t size, size_t alignment, bool zero, bool *commit) { +extent_grow_retained(tsdn_t *tsdn, pac_t *pac, ehooks_t *ehooks, size_t size, + size_t alignment, bool zero, bool *commit) { malloc_mutex_assert_owner(tsdn, &pac->grow_mtx); size_t alloc_size_min = size + PAGE_CEILING(alignment) - PAGE; @@ -715,10 +719,10 @@ extent_grow_retained(tsdn_t *tsdn, pac_t *pac, ehooks_t *ehooks, * Find the next extent size in the series that would be large enough to * satisfy this request. */ - size_t alloc_size; + size_t alloc_size; pszind_t exp_grow_skip; - bool err = exp_grow_size_prepare(&pac->exp_grow, alloc_size_min, - &alloc_size, &exp_grow_skip); + bool err = exp_grow_size_prepare( + &pac->exp_grow, alloc_size_min, &alloc_size, &exp_grow_skip); if (err) { goto label_err; } @@ -730,8 +734,8 @@ extent_grow_retained(tsdn_t *tsdn, pac_t *pac, ehooks_t *ehooks, bool zeroed = false; bool committed = false; - void *ptr = ehooks_alloc(tsdn, ehooks, NULL, alloc_size, PAGE, &zeroed, - &committed); + void *ptr = ehooks_alloc( + tsdn, ehooks, NULL, alloc_size, PAGE, &zeroed, &committed); if (ptr == NULL) { edata_cache_put(tsdn, pac->edata_cache, edata); @@ -752,23 +756,23 @@ extent_grow_retained(tsdn_t *tsdn, pac_t *pac, ehooks_t *ehooks, *commit = true; } - edata_t *lead; - edata_t *trail; - edata_t *to_leak JEMALLOC_CC_SILENCE_INIT(NULL); + edata_t *lead; + edata_t *trail; + edata_t *to_leak JEMALLOC_CC_SILENCE_INIT(NULL); edata_t *to_salvage JEMALLOC_CC_SILENCE_INIT(NULL); - extent_split_interior_result_t result = extent_split_interior(tsdn, - pac, ehooks, &edata, &lead, &trail, &to_leak, &to_salvage, NULL, - size, alignment); + extent_split_interior_result_t result = extent_split_interior(tsdn, pac, + ehooks, &edata, &lead, &trail, &to_leak, &to_salvage, NULL, size, + alignment); if (result == extent_split_interior_ok) { if (lead != NULL) { - extent_record(tsdn, pac, ehooks, 
&pac->ecache_retained, - lead); + extent_record( + tsdn, pac, ehooks, &pac->ecache_retained, lead); } if (trail != NULL) { - extent_record(tsdn, pac, ehooks, &pac->ecache_retained, - trail); + extent_record( + tsdn, pac, ehooks, &pac->ecache_retained, trail); } } else { /* @@ -792,15 +796,15 @@ extent_grow_retained(tsdn_t *tsdn, pac_t *pac, ehooks_t *ehooks, } if (*commit && !edata_committed_get(edata)) { - if (extent_commit_impl(tsdn, ehooks, edata, 0, - edata_size_get(edata), true)) { - extent_record(tsdn, pac, ehooks, - &pac->ecache_retained, edata); + if (extent_commit_impl( + tsdn, ehooks, edata, 0, edata_size_get(edata), true)) { + extent_record( + tsdn, pac, ehooks, &pac->ecache_retained, edata); goto label_err; } /* A successful commit should return zeroed memory. */ if (config_debug) { - void *addr = edata_addr_get(edata); + void *addr = edata_addr_get(edata); size_t *p = (size_t *)addr; /* Check the first page only. */ for (size_t i = 0; i < PAGE / sizeof(size_t); i++) { @@ -819,8 +823,9 @@ extent_grow_retained(tsdn_t *tsdn, pac_t *pac, ehooks_t *ehooks, if (huge_arena_pac_thp.thp_madvise) { /* Avoid using HUGEPAGE when the grow size is less than HUGEPAGE. 
*/ - if (ind != 0 && ind == huge_arena_ind && ehooks_are_default(ehooks) && - likely(alloc_size >= HUGEPAGE)) { + if (ind != 0 && ind == huge_arena_ind + && ehooks_are_default(ehooks) + && likely(alloc_size >= HUGEPAGE)) { extent_handle_huge_arena_thp(tsdn, &huge_arena_pac_thp, pac->edata_cache, ptr, alloc_size); } @@ -831,8 +836,8 @@ extent_grow_retained(tsdn_t *tsdn, pac_t *pac, ehooks_t *ehooks, extent_gdump_add(tsdn, edata); } if (zero && !edata_zeroed_get(edata)) { - ehooks_zero(tsdn, ehooks, edata_base_get(edata), - edata_size_get(edata)); + ehooks_zero( + tsdn, ehooks, edata_base_get(edata), edata_size_get(edata)); } return edata; label_err: @@ -858,8 +863,8 @@ extent_alloc_retained(tsdn_t *tsdn, pac_t *pac, ehooks_t *ehooks, extent_gdump_add(tsdn, edata); } } else if (opt_retain && expand_edata == NULL && !guarded) { - edata = extent_grow_retained(tsdn, pac, ehooks, size, - alignment, zero, commit); + edata = extent_grow_retained( + tsdn, pac, ehooks, size, alignment, zero, commit); /* extent_grow_retained() always releases pac->grow_mtx. */ } else { malloc_mutex_unlock(tsdn, &pac->grow_mtx); @@ -875,12 +880,12 @@ extent_coalesce(tsdn_t *tsdn, pac_t *pac, ehooks_t *ehooks, ecache_t *ecache, extent_assert_can_coalesce(inner, outer); eset_remove(&ecache->eset, outer); - bool err = extent_merge_impl(tsdn, pac, ehooks, - forward ? inner : outer, forward ? outer : inner, + bool err = extent_merge_impl(tsdn, pac, ehooks, forward ? inner : outer, + forward ? outer : inner, /* holding_core_locks */ true); if (err) { - extent_deactivate_check_state_locked(tsdn, pac, ecache, outer, - extent_state_merging); + extent_deactivate_check_state_locked( + tsdn, pac, ecache, outer, extent_state_merging); } return err; @@ -908,10 +913,12 @@ extent_try_coalesce_impl(tsdn_t *tsdn, pac_t *pac, ehooks_t *ehooks, /* Try to coalesce forward. 
*/ edata_t *next = emap_try_acquire_edata_neighbor(tsdn, pac->emap, edata, EXTENT_PAI_PAC, ecache->state, /* forward */ true); - size_t max_next_neighbor = max_size > edata_size_get(edata) ? max_size - edata_size_get(edata) : 0; + size_t max_next_neighbor = max_size > edata_size_get(edata) + ? max_size - edata_size_get(edata) + : 0; if (next != NULL && edata_size_get(next) <= max_next_neighbor) { - if (!extent_coalesce(tsdn, pac, ehooks, ecache, edata, - next, true)) { + if (!extent_coalesce( + tsdn, pac, ehooks, ecache, edata, next, true)) { if (ecache->delay_coalesce) { /* Do minimal coalescing. */ *coalesced = true; @@ -924,10 +931,12 @@ extent_try_coalesce_impl(tsdn_t *tsdn, pac_t *pac, ehooks_t *ehooks, /* Try to coalesce backward. */ edata_t *prev = emap_try_acquire_edata_neighbor(tsdn, pac->emap, edata, EXTENT_PAI_PAC, ecache->state, /* forward */ false); - size_t max_prev_neighbor = max_size > edata_size_get(edata) ? max_size - edata_size_get(edata) : 0; + size_t max_prev_neighbor = max_size > edata_size_get(edata) + ? max_size - edata_size_get(edata) + : 0; if (prev != NULL && edata_size_get(prev) <= max_prev_neighbor) { if (!extent_coalesce(tsdn, pac, ehooks, ecache, edata, - prev, false)) { + prev, false)) { edata = prev; if (ecache->delay_coalesce) { /* Do minimal coalescing. 
*/ @@ -948,36 +957,33 @@ extent_try_coalesce_impl(tsdn_t *tsdn, pac_t *pac, ehooks_t *ehooks, static edata_t * extent_try_coalesce(tsdn_t *tsdn, pac_t *pac, ehooks_t *ehooks, ecache_t *ecache, edata_t *edata, bool *coalesced) { - return extent_try_coalesce_impl(tsdn, pac, ehooks, ecache, edata, - SC_LARGE_MAXCLASS, coalesced); + return extent_try_coalesce_impl( + tsdn, pac, ehooks, ecache, edata, SC_LARGE_MAXCLASS, coalesced); } static edata_t * extent_try_coalesce_large(tsdn_t *tsdn, pac_t *pac, ehooks_t *ehooks, ecache_t *ecache, edata_t *edata, size_t max_size, bool *coalesced) { - return extent_try_coalesce_impl(tsdn, pac, ehooks, ecache, edata, - max_size, coalesced); + return extent_try_coalesce_impl( + tsdn, pac, ehooks, ecache, edata, max_size, coalesced); } /* Purge a single extent to retained / unmapped directly. */ static void -extent_maximally_purge(tsdn_t *tsdn, pac_t *pac, ehooks_t *ehooks, - edata_t *edata) { +extent_maximally_purge( + tsdn_t *tsdn, pac_t *pac, ehooks_t *ehooks, edata_t *edata) { size_t extent_size = edata_size_get(edata); extent_dalloc_wrapper(tsdn, pac, ehooks, edata); if (config_stats) { /* Update stats accordingly. 
*/ LOCKEDINT_MTX_LOCK(tsdn, *pac->stats_mtx); - locked_inc_u64(tsdn, - LOCKEDINT_MTX(*pac->stats_mtx), + locked_inc_u64(tsdn, LOCKEDINT_MTX(*pac->stats_mtx), &pac->stats->decay_dirty.nmadvise, 1); - locked_inc_u64(tsdn, - LOCKEDINT_MTX(*pac->stats_mtx), - &pac->stats->decay_dirty.purged, - extent_size >> LG_PAGE); + locked_inc_u64(tsdn, LOCKEDINT_MTX(*pac->stats_mtx), + &pac->stats->decay_dirty.purged, extent_size >> LG_PAGE); LOCKEDINT_MTX_UNLOCK(tsdn, *pac->stats_mtx); - atomic_fetch_sub_zu(&pac->stats->pac_mapped, extent_size, - ATOMIC_RELAXED); + atomic_fetch_sub_zu( + &pac->stats->pac_mapped, extent_size, ATOMIC_RELAXED); } } @@ -988,9 +994,9 @@ extent_maximally_purge(tsdn_t *tsdn, pac_t *pac, ehooks_t *ehooks, void extent_record(tsdn_t *tsdn, pac_t *pac, ehooks_t *ehooks, ecache_t *ecache, edata_t *edata) { - assert((ecache->state != extent_state_dirty && - ecache->state != extent_state_muzzy) || - !edata_zeroed_get(edata)); + assert((ecache->state != extent_state_dirty + && ecache->state != extent_state_muzzy) + || !edata_zeroed_get(edata)); malloc_mutex_lock(tsdn, &ecache->mtx); @@ -1001,8 +1007,8 @@ extent_record(tsdn_t *tsdn, pac_t *pac, ehooks_t *ehooks, ecache_t *ecache, } if (!ecache->delay_coalesce) { bool coalesced_unused; - edata = extent_try_coalesce(tsdn, pac, ehooks, ecache, edata, - &coalesced_unused); + edata = extent_try_coalesce( + tsdn, pac, ehooks, ecache, edata, &coalesced_unused); } else if (edata_size_get(edata) >= SC_LARGE_MINCLASS) { assert(ecache == &pac->ecache_dirty); /* Always coalesce large extents eagerly. */ @@ -1027,17 +1033,21 @@ extent_record(tsdn_t *tsdn, pac_t *pac, ehooks_t *ehooks, ecache_t *ecache, * the final coalescing that happens during the transition from dirty ecache * to muzzy/retained ecache states. 
*/ - unsigned lg_max_coalesce = (unsigned)opt_lg_extent_max_active_fit; + unsigned lg_max_coalesce = (unsigned) + opt_lg_extent_max_active_fit; size_t edata_size = edata_size_get(edata); - size_t max_size = (SC_LARGE_MAXCLASS >> lg_max_coalesce) > edata_size ? (edata_size << lg_max_coalesce) : SC_LARGE_MAXCLASS; - bool coalesced; + size_t max_size = (SC_LARGE_MAXCLASS >> lg_max_coalesce) + > edata_size + ? (edata_size << lg_max_coalesce) + : SC_LARGE_MAXCLASS; + bool coalesced; do { assert(edata_state_get(edata) == extent_state_active); edata = extent_try_coalesce_large(tsdn, pac, ehooks, ecache, edata, max_size, &coalesced); } while (coalesced); - if (edata_size_get(edata) >= - atomic_load_zu(&pac->oversize_threshold, ATOMIC_RELAXED) + if (edata_size_get(edata) >= atomic_load_zu( + &pac->oversize_threshold, ATOMIC_RELAXED) && !background_thread_enabled() && extent_may_force_decay(pac)) { /* Shortcut to purge the oversize extent eagerly. */ @@ -1053,10 +1063,9 @@ label_skip_coalesce: } void -extent_dalloc_gap(tsdn_t *tsdn, pac_t *pac, ehooks_t *ehooks, - edata_t *edata) { - witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn), - WITNESS_RANK_CORE, 0); +extent_dalloc_gap(tsdn_t *tsdn, pac_t *pac, ehooks_t *ehooks, edata_t *edata) { + witness_assert_depth_to_rank( + tsdn_witness_tsdp_get(tsdn), WITNESS_RANK_CORE, 0); if (extent_register(tsdn, pac, edata)) { edata_cache_put(tsdn, pac->edata_cache, edata); @@ -1066,14 +1075,14 @@ extent_dalloc_gap(tsdn_t *tsdn, pac_t *pac, ehooks_t *ehooks, } static bool -extent_dalloc_wrapper_try(tsdn_t *tsdn, pac_t *pac, ehooks_t *ehooks, - edata_t *edata) { +extent_dalloc_wrapper_try( + tsdn_t *tsdn, pac_t *pac, ehooks_t *ehooks, edata_t *edata) { bool err; assert(edata_base_get(edata) != NULL); assert(edata_size_get(edata) != 0); - witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn), - WITNESS_RANK_CORE, 0); + witness_assert_depth_to_rank( + tsdn_witness_tsdp_get(tsdn), WITNESS_RANK_CORE, 0); edata_addr_set(edata, 
edata_base_get(edata)); @@ -1089,8 +1098,8 @@ extent_dalloc_wrapper_try(tsdn_t *tsdn, pac_t *pac, ehooks_t *ehooks, } edata_t * -extent_alloc_wrapper(tsdn_t *tsdn, pac_t *pac, ehooks_t *ehooks, - void *new_addr, size_t size, size_t alignment, bool zero, bool *commit, +extent_alloc_wrapper(tsdn_t *tsdn, pac_t *pac, ehooks_t *ehooks, void *new_addr, + size_t size, size_t alignment, bool zero, bool *commit, bool growing_retained) { witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn), WITNESS_RANK_CORE, growing_retained ? 1 : 0); @@ -1100,14 +1109,14 @@ extent_alloc_wrapper(tsdn_t *tsdn, pac_t *pac, ehooks_t *ehooks, return NULL; } size_t palignment = ALIGNMENT_CEILING(alignment, PAGE); - void *addr = ehooks_alloc(tsdn, ehooks, new_addr, size, palignment, - &zero, commit); + void *addr = ehooks_alloc( + tsdn, ehooks, new_addr, size, palignment, &zero, commit); if (addr == NULL) { edata_cache_put(tsdn, pac->edata_cache, edata); return NULL; } - edata_init(edata, ecache_ind_get(&pac->ecache_dirty), addr, - size, /* slab */ false, SC_NSIZES, extent_sn_next(pac), + edata_init(edata, ecache_ind_get(&pac->ecache_dirty), addr, size, + /* slab */ false, SC_NSIZES, extent_sn_next(pac), extent_state_active, zero, *commit, EXTENT_PAI_PAC, opt_retain ? 
EXTENT_IS_HEAD : EXTENT_NOT_HEAD); /* @@ -1125,8 +1134,8 @@ extent_alloc_wrapper(tsdn_t *tsdn, pac_t *pac, ehooks_t *ehooks, } static void -extent_dalloc_wrapper_finish(tsdn_t *tsdn, pac_t *pac, ehooks_t *ehooks, - edata_t *edata) { +extent_dalloc_wrapper_finish( + tsdn_t *tsdn, pac_t *pac, ehooks_t *ehooks, edata_t *edata) { if (config_prof) { extent_gdump_sub(tsdn, edata); } @@ -1134,11 +1143,11 @@ extent_dalloc_wrapper_finish(tsdn_t *tsdn, pac_t *pac, ehooks_t *ehooks, } void -extent_dalloc_wrapper_purged(tsdn_t *tsdn, pac_t *pac, ehooks_t *ehooks, - edata_t *edata) { +extent_dalloc_wrapper_purged( + tsdn_t *tsdn, pac_t *pac, ehooks_t *ehooks, edata_t *edata) { assert(edata_pai_get(edata) == EXTENT_PAI_PAC); - witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn), - WITNESS_RANK_CORE, 0); + witness_assert_depth_to_rank( + tsdn_witness_tsdp_get(tsdn), WITNESS_RANK_CORE, 0); /* Verify that will not go down the dalloc / munmap route. */ assert(ehooks_dalloc_will_fail(ehooks)); @@ -1148,19 +1157,19 @@ extent_dalloc_wrapper_purged(tsdn_t *tsdn, pac_t *pac, ehooks_t *ehooks, } void -extent_dalloc_wrapper(tsdn_t *tsdn, pac_t *pac, ehooks_t *ehooks, - edata_t *edata) { +extent_dalloc_wrapper( + tsdn_t *tsdn, pac_t *pac, ehooks_t *ehooks, edata_t *edata) { assert(edata_pai_get(edata) == EXTENT_PAI_PAC); - witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn), - WITNESS_RANK_CORE, 0); + witness_assert_depth_to_rank( + tsdn_witness_tsdp_get(tsdn), WITNESS_RANK_CORE, 0); /* Avoid calling the default extent_dalloc unless have to. */ if (!ehooks_dalloc_will_fail(ehooks)) { /* Remove guard pages for dalloc / unmap. 
*/ if (edata_guarded_get(edata)) { assert(ehooks_are_default(ehooks)); - san_unguard_pages_two_sided(tsdn, ehooks, edata, - pac->emap); + san_unguard_pages_two_sided( + tsdn, ehooks, edata, pac->emap); } /* * Deregister first to avoid a race with other allocating @@ -1177,15 +1186,15 @@ extent_dalloc_wrapper(tsdn_t *tsdn, pac_t *pac, ehooks_t *ehooks, bool zeroed; if (!edata_committed_get(edata)) { zeroed = true; - } else if (!extent_decommit_wrapper(tsdn, ehooks, edata, 0, - edata_size_get(edata))) { + } else if (!extent_decommit_wrapper( + tsdn, ehooks, edata, 0, edata_size_get(edata))) { zeroed = true; } else if (!ehooks_purge_forced(tsdn, ehooks, edata_base_get(edata), - edata_size_get(edata), 0, edata_size_get(edata))) { + edata_size_get(edata), 0, edata_size_get(edata))) { zeroed = true; - } else if (edata_state_get(edata) == extent_state_muzzy || - !ehooks_purge_lazy(tsdn, ehooks, edata_base_get(edata), - edata_size_get(edata), 0, edata_size_get(edata))) { + } else if (edata_state_get(edata) == extent_state_muzzy + || !ehooks_purge_lazy(tsdn, ehooks, edata_base_get(edata), + edata_size_get(edata), 0, edata_size_get(edata))) { zeroed = false; } else { zeroed = false; @@ -1196,15 +1205,15 @@ extent_dalloc_wrapper(tsdn_t *tsdn, pac_t *pac, ehooks_t *ehooks, } void -extent_destroy_wrapper(tsdn_t *tsdn, pac_t *pac, ehooks_t *ehooks, - edata_t *edata) { +extent_destroy_wrapper( + tsdn_t *tsdn, pac_t *pac, ehooks_t *ehooks, edata_t *edata) { assert(edata_base_get(edata) != NULL); assert(edata_size_get(edata) != 0); extent_state_t state = edata_state_get(edata); assert(state == extent_state_retained || state == extent_state_active); assert(emap_edata_is_acquired(tsdn, pac->emap, edata)); - witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn), - WITNESS_RANK_CORE, 0); + witness_assert_depth_to_rank( + tsdn_witness_tsdp_get(tsdn), WITNESS_RANK_CORE, 0); if (edata_guarded_get(edata)) { assert(opt_retain); @@ -1240,8 +1249,8 @@ extent_commit_wrapper(tsdn_t *tsdn, 
ehooks_t *ehooks, edata_t *edata, static bool extent_decommit_wrapper(tsdn_t *tsdn, ehooks_t *ehooks, edata_t *edata, size_t offset, size_t length) { - witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn), - WITNESS_RANK_CORE, 0); + witness_assert_depth_to_rank( + tsdn_witness_tsdp_get(tsdn), WITNESS_RANK_CORE, 0); bool err = ehooks_decommit(tsdn, ehooks, edata_base_get(edata), edata_size_get(edata), offset, length); edata_committed_set(edata, edata_committed_get(edata) && err); @@ -1261,8 +1270,8 @@ extent_purge_lazy_impl(tsdn_t *tsdn, ehooks_t *ehooks, edata_t *edata, bool extent_purge_lazy_wrapper(tsdn_t *tsdn, ehooks_t *ehooks, edata_t *edata, size_t offset, size_t length) { - return extent_purge_lazy_impl(tsdn, ehooks, edata, offset, - length, false); + return extent_purge_lazy_impl( + tsdn, ehooks, edata, offset, length, false); } static bool @@ -1278,8 +1287,8 @@ extent_purge_forced_impl(tsdn_t *tsdn, ehooks_t *ehooks, edata_t *edata, bool extent_purge_forced_wrapper(tsdn_t *tsdn, ehooks_t *ehooks, edata_t *edata, size_t offset, size_t length) { - return extent_purge_forced_impl(tsdn, ehooks, edata, offset, length, - false); + return extent_purge_forced_impl( + tsdn, ehooks, edata, offset, length, false); } /* @@ -1290,16 +1299,16 @@ extent_purge_forced_wrapper(tsdn_t *tsdn, ehooks_t *ehooks, edata_t *edata, * and returns the trail (except in case of error). */ static edata_t * -extent_split_impl(tsdn_t *tsdn, pac_t *pac, ehooks_t *ehooks, - edata_t *edata, size_t size_a, size_t size_b, bool holding_core_locks) { +extent_split_impl(tsdn_t *tsdn, pac_t *pac, ehooks_t *ehooks, edata_t *edata, + size_t size_a, size_t size_b, bool holding_core_locks) { assert(edata_size_get(edata) == size_a + size_b); /* Only the shrink path may split w/o holding core locks. 
*/ if (holding_core_locks) { witness_assert_positive_depth_to_rank( tsdn_witness_tsdp_get(tsdn), WITNESS_RANK_CORE); } else { - witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn), - WITNESS_RANK_CORE, 0); + witness_assert_depth_to_rank( + tsdn_witness_tsdp_get(tsdn), WITNESS_RANK_CORE, 0); } if (ehooks_split_will_fail(ehooks)) { @@ -1317,8 +1326,8 @@ extent_split_impl(tsdn_t *tsdn, pac_t *pac, ehooks_t *ehooks, edata_state_get(edata), edata_zeroed_get(edata), edata_committed_get(edata), EXTENT_PAI_PAC, EXTENT_NOT_HEAD); emap_prepare_t prepare; - bool err = emap_split_prepare(tsdn, pac->emap, &prepare, edata, - size_a, trail, size_b); + bool err = emap_split_prepare( + tsdn, pac->emap, &prepare, edata, size_a, trail, size_b); if (err) { goto label_error_b; } @@ -1340,8 +1349,8 @@ extent_split_impl(tsdn_t *tsdn, pac_t *pac, ehooks_t *ehooks, } edata_size_set(edata, size_a); - emap_split_commit(tsdn, pac->emap, &prepare, edata, size_a, trail, - size_b); + emap_split_commit( + tsdn, pac->emap, &prepare, edata, size_a, trail, size_b); return trail; label_error_b: @@ -1353,8 +1362,8 @@ label_error_a: edata_t * extent_split_wrapper(tsdn_t *tsdn, pac_t *pac, ehooks_t *ehooks, edata_t *edata, size_t size_a, size_t size_b, bool holding_core_locks) { - return extent_split_impl(tsdn, pac, ehooks, edata, size_a, size_b, - holding_core_locks); + return extent_split_impl( + tsdn, pac, ehooks, edata, size_a, size_b, holding_core_locks); } static bool @@ -1365,8 +1374,8 @@ extent_merge_impl(tsdn_t *tsdn, pac_t *pac, ehooks_t *ehooks, edata_t *a, witness_assert_positive_depth_to_rank( tsdn_witness_tsdp_get(tsdn), WITNESS_RANK_CORE); } else { - witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn), - WITNESS_RANK_CORE, 0); + witness_assert_depth_to_rank( + tsdn_witness_tsdp_get(tsdn), WITNESS_RANK_CORE, 0); } assert(edata_base_get(a) < edata_base_get(b)); @@ -1391,12 +1400,13 @@ extent_merge_impl(tsdn_t *tsdn, pac_t *pac, ehooks_t *ehooks, edata_t *a, emap_prepare_t prepare; 
emap_merge_prepare(tsdn, pac->emap, &prepare, a, b); - assert(edata_state_get(a) == extent_state_active || - edata_state_get(a) == extent_state_merging); + assert(edata_state_get(a) == extent_state_active + || edata_state_get(a) == extent_state_merging); edata_state_set(a, extent_state_active); edata_size_set(a, edata_size_get(a) + edata_size_get(b)); - edata_sn_set(a, (edata_sn_get(a) < edata_sn_get(b)) ? - edata_sn_get(a) : edata_sn_get(b)); + edata_sn_set(a, + (edata_sn_get(a) < edata_sn_get(b)) ? edata_sn_get(a) + : edata_sn_get(b)); edata_zeroed_set(a, edata_zeroed_get(a) && edata_zeroed_get(b)); emap_merge_commit(tsdn, pac->emap, &prepare, a, b); @@ -1407,26 +1417,26 @@ extent_merge_impl(tsdn_t *tsdn, pac_t *pac, ehooks_t *ehooks, edata_t *a, } bool -extent_merge_wrapper(tsdn_t *tsdn, pac_t *pac, ehooks_t *ehooks, - edata_t *a, edata_t *b) { +extent_merge_wrapper( + tsdn_t *tsdn, pac_t *pac, ehooks_t *ehooks, edata_t *a, edata_t *b) { return extent_merge_impl(tsdn, pac, ehooks, a, b, /* holding_core_locks */ false); } bool -extent_commit_zero(tsdn_t *tsdn, ehooks_t *ehooks, edata_t *edata, - bool commit, bool zero, bool growing_retained) { +extent_commit_zero(tsdn_t *tsdn, ehooks_t *ehooks, edata_t *edata, bool commit, + bool zero, bool growing_retained) { witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn), WITNESS_RANK_CORE, growing_retained ? 
1 : 0); if (commit && !edata_committed_get(edata)) { if (extent_commit_impl(tsdn, ehooks, edata, 0, - edata_size_get(edata), growing_retained)) { + edata_size_get(edata), growing_retained)) { return true; } } if (zero && !edata_zeroed_get(edata)) { - void *addr = edata_base_get(edata); + void *addr = edata_base_get(edata); size_t size = edata_size_get(edata); ehooks_zero(tsdn, ehooks, addr, size); } diff --git a/src/extent_dss.c b/src/extent_dss.c index 32fb4112..3f7a15d0 100644 --- a/src/extent_dss.c +++ b/src/extent_dss.c @@ -11,14 +11,10 @@ /* NOLINTNEXTLINE(performance-no-int-to-ptr) */ #define SBRK_INVALID ((void *)-1) -const char *opt_dss = DSS_DEFAULT; +const char *opt_dss = DSS_DEFAULT; -const char *const dss_prec_names[] = { - "disabled", - "primary", - "secondary", - "N/A" -}; +const char *const dss_prec_names[] = { + "disabled", "primary", "secondary", "N/A"}; /* * Current dss precedence default, used when creating new arenas. NB: This is @@ -26,17 +22,16 @@ const char *const dss_prec_names[] = { * guarantee that sizeof(dss_prec_t) is the same as sizeof(unsigned), and we use * atomic operations to synchronize the setting. */ -static atomic_u_t dss_prec_default = ATOMIC_INIT( - (unsigned)DSS_PREC_DEFAULT); +static atomic_u_t dss_prec_default = ATOMIC_INIT((unsigned)DSS_PREC_DEFAULT); /* Base address of the DSS. */ -static void *dss_base; +static void *dss_base; /* Atomic boolean indicating whether a thread is currently extending DSS. */ -static atomic_b_t dss_extending; +static atomic_b_t dss_extending; /* Atomic boolean indicating whether the DSS is exhausted. */ -static atomic_b_t dss_exhausted; +static atomic_b_t dss_exhausted; /* Atomic current upper limit on DSS addresses. 
*/ -static atomic_p_t dss_max; +static atomic_p_t dss_max; /******************************************************************************/ @@ -76,7 +71,7 @@ extent_dss_extending_start(void) { while (true) { bool expected = false; if (atomic_compare_exchange_weak_b(&dss_extending, &expected, - true, ATOMIC_ACQ_REL, ATOMIC_RELAXED)) { + true, ATOMIC_ACQ_REL, ATOMIC_RELAXED)) { break; } spin_adaptive(&spinner); @@ -143,24 +138,24 @@ extent_alloc_dss(tsdn_t *tsdn, arena_t *arena, void *new_addr, size_t size, goto label_oom; } - bool head_state = opt_retain ? EXTENT_IS_HEAD : - EXTENT_NOT_HEAD; + bool head_state = opt_retain ? EXTENT_IS_HEAD + : EXTENT_NOT_HEAD; /* * Compute how much page-aligned gap space (if any) is * necessary to satisfy alignment. This space can be * recycled for later use. */ - void *gap_addr_page = ALIGNMENT_ADDR2CEILING(max_cur, - PAGE); + void *gap_addr_page = ALIGNMENT_ADDR2CEILING( + max_cur, PAGE); void *ret = ALIGNMENT_ADDR2CEILING( gap_addr_page, alignment); - size_t gap_size_page = (uintptr_t)ret - - (uintptr_t)gap_addr_page; + size_t gap_size_page = (uintptr_t)ret + - (uintptr_t)gap_addr_page; if (gap_size_page != 0) { edata_init(gap, arena_ind_get(arena), gap_addr_page, gap_size_page, false, - SC_NSIZES, extent_sn_next( - &arena->pa_shard.pac), + SC_NSIZES, + extent_sn_next(&arena->pa_shard.pac), extent_state_active, false, true, EXTENT_PAI_PAC, head_state); } @@ -169,25 +164,25 @@ extent_alloc_dss(tsdn_t *tsdn, arena_t *arena, void *new_addr, size_t size, * allocation space. */ void *dss_next = (void *)((byte_t *)ret + size); - if ((uintptr_t)ret < (uintptr_t)max_cur || - (uintptr_t)dss_next < (uintptr_t)max_cur) { + if ((uintptr_t)ret < (uintptr_t)max_cur + || (uintptr_t)dss_next < (uintptr_t)max_cur) { goto label_oom; /* Wrap-around. */ } /* Compute the increment, including subpage bytes. 
*/ - void *gap_addr_subpage = max_cur; - size_t gap_size_subpage = (uintptr_t)ret - - (uintptr_t)gap_addr_subpage; + void *gap_addr_subpage = max_cur; + size_t gap_size_subpage = (uintptr_t)ret + - (uintptr_t)gap_addr_subpage; intptr_t incr = gap_size_subpage + size; - assert((uintptr_t)max_cur + incr == (uintptr_t)ret + - size); + assert( + (uintptr_t)max_cur + incr == (uintptr_t)ret + size); /* Try to allocate. */ void *dss_prev = extent_dss_sbrk(incr); if (dss_prev == max_cur) { /* Success. */ - atomic_store_p(&dss_max, dss_next, - ATOMIC_RELEASE); + atomic_store_p( + &dss_max, dss_next, ATOMIC_RELEASE); extent_dss_extending_finish(); if (gap_size_page != 0) { @@ -203,17 +198,16 @@ extent_alloc_dss(tsdn_t *tsdn, arena_t *arena, void *new_addr, size_t size, *commit = pages_decommit(ret, size); } if (*zero && *commit) { - edata_t edata = {0}; + edata_t edata = {0}; ehooks_t *ehooks = arena_get_ehooks( arena); - edata_init(&edata, - arena_ind_get(arena), ret, size, - size, false, SC_NSIZES, + edata_init(&edata, arena_ind_get(arena), + ret, size, size, false, SC_NSIZES, extent_state_active, false, true, EXTENT_PAI_PAC, head_state); if (extent_purge_forced_wrapper(tsdn, - ehooks, &edata, 0, size)) { + ehooks, &edata, 0, size)) { memset(ret, 0, size); } } @@ -225,8 +219,8 @@ extent_alloc_dss(tsdn_t *tsdn, arena_t *arena, void *new_addr, size_t size, */ if (dss_prev == SBRK_INVALID) { /* OOM. 
*/ - atomic_store_b(&dss_exhausted, true, - ATOMIC_RELEASE); + atomic_store_b( + &dss_exhausted, true, ATOMIC_RELEASE); goto label_oom; } } @@ -239,16 +233,16 @@ label_oom: static bool extent_in_dss_helper(void *addr, void *max) { - return ((uintptr_t)addr >= (uintptr_t)dss_base && (uintptr_t)addr < - (uintptr_t)max); + return ((uintptr_t)addr >= (uintptr_t)dss_base + && (uintptr_t)addr < (uintptr_t)max); } bool extent_in_dss(void *addr) { cassert(have_dss); - return extent_in_dss_helper(addr, atomic_load_p(&dss_max, - ATOMIC_ACQUIRE)); + return extent_in_dss_helper( + addr, atomic_load_p(&dss_max, ATOMIC_ACQUIRE)); } bool @@ -257,14 +251,14 @@ extent_dss_mergeable(void *addr_a, void *addr_b) { cassert(have_dss); - if ((uintptr_t)addr_a < (uintptr_t)dss_base && (uintptr_t)addr_b < - (uintptr_t)dss_base) { + if ((uintptr_t)addr_a < (uintptr_t)dss_base + && (uintptr_t)addr_b < (uintptr_t)dss_base) { return true; } max = atomic_load_p(&dss_max, ATOMIC_ACQUIRE); - return (extent_in_dss_helper(addr_a, max) == - extent_in_dss_helper(addr_b, max)); + return (extent_in_dss_helper(addr_a, max) + == extent_in_dss_helper(addr_b, max)); } void @@ -273,7 +267,8 @@ extent_dss_boot(void) { dss_base = extent_dss_sbrk(0); atomic_store_b(&dss_extending, false, ATOMIC_RELAXED); - atomic_store_b(&dss_exhausted, dss_base == SBRK_INVALID, ATOMIC_RELAXED); + atomic_store_b( + &dss_exhausted, dss_base == SBRK_INVALID, ATOMIC_RELAXED); atomic_store_p(&dss_max, dss_base, ATOMIC_RELAXED); } diff --git a/src/extent_mmap.c b/src/extent_mmap.c index 5f0ee2d2..d39bddc6 100644 --- a/src/extent_mmap.c +++ b/src/extent_mmap.c @@ -7,7 +7,7 @@ /******************************************************************************/ /* Data. 
*/ -bool opt_retain = +bool opt_retain = #ifdef JEMALLOC_RETAIN true #else @@ -18,8 +18,8 @@ bool opt_retain = /******************************************************************************/ void * -extent_alloc_mmap(void *new_addr, size_t size, size_t alignment, bool *zero, - bool *commit) { +extent_alloc_mmap( + void *new_addr, size_t size, size_t alignment, bool *zero, bool *commit) { assert(alignment == ALIGNMENT_CEILING(alignment, PAGE)); void *ret = pages_map(new_addr, size, alignment, commit); if (ret == NULL) { diff --git a/src/fxp.c b/src/fxp.c index 96585f0a..faeab207 100644 --- a/src/fxp.c +++ b/src/fxp.c @@ -83,8 +83,8 @@ fxp_parse(fxp_t *result, const char *str, char **end) { } assert(fractional_part < frac_div); - uint32_t fractional_repr = (uint32_t)( - (fractional_part << 16) / frac_div); + uint32_t fractional_repr = (uint32_t)((fractional_part << 16) + / frac_div); /* Success! */ *result = (integer_part << 16) + fractional_repr; @@ -99,7 +99,7 @@ fxp_print(fxp_t a, char buf[FXP_BUF_SIZE]) { uint32_t integer_part = fxp_round_down(a); uint32_t fractional_part = (a & ((1U << 16) - 1)); - int leading_fraction_zeros = 0; + int leading_fraction_zeros = 0; uint64_t fraction_digits = fractional_part; for (int i = 0; i < FXP_FRACTIONAL_PART_DIGITS; i++) { if (fraction_digits < (1U << 16) @@ -113,12 +113,12 @@ fxp_print(fxp_t a, char buf[FXP_BUF_SIZE]) { fraction_digits /= 10; } - size_t printed = malloc_snprintf(buf, FXP_BUF_SIZE, "%"FMTu32".", - integer_part); + size_t printed = malloc_snprintf( + buf, FXP_BUF_SIZE, "%" FMTu32 ".", integer_part); for (int i = 0; i < leading_fraction_zeros; i++) { buf[printed] = '0'; printed++; } - malloc_snprintf(&buf[printed], FXP_BUF_SIZE - printed, "%"FMTu64, - fraction_digits); + malloc_snprintf( + &buf[printed], FXP_BUF_SIZE - printed, "%" FMTu64, fraction_digits); } diff --git a/src/hook.c b/src/hook.c index 77a988d7..4270ad60 100644 --- a/src/hook.c +++ b/src/hook.c @@ -9,19 +9,19 @@ typedef struct hooks_internal_s 
hooks_internal_t; struct hooks_internal_s { hooks_t hooks; - bool in_use; + bool in_use; }; seq_define(hooks_internal_t, hooks) -static atomic_u_t nhooks = ATOMIC_INIT(0); -static seq_hooks_t hooks[HOOK_MAX]; + static atomic_u_t nhooks = ATOMIC_INIT(0); +static seq_hooks_t hooks[HOOK_MAX]; static malloc_mutex_t hooks_mu; bool hook_boot(void) { - return malloc_mutex_init(&hooks_mu, "hooks", WITNESS_RANK_HOOK, - malloc_mutex_rank_exclusive); + return malloc_mutex_init( + &hooks_mu, "hooks", WITNESS_RANK_HOOK, malloc_mutex_rank_exclusive); } static void * @@ -84,20 +84,18 @@ hook_remove(tsdn_t *tsdn, void *opaque) { malloc_mutex_unlock(tsdn, &hooks_mu); } -#define FOR_EACH_HOOK_BEGIN(hooks_internal_ptr) \ -for (int for_each_hook_counter = 0; \ - for_each_hook_counter < HOOK_MAX; \ - for_each_hook_counter++) { \ - bool for_each_hook_success = seq_try_load_hooks( \ - (hooks_internal_ptr), &hooks[for_each_hook_counter]); \ - if (!for_each_hook_success) { \ - continue; \ - } \ - if (!(hooks_internal_ptr)->in_use) { \ - continue; \ - } -#define FOR_EACH_HOOK_END \ -} +#define FOR_EACH_HOOK_BEGIN(hooks_internal_ptr) \ + for (int for_each_hook_counter = 0; for_each_hook_counter < HOOK_MAX; \ + for_each_hook_counter++) { \ + bool for_each_hook_success = seq_try_load_hooks( \ + (hooks_internal_ptr), &hooks[for_each_hook_counter]); \ + if (!for_each_hook_success) { \ + continue; \ + } \ + if (!(hooks_internal_ptr)->in_use) { \ + continue; \ + } +#define FOR_EACH_HOOK_END } static bool * hook_reentrantp(void) { @@ -129,26 +127,25 @@ hook_reentrantp(void) { * untouched. 
*/ static bool in_hook_global = true; - tsdn_t *tsdn = tsdn_fetch(); - bool *in_hook = tsdn_in_hookp_get(tsdn); - if (in_hook!= NULL) { + tsdn_t *tsdn = tsdn_fetch(); + bool *in_hook = tsdn_in_hookp_get(tsdn); + if (in_hook != NULL) { return in_hook; } return &in_hook_global; } -#define HOOK_PROLOGUE \ - if (likely(atomic_load_u(&nhooks, ATOMIC_RELAXED) == 0)) { \ - return; \ - } \ - bool *in_hook = hook_reentrantp(); \ - if (*in_hook) { \ - return; \ - } \ +#define HOOK_PROLOGUE \ + if (likely(atomic_load_u(&nhooks, ATOMIC_RELAXED) == 0)) { \ + return; \ + } \ + bool *in_hook = hook_reentrantp(); \ + if (*in_hook) { \ + return; \ + } \ *in_hook = true; -#define HOOK_EPILOGUE \ - *in_hook = false; +#define HOOK_EPILOGUE *in_hook = false; void hook_invoke_alloc(hook_alloc_t type, void *result, uintptr_t result_raw, @@ -157,10 +154,10 @@ hook_invoke_alloc(hook_alloc_t type, void *result, uintptr_t result_raw, hooks_internal_t hook; FOR_EACH_HOOK_BEGIN(&hook) - hook_alloc h = hook.hooks.alloc_hook; - if (h != NULL) { - h(hook.hooks.extra, type, result, result_raw, args_raw); - } + hook_alloc h = hook.hooks.alloc_hook; + if (h != NULL) { + h(hook.hooks.extra, type, result, result_raw, args_raw); + } FOR_EACH_HOOK_END HOOK_EPILOGUE @@ -171,10 +168,10 @@ hook_invoke_dalloc(hook_dalloc_t type, void *address, uintptr_t args_raw[3]) { HOOK_PROLOGUE hooks_internal_t hook; FOR_EACH_HOOK_BEGIN(&hook) - hook_dalloc h = hook.hooks.dalloc_hook; - if (h != NULL) { - h(hook.hooks.extra, type, address, args_raw); - } + hook_dalloc h = hook.hooks.dalloc_hook; + if (h != NULL) { + h(hook.hooks.extra, type, address, args_raw); + } FOR_EACH_HOOK_END HOOK_EPILOGUE } @@ -185,11 +182,11 @@ hook_invoke_expand(hook_expand_t type, void *address, size_t old_usize, HOOK_PROLOGUE hooks_internal_t hook; FOR_EACH_HOOK_BEGIN(&hook) - hook_expand h = hook.hooks.expand_hook; - if (h != NULL) { - h(hook.hooks.extra, type, address, old_usize, new_usize, - result_raw, args_raw); - } + hook_expand h = 
hook.hooks.expand_hook; + if (h != NULL) { + h(hook.hooks.extra, type, address, old_usize, new_usize, + result_raw, args_raw); + } FOR_EACH_HOOK_END HOOK_EPILOGUE } diff --git a/src/hpa.c b/src/hpa.c index 48e356c6..03668f06 100644 --- a/src/hpa.c +++ b/src/hpa.c @@ -12,17 +12,17 @@ static edata_t *hpa_alloc(tsdn_t *tsdn, pai_t *self, size_t size, size_t alignment, bool zero, bool guarded, bool frequent_reuse, bool *deferred_work_generated); -static size_t hpa_alloc_batch(tsdn_t *tsdn, pai_t *self, size_t size, - size_t nallocs, edata_list_active_t *results, bool frequent_reuse, - bool *deferred_work_generated); -static bool hpa_expand(tsdn_t *tsdn, pai_t *self, edata_t *edata, - size_t old_size, size_t new_size, bool zero, bool *deferred_work_generated); -static bool hpa_shrink(tsdn_t *tsdn, pai_t *self, edata_t *edata, - size_t old_size, size_t new_size, bool *deferred_work_generated); -static void hpa_dalloc(tsdn_t *tsdn, pai_t *self, edata_t *edata, - bool *deferred_work_generated); -static void hpa_dalloc_batch(tsdn_t *tsdn, pai_t *self, - edata_list_active_t *list, bool *deferred_work_generated); +static size_t hpa_alloc_batch(tsdn_t *tsdn, pai_t *self, size_t size, + size_t nallocs, edata_list_active_t *results, bool frequent_reuse, + bool *deferred_work_generated); +static bool hpa_expand(tsdn_t *tsdn, pai_t *self, edata_t *edata, + size_t old_size, size_t new_size, bool zero, bool *deferred_work_generated); +static bool hpa_shrink(tsdn_t *tsdn, pai_t *self, edata_t *edata, + size_t old_size, size_t new_size, bool *deferred_work_generated); +static void hpa_dalloc( + tsdn_t *tsdn, pai_t *self, edata_t *edata, bool *deferred_work_generated); +static void hpa_dalloc_batch(tsdn_t *tsdn, pai_t *self, + edata_list_active_t *list, bool *deferred_work_generated); static uint64_t hpa_time_until_deferred_work(tsdn_t *tsdn, pai_t *self); bool @@ -70,7 +70,8 @@ hpa_do_consistency_checks(hpa_shard_t *shard) { } bool -hpa_central_init(hpa_central_t *central, base_t 
*base, const hpa_hooks_t *hooks) { +hpa_central_init( + hpa_central_t *central, base_t *base, const hpa_hooks_t *hooks) { /* malloc_conf processing should have filtered out these cases. */ assert(hpa_supported()); bool err; @@ -89,8 +90,8 @@ hpa_central_init(hpa_central_t *central, base_t *base, const hpa_hooks_t *hooks) static hpdata_t * hpa_alloc_ps(tsdn_t *tsdn, hpa_central_t *central) { - return (hpdata_t *)base_alloc(tsdn, central->base, sizeof(hpdata_t), - CACHELINE); + return (hpdata_t *)base_alloc( + tsdn, central->base, sizeof(hpdata_t), CACHELINE); } static hpdata_t * @@ -137,8 +138,8 @@ hpa_central_extract(tsdn_t *tsdn, hpa_central_t *central, size_t size, */ bool commit = true; /* Allocate address space, bailing if we fail. */ - void *new_eden = pages_map(NULL, HPA_EDEN_SIZE, HUGEPAGE, - &commit); + void *new_eden = pages_map( + NULL, HPA_EDEN_SIZE, HUGEPAGE, &commit); if (new_eden == NULL) { *oom = true; malloc_mutex_unlock(tsdn, ¢ral->grow_mtx); @@ -243,8 +244,8 @@ hpa_shard_init(hpa_shard_t *shard, hpa_central_t *central, emap_t *emap, * locking here. 
*/ static void -hpa_shard_nonderived_stats_accum(hpa_shard_nonderived_stats_t *dst, - hpa_shard_nonderived_stats_t *src) { +hpa_shard_nonderived_stats_accum( + hpa_shard_nonderived_stats_t *dst, hpa_shard_nonderived_stats_t *src) { dst->npurge_passes += src->npurge_passes; dst->npurges += src->npurges; dst->nhugifies += src->nhugifies; @@ -255,13 +256,13 @@ hpa_shard_nonderived_stats_accum(hpa_shard_nonderived_stats_t *dst, void hpa_shard_stats_accum(hpa_shard_stats_t *dst, hpa_shard_stats_t *src) { psset_stats_accum(&dst->psset_stats, &src->psset_stats); - hpa_shard_nonderived_stats_accum(&dst->nonderived_stats, - &src->nonderived_stats); + hpa_shard_nonderived_stats_accum( + &dst->nonderived_stats, &src->nonderived_stats); } void -hpa_shard_stats_merge(tsdn_t *tsdn, hpa_shard_t *shard, - hpa_shard_stats_t *dst) { +hpa_shard_stats_merge( + tsdn_t *tsdn, hpa_shard_t *shard, hpa_shard_stats_t *dst) { hpa_do_consistency_checks(shard); malloc_mutex_lock(tsdn, &shard->grow_mtx); @@ -295,8 +296,8 @@ hpa_ndirty_max(tsdn_t *tsdn, hpa_shard_t *shard) { if (shard->opts.dirty_mult == (fxp_t)-1) { return (size_t)-1; } - return fxp_mul_frac(psset_nactive(&shard->psset), - shard->opts.dirty_mult); + return fxp_mul_frac( + psset_nactive(&shard->psset), shard->opts.dirty_mult); } static bool @@ -307,7 +308,8 @@ hpa_hugify_blocked_by_ndirty(tsdn_t *tsdn, hpa_shard_t *shard) { return false; } return hpa_adjusted_ndirty(tsdn, shard) - + hpdata_nretained_get(to_hugify) > hpa_ndirty_max(tsdn, shard); + + hpdata_nretained_get(to_hugify) + > hpa_ndirty_max(tsdn, shard); } static bool @@ -323,8 +325,8 @@ hpa_should_purge(tsdn_t *tsdn, hpa_shard_t *shard) { } static void -hpa_update_purge_hugify_eligibility(tsdn_t *tsdn, hpa_shard_t *shard, - hpdata_t *ps) { +hpa_update_purge_hugify_eligibility( + tsdn_t *tsdn, hpa_shard_t *shard, hpdata_t *ps) { malloc_mutex_assert_owner(tsdn, &shard->mtx); if (hpdata_changing_state_get(ps)) { hpdata_purge_allowed_set(ps, false); @@ -397,7 +399,7 @@ 
hpa_shard_has_deferred_work(tsdn_t *tsdn, hpa_shard_t *shard) { #define HPA_PURGE_BATCH_MAX_DEFAULT 16 #ifndef JEMALLOC_JET -#define HPA_PURGE_BATCH_MAX HPA_PURGE_BATCH_MAX_DEFAULT +# define HPA_PURGE_BATCH_MAX HPA_PURGE_BATCH_MAX_DEFAULT #else size_t hpa_purge_max_batch_size_for_test = HPA_PURGE_BATCH_MAX_DEFAULT; size_t @@ -406,20 +408,21 @@ hpa_purge_max_batch_size_for_test_set(size_t new_size) { hpa_purge_max_batch_size_for_test = new_size; return old_size; } -#define HPA_PURGE_BATCH_MAX hpa_purge_max_batch_size_for_test +# define HPA_PURGE_BATCH_MAX hpa_purge_max_batch_size_for_test #endif static inline size_t hpa_process_madvise_max_iovec_len(void) { - assert(opt_process_madvise_max_batch <= - PROCESS_MADVISE_MAX_BATCH_LIMIT); - return opt_process_madvise_max_batch == 0 ? - HPA_MIN_VAR_VEC_SIZE : opt_process_madvise_max_batch; + assert( + opt_process_madvise_max_batch <= PROCESS_MADVISE_MAX_BATCH_LIMIT); + return opt_process_madvise_max_batch == 0 + ? HPA_MIN_VAR_VEC_SIZE + : opt_process_madvise_max_batch; } static inline void -hpa_purge_actual_unlocked(hpa_shard_t *shard, hpa_purge_item_t *batch, - size_t batch_sz) { +hpa_purge_actual_unlocked( + hpa_shard_t *shard, hpa_purge_item_t *batch, size_t batch_sz) { assert(batch_sz > 0); size_t len = hpa_process_madvise_max_iovec_len(); @@ -433,17 +436,18 @@ hpa_purge_actual_unlocked(hpa_shard_t *shard, hpa_purge_item_t *batch, /* Actually do the purging, now that the lock is dropped. 
*/ if (batch[i].dehugify) { - shard->central->hooks.dehugify(hpdata_addr_get(to_purge), - HUGEPAGE); + shard->central->hooks.dehugify( + hpdata_addr_get(to_purge), HUGEPAGE); } - void *purge_addr; + void *purge_addr; size_t purge_size; size_t total_purged_on_one_hp = 0; while (hpdata_purge_next( - to_purge, &batch[i].state, &purge_addr, &purge_size)) { + to_purge, &batch[i].state, &purge_addr, &purge_size)) { total_purged_on_one_hp += purge_size; assert(total_purged_on_one_hp <= HUGEPAGE); - hpa_range_accum_add(&accum, purge_addr, purge_size, shard); + hpa_range_accum_add( + &accum, purge_addr, purge_size, shard); } } hpa_range_accum_finish(&accum, shard); @@ -490,10 +494,10 @@ hpa_purge_start_hp(hpa_purge_batch_t *b, psset_t *psset) { /* Gather all the metadata we'll need during the purge. */ hp_item->dehugify = hpdata_huge_get(hp_item->hp); size_t nranges; - size_t ndirty = - hpdata_purge_begin(hp_item->hp, &hp_item->state, &nranges); + size_t ndirty = hpdata_purge_begin( + hp_item->hp, &hp_item->state, &nranges); /* We picked hp to purge, so it should have some dirty ranges */ - assert(ndirty > 0 && nranges >0); + assert(ndirty > 0 && nranges > 0); b->ndirty_in_batch += ndirty; b->nranges += nranges; return ndirty; @@ -501,8 +505,8 @@ hpa_purge_start_hp(hpa_purge_batch_t *b, psset_t *psset) { /* Finish purge of one huge page. 
*/ static inline void -hpa_purge_finish_hp(tsdn_t *tsdn, hpa_shard_t *shard, - hpa_purge_item_t *hp_item) { +hpa_purge_finish_hp( + tsdn_t *tsdn, hpa_shard_t *shard, hpa_purge_item_t *hp_item) { if (hp_item->dehugify) { shard->stats.ndehugifies++; } @@ -523,9 +527,9 @@ hpa_purge_finish_hp(tsdn_t *tsdn, hpa_shard_t *shard, static inline bool hpa_batch_full(hpa_purge_batch_t *b) { /* It's okay for ranges to go above */ - return b->npurged_hp_total == b->max_hp || - b->item_cnt == b->items_capacity || - b->nranges >= b->range_watermark; + return b->npurged_hp_total == b->max_hp + || b->item_cnt == b->items_capacity + || b->nranges >= b->range_watermark; } static inline void @@ -547,23 +551,25 @@ hpa_purge(tsdn_t *tsdn, hpa_shard_t *shard, size_t max_hp) { assert(max_hp > 0); assert(HPA_PURGE_BATCH_MAX > 0); - assert(HPA_PURGE_BATCH_MAX < - (VARIABLE_ARRAY_SIZE_MAX / sizeof(hpa_purge_item_t))); + assert(HPA_PURGE_BATCH_MAX + < (VARIABLE_ARRAY_SIZE_MAX / sizeof(hpa_purge_item_t))); VARIABLE_ARRAY(hpa_purge_item_t, items, HPA_PURGE_BATCH_MAX); hpa_purge_batch_t batch = { - .max_hp = max_hp, - .npurged_hp_total = 0, - .items = &items[0], - .items_capacity = HPA_PURGE_BATCH_MAX, - .range_watermark = hpa_process_madvise_max_iovec_len(), + .max_hp = max_hp, + .npurged_hp_total = 0, + .items = &items[0], + .items_capacity = HPA_PURGE_BATCH_MAX, + .range_watermark = hpa_process_madvise_max_iovec_len(), }; assert(batch.range_watermark > 0); while (1) { hpa_batch_pass_start(&batch); assert(hpa_batch_empty(&batch)); - while(!hpa_batch_full(&batch) && hpa_should_purge(tsdn, shard)) { - size_t ndirty = hpa_purge_start_hp(&batch, &shard->psset); + while ( + !hpa_batch_full(&batch) && hpa_should_purge(tsdn, shard)) { + size_t ndirty = hpa_purge_start_hp( + &batch, &shard->psset); if (ndirty == 0) { break; } @@ -582,8 +588,8 @@ hpa_purge(tsdn_t *tsdn, hpa_shard_t *shard, size_t max_hp) { shard->npending_purge -= batch.ndirty_in_batch; shard->stats.npurges += batch.ndirty_in_batch; 
shard->central->hooks.curtime(&shard->last_purge, - /* first_reading */ false); - for (size_t i=0; imtx); - bool err = shard->central->hooks.hugify(hpdata_addr_get(to_hugify), - HUGEPAGE, shard->opts.hugify_sync); + bool err = shard->central->hooks.hugify( + hpdata_addr_get(to_hugify), HUGEPAGE, shard->opts.hugify_sync); malloc_mutex_lock(tsdn, &shard->mtx); shard->stats.nhugifies++; @@ -669,8 +675,8 @@ hpa_min_purge_interval_passed(tsdn_t *tsdn, hpa_shard_t *shard) { * hpa_shard_do_deferred_work() call. */ static void -hpa_shard_maybe_do_deferred_work(tsdn_t *tsdn, hpa_shard_t *shard, - bool forced) { +hpa_shard_maybe_do_deferred_work( + tsdn_t *tsdn, hpa_shard_t *shard, bool forced) { malloc_mutex_assert_owner(tsdn, &shard->mtx); if (!forced && shard->opts.deferral_allowed) { return; @@ -704,8 +710,7 @@ hpa_shard_maybe_do_deferred_work(tsdn_t *tsdn, hpa_shard_t *shard, * of purging algorithm. */ ssize_t max_purge_nhp = shard->opts.experimental_max_purge_nhp; - if (max_purge_nhp != -1 && - max_purges > (size_t)max_purge_nhp) { + if (max_purge_nhp != -1 && max_purges > (size_t)max_purge_nhp) { max_purges = max_purge_nhp; } @@ -725,9 +730,9 @@ hpa_shard_maybe_do_deferred_work(tsdn_t *tsdn, hpa_shard_t *shard, } static edata_t * -hpa_try_alloc_one_no_grow(tsdn_t *tsdn, hpa_shard_t *shard, size_t size, - bool *oom) { - bool err; +hpa_try_alloc_one_no_grow( + tsdn_t *tsdn, hpa_shard_t *shard, size_t size, bool *oom) { + bool err; edata_t *edata = edata_cache_fast_get(tsdn, &shard->ecf); if (edata == NULL) { *oom = true; @@ -754,8 +759,8 @@ hpa_try_alloc_one_no_grow(tsdn_t *tsdn, hpa_shard_t *shard, size_t size, } void *addr = hpdata_reserve_alloc(ps, size); - edata_init(edata, shard->ind, addr, size, /* slab */ false, - SC_NSIZES, /* sn */ hpdata_age_get(ps), extent_state_active, + edata_init(edata, shard->ind, addr, size, /* slab */ false, SC_NSIZES, + /* sn */ hpdata_age_get(ps), extent_state_active, /* zeroed */ false, /* committed */ true, EXTENT_PAI_HPA, 
EXTENT_NOT_HEAD); edata_ps_set(edata, ps); @@ -768,11 +773,11 @@ hpa_try_alloc_one_no_grow(tsdn_t *tsdn, hpa_shard_t *shard, size_t size, * dropped. This would force us to deal with a pageslab eviction down * the error pathway, which is a pain. */ - err = emap_register_boundary(tsdn, shard->emap, edata, - SC_NSIZES, /* slab */ false); + err = emap_register_boundary( + tsdn, shard->emap, edata, SC_NSIZES, /* slab */ false); if (err) { - hpdata_unreserve(ps, edata_addr_get(edata), - edata_size_get(edata)); + hpdata_unreserve( + ps, edata_addr_get(edata), edata_size_get(edata)); /* * We should arguably reset dirty state here, but this would * require some sort of prepare + commit functionality that's a @@ -800,8 +805,8 @@ hpa_try_alloc_batch_no_grow(tsdn_t *tsdn, hpa_shard_t *shard, size_t size, malloc_mutex_lock(tsdn, &shard->mtx); size_t nsuccess = 0; for (; nsuccess < nallocs; nsuccess++) { - edata_t *edata = hpa_try_alloc_one_no_grow(tsdn, shard, size, - oom); + edata_t *edata = hpa_try_alloc_one_no_grow( + tsdn, shard, size, oom); if (edata == NULL) { break; } @@ -819,12 +824,11 @@ hpa_alloc_batch_psset(tsdn_t *tsdn, hpa_shard_t *shard, size_t size, size_t nallocs, edata_list_active_t *results, bool *deferred_work_generated) { assert(size <= HUGEPAGE); - assert(size <= shard->opts.slab_max_alloc || - size == sz_s2u(size)); + assert(size <= shard->opts.slab_max_alloc || size == sz_s2u(size)); bool oom = false; - size_t nsuccess = hpa_try_alloc_batch_no_grow(tsdn, shard, size, &oom, - nallocs, results, deferred_work_generated); + size_t nsuccess = hpa_try_alloc_batch_no_grow( + tsdn, shard, size, &oom, nallocs, results, deferred_work_generated); if (nsuccess == nallocs || oom) { return nsuccess; @@ -851,8 +855,8 @@ hpa_alloc_batch_psset(tsdn_t *tsdn, hpa_shard_t *shard, size_t size, * deallocations (and allocations of smaller sizes) may still succeed * while we're doing this potentially expensive system call. 
*/ - hpdata_t *ps = hpa_central_extract(tsdn, shard->central, size, - shard->age_counter++, &oom); + hpdata_t *ps = hpa_central_extract( + tsdn, shard->central, size, shard->age_counter++, &oom); if (ps == NULL) { malloc_mutex_unlock(tsdn, &shard->grow_mtx); return nsuccess; @@ -894,8 +898,8 @@ hpa_alloc_batch(tsdn_t *tsdn, pai_t *self, size_t size, size_t nallocs, bool *deferred_work_generated) { assert(nallocs > 0); assert((size & PAGE_MASK) == 0); - witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn), - WITNESS_RANK_CORE, 0); + witness_assert_depth_to_rank( + tsdn_witness_tsdp_get(tsdn), WITNESS_RANK_CORE, 0); hpa_shard_t *shard = hpa_from_pai(self); /* @@ -908,16 +912,16 @@ hpa_alloc_batch(tsdn_t *tsdn, pai_t *self, size_t size, size_t nallocs, * huge page size). These requests do not concern internal * fragmentation with huge pages (again, the full size will be used). */ - if (!(frequent_reuse && size <= HUGEPAGE) && - (size > shard->opts.slab_max_alloc)) { + if (!(frequent_reuse && size <= HUGEPAGE) + && (size > shard->opts.slab_max_alloc)) { return 0; } - size_t nsuccess = hpa_alloc_batch_psset(tsdn, shard, size, nallocs, - results, deferred_work_generated); + size_t nsuccess = hpa_alloc_batch_psset( + tsdn, shard, size, nallocs, results, deferred_work_generated); - witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn), - WITNESS_RANK_CORE, 0); + witness_assert_depth_to_rank( + tsdn_witness_tsdp_get(tsdn), WITNESS_RANK_CORE, 0); /* * Guard the sanity checks with config_debug because the loop cannot be @@ -926,13 +930,13 @@ hpa_alloc_batch(tsdn_t *tsdn, pai_t *self, size_t size, size_t nallocs, */ if (config_debug) { edata_t *edata; - ql_foreach(edata, &results->head, ql_link_active) { + ql_foreach (edata, &results->head, ql_link_active) { emap_assert_mapped(tsdn, shard->emap, edata); assert(edata_pai_get(edata) == EXTENT_PAI_HPA); assert(edata_state_get(edata) == extent_state_active); assert(edata_arena_ind_get(edata) == shard->ind); - 
assert(edata_szind_get_maybe_invalid(edata) == - SC_NSIZES); + assert( + edata_szind_get_maybe_invalid(edata) == SC_NSIZES); assert(!edata_slab_get(edata)); assert(edata_committed_get(edata)); assert(edata_base_get(edata) == edata_addr_get(edata)); @@ -947,8 +951,8 @@ hpa_alloc(tsdn_t *tsdn, pai_t *self, size_t size, size_t alignment, bool zero, bool guarded, bool frequent_reuse, bool *deferred_work_generated) { assert((size & PAGE_MASK) == 0); assert(!guarded); - witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn), - WITNESS_RANK_CORE, 0); + witness_assert_depth_to_rank( + tsdn_witness_tsdp_get(tsdn), WITNESS_RANK_CORE, 0); /* We don't handle alignment or zeroing for now. */ if (alignment > PAGE || zero) { @@ -975,8 +979,8 @@ hpa_expand(tsdn_t *tsdn, pai_t *self, edata_t *edata, size_t old_size, } static bool -hpa_shrink(tsdn_t *tsdn, pai_t *self, edata_t *edata, - size_t old_size, size_t new_size, bool *deferred_work_generated) { +hpa_shrink(tsdn_t *tsdn, pai_t *self, edata_t *edata, size_t old_size, + size_t new_size, bool *deferred_work_generated) { /* Shrink not yet supported. */ return true; } @@ -1021,7 +1025,7 @@ hpa_dalloc_locked(tsdn_t *tsdn, hpa_shard_t *shard, edata_t *edata) { hpdata_t *ps = edata_ps_get(edata); /* Currently, all edatas come from pageslabs. 
*/ assert(ps != NULL); - void *unreserve_addr = edata_addr_get(edata); + void *unreserve_addr = edata_addr_get(edata); size_t unreserve_size = edata_size_get(edata); edata_cache_fast_put(tsdn, &shard->ecf, edata); @@ -1037,7 +1041,7 @@ hpa_dalloc_batch(tsdn_t *tsdn, pai_t *self, edata_list_active_t *list, hpa_shard_t *shard = hpa_from_pai(self); edata_t *edata; - ql_foreach(edata, &list->head, ql_link_active) { + ql_foreach (edata, &list->head, ql_link_active) { hpa_dalloc_prepare_unlocked(tsdn, shard, edata); } @@ -1048,15 +1052,14 @@ hpa_dalloc_batch(tsdn_t *tsdn, pai_t *self, edata_list_active_t *list, hpa_dalloc_locked(tsdn, shard, edata); } hpa_shard_maybe_do_deferred_work(tsdn, shard, /* forced */ false); - *deferred_work_generated = - hpa_shard_has_deferred_work(tsdn, shard); + *deferred_work_generated = hpa_shard_has_deferred_work(tsdn, shard); malloc_mutex_unlock(tsdn, &shard->mtx); } static void -hpa_dalloc(tsdn_t *tsdn, pai_t *self, edata_t *edata, - bool *deferred_work_generated) { +hpa_dalloc( + tsdn_t *tsdn, pai_t *self, edata_t *edata, bool *deferred_work_generated) { assert(!edata_guarded_get(edata)); /* Just a dalloc_batch of size 1; this lets us share logic. */ edata_list_active_t dalloc_list; @@ -1072,14 +1075,14 @@ hpa_dalloc(tsdn_t *tsdn, pai_t *self, edata_t *edata, static uint64_t hpa_time_until_deferred_work(tsdn_t *tsdn, pai_t *self) { hpa_shard_t *shard = hpa_from_pai(self); - uint64_t time_ns = BACKGROUND_THREAD_DEFERRED_MAX; + uint64_t time_ns = BACKGROUND_THREAD_DEFERRED_MAX; malloc_mutex_lock(tsdn, &shard->mtx); hpdata_t *to_hugify = psset_pick_hugify(&shard->psset); if (to_hugify != NULL) { - nstime_t time_hugify_allowed = - hpdata_time_hugify_allowed(to_hugify); + nstime_t time_hugify_allowed = hpdata_time_hugify_allowed( + to_hugify); uint64_t since_hugify_allowed_ms = shard->central->hooks.ms_since(&time_hugify_allowed); /* @@ -1087,8 +1090,8 @@ hpa_time_until_deferred_work(tsdn_t *tsdn, pai_t *self) { * sleep for the rest. 
*/ if (since_hugify_allowed_ms < shard->opts.hugify_delay_ms) { - time_ns = shard->opts.hugify_delay_ms - - since_hugify_allowed_ms; + time_ns = shard->opts.hugify_delay_ms + - since_hugify_allowed_ms; time_ns *= 1000 * 1000; } else { malloc_mutex_unlock(tsdn, &shard->mtx); @@ -1110,8 +1113,8 @@ hpa_time_until_deferred_work(tsdn_t *tsdn, pai_t *self) { if (since_last_purge_ms < shard->opts.min_purge_interval_ms) { uint64_t until_purge_ns; - until_purge_ns = shard->opts.min_purge_interval_ms - - since_last_purge_ms; + until_purge_ns = shard->opts.min_purge_interval_ms + - since_last_purge_ms; until_purge_ns *= 1000 * 1000; if (until_purge_ns < time_ns) { @@ -1176,8 +1179,8 @@ hpa_shard_destroy(tsdn_t *tsdn, hpa_shard_t *shard) { } void -hpa_shard_set_deferral_allowed(tsdn_t *tsdn, hpa_shard_t *shard, - bool deferral_allowed) { +hpa_shard_set_deferral_allowed( + tsdn_t *tsdn, hpa_shard_t *shard, bool deferral_allowed) { hpa_do_consistency_checks(shard); malloc_mutex_lock(tsdn, &shard->mtx); diff --git a/src/hpa_hooks.c b/src/hpa_hooks.c index 072d490e..45bebe41 100644 --- a/src/hpa_hooks.c +++ b/src/hpa_hooks.c @@ -3,26 +3,18 @@ #include "jemalloc/internal/hpa_hooks.h" -static void *hpa_hooks_map(size_t size); -static void hpa_hooks_unmap(void *ptr, size_t size); -static void hpa_hooks_purge(void *ptr, size_t size); -static bool hpa_hooks_hugify(void *ptr, size_t size, bool sync); -static void hpa_hooks_dehugify(void *ptr, size_t size); -static void hpa_hooks_curtime(nstime_t *r_nstime, bool first_reading); +static void *hpa_hooks_map(size_t size); +static void hpa_hooks_unmap(void *ptr, size_t size); +static void hpa_hooks_purge(void *ptr, size_t size); +static bool hpa_hooks_hugify(void *ptr, size_t size, bool sync); +static void hpa_hooks_dehugify(void *ptr, size_t size); +static void hpa_hooks_curtime(nstime_t *r_nstime, bool first_reading); static uint64_t hpa_hooks_ms_since(nstime_t *past_nstime); -static bool hpa_hooks_vectorized_purge( - void *vec, size_t 
vlen, size_t nbytes); +static bool hpa_hooks_vectorized_purge(void *vec, size_t vlen, size_t nbytes); -const hpa_hooks_t hpa_hooks_default = { - &hpa_hooks_map, - &hpa_hooks_unmap, - &hpa_hooks_purge, - &hpa_hooks_hugify, - &hpa_hooks_dehugify, - &hpa_hooks_curtime, - &hpa_hooks_ms_since, - &hpa_hooks_vectorized_purge -}; +const hpa_hooks_t hpa_hooks_default = {&hpa_hooks_map, &hpa_hooks_unmap, + &hpa_hooks_purge, &hpa_hooks_hugify, &hpa_hooks_dehugify, + &hpa_hooks_curtime, &hpa_hooks_ms_since, &hpa_hooks_vectorized_purge}; static void * hpa_hooks_map(size_t size) { @@ -82,13 +74,12 @@ hpa_hooks_ms_since(nstime_t *past_nstime) { return nstime_ms_since(past_nstime); } - /* Return true if we did not purge all nbytes, or on some error */ static bool hpa_hooks_vectorized_purge(void *vec, size_t vlen, size_t nbytes) { #ifdef JEMALLOC_HAVE_PROCESS_MADVISE - return pages_purge_process_madvise(vec, vlen, nbytes); + return pages_purge_process_madvise(vec, vlen, nbytes); #else - return true; + return true; #endif } diff --git a/src/hpdata.c b/src/hpdata.c index f3e347c4..9d324952 100644 --- a/src/hpdata.c +++ b/src/hpdata.c @@ -17,8 +17,7 @@ hpdata_age_comp(const hpdata_t *a, const hpdata_t *b) { ph_gen(, hpdata_age_heap, hpdata_t, age_link, hpdata_age_comp) -void -hpdata_init(hpdata_t *hpdata, void *addr, uint64_t age) { + void hpdata_init(hpdata_t *hpdata, void *addr, uint64_t age) { hpdata_addr_set(hpdata, addr); hpdata_age_set(hpdata, age); hpdata->h_huge = false; @@ -66,8 +65,8 @@ hpdata_reserve_alloc(hpdata_t *hpdata, size_t sz) { size_t largest_unchosen_range = 0; while (true) { - bool found = fb_urange_iter(hpdata->active_pages, - HUGEPAGE_PAGES, start, &begin, &len); + bool found = fb_urange_iter( + hpdata->active_pages, HUGEPAGE_PAGES, start, &begin, &len); /* * A precondition to this function is that hpdata must be able * to serve the allocation. 
@@ -97,8 +96,8 @@ hpdata_reserve_alloc(hpdata_t *hpdata, size_t sz) { * We might be about to dirty some memory for the first time; update our * count if so. */ - size_t new_dirty = fb_ucount(hpdata->touched_pages, HUGEPAGE_PAGES, - result, npages); + size_t new_dirty = fb_ucount( + hpdata->touched_pages, HUGEPAGE_PAGES, result, npages); fb_set_range(hpdata->touched_pages, HUGEPAGE_PAGES, result, npages); hpdata->h_ntouched += new_dirty; @@ -129,8 +128,8 @@ hpdata_reserve_alloc(hpdata_t *hpdata, size_t sz) { } hpdata_assert_consistent(hpdata); - return (void *)( - (byte_t *)hpdata_addr_get(hpdata) + (result << LG_PAGE)); + return ( + void *)((byte_t *)hpdata_addr_get(hpdata) + (result << LG_PAGE)); } void @@ -148,10 +147,10 @@ hpdata_unreserve(hpdata_t *hpdata, void *addr, size_t sz) { fb_unset_range(hpdata->active_pages, HUGEPAGE_PAGES, begin, npages); /* We might have just created a new, larger range. */ - size_t new_begin = (fb_fls(hpdata->active_pages, HUGEPAGE_PAGES, - begin) + 1); - size_t new_end = fb_ffs(hpdata->active_pages, HUGEPAGE_PAGES, - begin + npages - 1); + size_t new_begin = (fb_fls(hpdata->active_pages, HUGEPAGE_PAGES, begin) + + 1); + size_t new_end = fb_ffs( + hpdata->active_pages, HUGEPAGE_PAGES, begin + npages - 1); size_t new_range_len = new_end - new_begin; if (new_range_len > old_longest_range) { @@ -164,8 +163,8 @@ hpdata_unreserve(hpdata_t *hpdata, void *addr, size_t sz) { } size_t -hpdata_purge_begin(hpdata_t *hpdata, hpdata_purge_state_t *purge_state, - size_t *nranges) { +hpdata_purge_begin( + hpdata_t *hpdata, hpdata_purge_state_t *purge_state, size_t *nranges) { hpdata_assert_consistent(hpdata); /* * See the comment below; we might purge any inactive extent, so it's @@ -212,29 +211,29 @@ hpdata_purge_begin(hpdata_t *hpdata, hpdata_purge_state_t *purge_state, fb_group_t dirty_pages[FB_NGROUPS(HUGEPAGE_PAGES)]; fb_init(dirty_pages, HUGEPAGE_PAGES); fb_bit_not(dirty_pages, hpdata->active_pages, HUGEPAGE_PAGES); - fb_bit_and(dirty_pages, 
dirty_pages, hpdata->touched_pages, - HUGEPAGE_PAGES); + fb_bit_and( + dirty_pages, dirty_pages, hpdata->touched_pages, HUGEPAGE_PAGES); fb_init(purge_state->to_purge, HUGEPAGE_PAGES); size_t next_bit = 0; *nranges = 0; while (next_bit < HUGEPAGE_PAGES) { - size_t next_dirty = fb_ffs(dirty_pages, HUGEPAGE_PAGES, - next_bit); + size_t next_dirty = fb_ffs( + dirty_pages, HUGEPAGE_PAGES, next_bit); /* Recall that fb_ffs returns nbits if no set bit is found. */ if (next_dirty == HUGEPAGE_PAGES) { break; } - size_t next_active = fb_ffs(hpdata->active_pages, - HUGEPAGE_PAGES, next_dirty); + size_t next_active = fb_ffs( + hpdata->active_pages, HUGEPAGE_PAGES, next_dirty); /* * Don't purge past the end of the dirty extent, into retained * pages. This helps the kernel a tiny bit, but honestly it's * mostly helpful for testing (where we tend to write test cases * that think in terms of the dirty ranges). */ - ssize_t last_dirty = fb_fls(dirty_pages, HUGEPAGE_PAGES, - next_active - 1); + ssize_t last_dirty = fb_fls( + dirty_pages, HUGEPAGE_PAGES, next_active - 1); assert(last_dirty >= 0); assert((size_t)last_dirty >= next_dirty); assert((size_t)last_dirty - next_dirty + 1 <= HUGEPAGE_PAGES); @@ -249,9 +248,9 @@ hpdata_purge_begin(hpdata_t *hpdata, hpdata_purge_state_t *purge_state, size_t ndirty = hpdata->h_ntouched - hpdata->h_nactive; purge_state->ndirty_to_purge = ndirty; assert(ndirty <= fb_scount( - purge_state->to_purge, HUGEPAGE_PAGES, 0, HUGEPAGE_PAGES)); - assert(ndirty == fb_scount(dirty_pages, HUGEPAGE_PAGES, 0, - HUGEPAGE_PAGES)); + purge_state->to_purge, HUGEPAGE_PAGES, 0, HUGEPAGE_PAGES)); + assert(ndirty + == fb_scount(dirty_pages, HUGEPAGE_PAGES, 0, HUGEPAGE_PAGES)); assert(*nranges <= ndirty); assert(ndirty == 0 || *nranges > 0); @@ -281,8 +280,8 @@ hpdata_purge_next(hpdata_t *hpdata, hpdata_purge_state_t *purge_state, return false; } - *r_purge_addr = (void *)( - (byte_t *)hpdata_addr_get(hpdata) + purge_begin * PAGE); + *r_purge_addr = (void *)((byte_t 
*)hpdata_addr_get(hpdata) + + purge_begin * PAGE); *r_purge_size = purge_len * PAGE; purge_state->next_purge_search_begin = purge_begin + purge_len; @@ -299,12 +298,13 @@ hpdata_purge_end(hpdata_t *hpdata, hpdata_purge_state_t *purge_state) { /* See the comment in reserve. */ assert(!hpdata->h_in_psset || hpdata->h_updating); - assert(purge_state->npurged == fb_scount(purge_state->to_purge, - HUGEPAGE_PAGES, 0, HUGEPAGE_PAGES)); + assert(purge_state->npurged + == fb_scount( + purge_state->to_purge, HUGEPAGE_PAGES, 0, HUGEPAGE_PAGES)); assert(purge_state->npurged >= purge_state->ndirty_to_purge); - fb_bit_not(purge_state->to_purge, purge_state->to_purge, - HUGEPAGE_PAGES); + fb_bit_not( + purge_state->to_purge, purge_state->to_purge, HUGEPAGE_PAGES); fb_bit_and(hpdata->touched_pages, hpdata->touched_pages, purge_state->to_purge, HUGEPAGE_PAGES); assert(hpdata->h_ntouched >= purge_state->ndirty_to_purge); diff --git a/src/inspect.c b/src/inspect.c index 2575b5c1..116e77a1 100644 --- a/src/inspect.c +++ b/src/inspect.c @@ -3,8 +3,8 @@ #include "jemalloc/internal/inspect.h" void -inspect_extent_util_stats_get(tsdn_t *tsdn, const void *ptr, size_t *nfree, - size_t *nregs, size_t *size) { +inspect_extent_util_stats_get( + tsdn_t *tsdn, const void *ptr, size_t *nfree, size_t *nregs, size_t *size) { assert(ptr != NULL && nfree != NULL && nregs != NULL && size != NULL); const edata_t *edata = emap_edata_lookup(tsdn, &arena_emap_global, ptr); @@ -57,7 +57,7 @@ inspect_extent_util_stats_verbose_get(tsdn_t *tsdn, const void *ptr, &arenas[edata_arena_ind_get(edata)], ATOMIC_RELAXED); assert(arena != NULL); const unsigned binshard = edata_binshard_get(edata); - bin_t *bin = arena_get_bin(arena, szind, binshard); + bin_t *bin = arena_get_bin(arena, szind, binshard); malloc_mutex_lock(tsdn, &bin->lock); if (config_stats) { diff --git a/src/jemalloc.c b/src/jemalloc.c index c6621a79..876c49e8 100644 --- a/src/jemalloc.c +++ b/src/jemalloc.c @@ -29,11 +29,11 @@ /* Data. 
*/ /* Runtime configuration options. */ -const char *je_malloc_conf +const char *je_malloc_conf #ifndef _WIN32 JEMALLOC_ATTR(weak) #endif - ; + ; /* * The usual rule is that the closer to runtime you are, the higher priority * your configuration settings are (so the jemalloc config options get lower @@ -51,23 +51,23 @@ const char *je_malloc_conf * We don't actually want this to be widespread, so we'll give it a silly name * and not mention it in headers or documentation. */ -const char *je_malloc_conf_2_conf_harder +const char *je_malloc_conf_2_conf_harder #ifndef _WIN32 JEMALLOC_ATTR(weak) #endif - ; + ; const char *opt_malloc_conf_symlink = NULL; const char *opt_malloc_conf_env_var = NULL; -bool opt_abort = +bool opt_abort = #ifdef JEMALLOC_DEBUG true #else false #endif ; -bool opt_abort_conf = +bool opt_abort_conf = #ifdef JEMALLOC_DEBUG true #else @@ -75,29 +75,29 @@ bool opt_abort_conf = #endif ; /* Intentionally default off, even with debug builds. */ -bool opt_confirm_conf = false; -const char *opt_junk = +bool opt_confirm_conf = false; +const char *opt_junk = #if (defined(JEMALLOC_DEBUG) && defined(JEMALLOC_FILL)) "true" #else "false" #endif ; -bool opt_junk_alloc = +bool opt_junk_alloc = #if (defined(JEMALLOC_DEBUG) && defined(JEMALLOC_FILL)) true #else false #endif ; -bool opt_junk_free = +bool opt_junk_free = #if (defined(JEMALLOC_DEBUG) && defined(JEMALLOC_FILL)) true #else false #endif ; -bool opt_trust_madvise = +bool opt_trust_madvise = #ifdef JEMALLOC_PURGE_MADVISE_DONTNEED_ZEROS false #else @@ -131,9 +131,9 @@ atomic_zu_t zero_realloc_count = ATOMIC_INIT(0); bool opt_disable_large_size_classes = true; const char *const zero_realloc_mode_names[] = { - "alloc", - "free", - "abort", + "alloc", + "free", + "abort", }; /* @@ -143,27 +143,31 @@ const char *const zero_realloc_mode_names[] = { static const uint8_t junk_alloc_byte = 0xa5; static const uint8_t junk_free_byte = 0x5a; -static void default_junk_alloc(void *ptr, size_t usize) { +static void 
+default_junk_alloc(void *ptr, size_t usize) { memset(ptr, junk_alloc_byte, usize); } -static void default_junk_free(void *ptr, size_t usize) { +static void +default_junk_free(void *ptr, size_t usize) { memset(ptr, junk_free_byte, usize); } -void (*JET_MUTABLE junk_alloc_callback)(void *ptr, size_t size) = &default_junk_alloc; -void (*JET_MUTABLE junk_free_callback)(void *ptr, size_t size) = &default_junk_free; +void (*JET_MUTABLE junk_alloc_callback)( + void *ptr, size_t size) = &default_junk_alloc; +void (*JET_MUTABLE junk_free_callback)( + void *ptr, size_t size) = &default_junk_free; void (*JET_MUTABLE invalid_conf_abort)(void) = &abort; -bool opt_utrace = false; -bool opt_xmalloc = false; -bool opt_experimental_infallible_new = false; -bool opt_experimental_tcache_gc = true; -bool opt_zero = false; -unsigned opt_narenas = 0; -static fxp_t opt_narenas_ratio = FXP_INIT_INT(4); +bool opt_utrace = false; +bool opt_xmalloc = false; +bool opt_experimental_infallible_new = false; +bool opt_experimental_tcache_gc = true; +bool opt_zero = false; +unsigned opt_narenas = 0; +static fxp_t opt_narenas_ratio = FXP_INIT_INT(4); -unsigned ncpus; +unsigned ncpus; unsigned opt_debug_double_free_max_scan = SAFETY_CHECK_DOUBLE_FREE_MAX_SCAN_DEFAULT; @@ -175,9 +179,9 @@ size_t opt_calloc_madvise_threshold = static malloc_mutex_t arenas_lock; /* The global hpa, and whether it's on. */ -bool opt_hpa = false; +bool opt_hpa = false; hpa_shard_opts_t opt_hpa_opts = HPA_SHARD_OPTS_DEFAULT; -sec_opts_t opt_hpa_sec_opts = SEC_OPTS_DEFAULT; +sec_opts_t opt_hpa_sec_opts = SEC_OPTS_DEFAULT; /* * Arenas that are used to service external requests. Not all elements of the @@ -190,48 +194,48 @@ sec_opts_t opt_hpa_sec_opts = SEC_OPTS_DEFAULT; * Points to an arena_t. */ JEMALLOC_ALIGNED(CACHELINE) -atomic_p_t arenas[MALLOCX_ARENA_LIMIT]; -static atomic_u_t narenas_total; /* Use narenas_total_*(). 
*/ +atomic_p_t arenas[MALLOCX_ARENA_LIMIT]; +static atomic_u_t narenas_total; /* Use narenas_total_*(). */ /* Below three are read-only after initialization. */ -static arena_t *a0; /* arenas[0]. */ -unsigned narenas_auto; -unsigned manual_arena_base; +static arena_t *a0; /* arenas[0]. */ +unsigned narenas_auto; +unsigned manual_arena_base; malloc_init_t malloc_init_state = malloc_init_uninitialized; /* False should be the common case. Set to true to trigger initialization. */ -bool malloc_slow = true; +bool malloc_slow = true; /* When malloc_slow is true, set the corresponding bits for sanity check. */ enum { - flag_opt_junk_alloc = (1U), - flag_opt_junk_free = (1U << 1), - flag_opt_zero = (1U << 2), - flag_opt_utrace = (1U << 3), - flag_opt_xmalloc = (1U << 4) + flag_opt_junk_alloc = (1U), + flag_opt_junk_free = (1U << 1), + flag_opt_zero = (1U << 2), + flag_opt_utrace = (1U << 3), + flag_opt_xmalloc = (1U << 4) }; -static uint8_t malloc_slow_flags; +static uint8_t malloc_slow_flags; #ifdef JEMALLOC_THREADED_INIT /* Used to let the initializing thread recursively allocate. */ -# define NO_INITIALIZER ((unsigned long)0) -# define INITIALIZER pthread_self() -# define IS_INITIALIZER (malloc_initializer == pthread_self()) -static pthread_t malloc_initializer = NO_INITIALIZER; +# define NO_INITIALIZER ((unsigned long)0) +# define INITIALIZER pthread_self() +# define IS_INITIALIZER (malloc_initializer == pthread_self()) +static pthread_t malloc_initializer = NO_INITIALIZER; #else -# define NO_INITIALIZER false -# define INITIALIZER true -# define IS_INITIALIZER malloc_initializer -static bool malloc_initializer = NO_INITIALIZER; +# define NO_INITIALIZER false +# define INITIALIZER true +# define IS_INITIALIZER malloc_initializer +static bool malloc_initializer = NO_INITIALIZER; #endif /* Used to avoid initialization races. 
*/ #ifdef _WIN32 -#if _WIN32_WINNT >= 0x0600 -static malloc_mutex_t init_lock = SRWLOCK_INIT; -#else -static malloc_mutex_t init_lock; -static bool init_lock_initialized = false; +# if _WIN32_WINNT >= 0x0600 +static malloc_mutex_t init_lock = SRWLOCK_INIT; +# else +static malloc_mutex_t init_lock; +static bool init_lock_initialized = false; JEMALLOC_ATTR(constructor) static void WINAPI @@ -253,36 +257,38 @@ _init_init_lock(void) { init_lock_initialized = true; } -#ifdef _MSC_VER -# pragma section(".CRT$XCU", read) -JEMALLOC_SECTION(".CRT$XCU") JEMALLOC_ATTR(used) -static const void (WINAPI *init_init_lock)(void) = _init_init_lock; -#endif -#endif +# ifdef _MSC_VER +# pragma section(".CRT$XCU", read) +JEMALLOC_SECTION(".CRT$XCU") +JEMALLOC_ATTR(used) static const + void(WINAPI *init_init_lock)(void) = _init_init_lock; +# endif +# endif #else -static malloc_mutex_t init_lock = MALLOC_MUTEX_INITIALIZER; +static malloc_mutex_t init_lock = MALLOC_MUTEX_INITIALIZER; #endif typedef struct { - void *p; /* Input pointer (as in realloc(p, s)). */ - size_t s; /* Request size. */ - void *r; /* Result pointer. */ + void *p; /* Input pointer (as in realloc(p, s)). */ + size_t s; /* Request size. */ + void *r; /* Result pointer. */ } malloc_utrace_t; #ifdef JEMALLOC_UTRACE -# define UTRACE(a, b, c) do { \ - if (unlikely(opt_utrace)) { \ - int utrace_serrno = errno; \ - malloc_utrace_t ut; \ - ut.p = (a); \ - ut.s = (b); \ - ut.r = (c); \ - UTRACE_CALL(&ut, sizeof(ut)); \ - errno = utrace_serrno; \ - } \ -} while (0) +# define UTRACE(a, b, c) \ + do { \ + if (unlikely(opt_utrace)) { \ + int utrace_serrno = errno; \ + malloc_utrace_t ut; \ + ut.p = (a); \ + ut.s = (b); \ + ut.r = (c); \ + UTRACE_CALL(&ut, sizeof(ut)); \ + errno = utrace_serrno; \ + } \ + } while (0) #else -# define UTRACE(a, b, c) +# define UTRACE(a, b, c) #endif /* Whether encountered any invalid config options. */ @@ -294,8 +300,8 @@ static bool had_conf_error = false; * definition. 
*/ -static bool malloc_init_hard_a0(void); -static bool malloc_init_hard(void); +static bool malloc_init_hard_a0(void); +static bool malloc_init_hard(void); /******************************************************************************/ /* @@ -442,8 +448,10 @@ arena_new_create_background_thread(tsdn_t *tsdn, unsigned ind) { if (have_background_thread) { if (background_thread_create(tsdn_tsd(tsdn), ind)) { - malloc_printf(": error in background thread " - "creation for arena %u. Abort.\n", ind); + malloc_printf( + ": error in background thread " + "creation for arena %u. Abort.\n", + ind); abort(); } } @@ -479,8 +487,8 @@ arena_bind(tsd_t *tsd, unsigned ind, bool internal) { &arena->binshard_next, 1, ATOMIC_RELAXED); tsd_binshards_t *bins = tsd_binshardsp_get(tsd); for (unsigned i = 0; i < SC_NBINS; i++) { - assert(bin_infos[i].n_shards > 0 && - bin_infos[i].n_shards <= BIN_SHARDS_MAX); + assert(bin_infos[i].n_shards > 0 + && bin_infos[i].n_shards <= BIN_SHARDS_MAX); bins->binshard[i] = shard % bin_infos[i].n_shards; } } @@ -495,8 +503,8 @@ arena_migrate(tsd_t *tsd, arena_t *oldarena, arena_t *newarena) { arena_nthreads_inc(newarena, false); tsd_arena_set(tsd, newarena); - if (arena_nthreads_get(oldarena, false) == 0 && - !background_thread_enabled()) { + if (arena_nthreads_get(oldarena, false) == 0 + && !background_thread_enabled()) { /* * Purge if the old arena has no associated threads anymore and * no background threads. @@ -537,7 +545,7 @@ arena_choose_hard(tsd_t *tsd, bool internal) { if (narenas_auto > 1) { unsigned i, j, choose[2], first_null; - bool is_new_arena[2]; + bool is_new_arena[2]; /* * Determine binding for both non-internal and internal @@ -562,11 +570,14 @@ arena_choose_hard(tsd_t *tsd, bool internal) { * number of threads assigned to it. 
*/ for (j = 0; j < 2; j++) { - if (arena_nthreads_get(arena_get( - tsd_tsdn(tsd), i, false), !!j) < - arena_nthreads_get(arena_get( - tsd_tsdn(tsd), choose[j], false), - !!j)) { + if (arena_nthreads_get( + arena_get( + tsd_tsdn(tsd), i, false), + !!j) + < arena_nthreads_get( + arena_get(tsd_tsdn(tsd), + choose[j], false), + !!j)) { choose[j] = i; } } @@ -585,16 +596,17 @@ arena_choose_hard(tsd_t *tsd, bool internal) { } for (j = 0; j < 2; j++) { - if (arena_nthreads_get(arena_get(tsd_tsdn(tsd), - choose[j], false), !!j) == 0 || first_null == - narenas_auto) { + if (arena_nthreads_get( + arena_get(tsd_tsdn(tsd), choose[j], false), !!j) + == 0 + || first_null == narenas_auto) { /* * Use an unloaded arena, or the least loaded * arena if all arenas are already initialized. */ if (!!j == internal) { - ret = arena_get(tsd_tsdn(tsd), - choose[j], false); + ret = arena_get( + tsd_tsdn(tsd), choose[j], false); } } else { arena_t *arena; @@ -604,8 +616,8 @@ arena_choose_hard(tsd_t *tsd, bool internal) { arena = arena_init_locked(tsd_tsdn(tsd), choose[j], &arena_config_default); if (arena == NULL) { - malloc_mutex_unlock(tsd_tsdn(tsd), - &arenas_lock); + malloc_mutex_unlock( + tsd_tsdn(tsd), &arenas_lock); return NULL; } is_new_arena[j] = true; @@ -657,7 +669,7 @@ arena_cleanup(tsd_t *tsd) { static void stats_print_atexit(void) { if (config_stats) { - tsdn_t *tsdn; + tsdn_t *tsdn; unsigned narenas, i; tsdn = tsdn_fetch(); @@ -675,13 +687,13 @@ stats_print_atexit(void) { tcache_slow_t *tcache_slow; malloc_mutex_lock(tsdn, &arena->tcache_ql_mtx); - ql_foreach(tcache_slow, &arena->tcache_ql, - link) { - tcache_stats_merge(tsdn, - tcache_slow->tcache, arena); + ql_foreach ( + tcache_slow, &arena->tcache_ql, link) { + tcache_stats_merge( + tsdn, tcache_slow->tcache, arena); } - malloc_mutex_unlock(tsdn, - &arena->tcache_ql_mtx); + malloc_mutex_unlock( + tsdn, &arena->tcache_ql_mtx); } } } @@ -726,16 +738,16 @@ jemalloc_getenv(const char *name) { #ifdef JEMALLOC_FORCE_GETENV return 
getenv(name); #else -# ifdef JEMALLOC_HAVE_SECURE_GETENV +# ifdef JEMALLOC_HAVE_SECURE_GETENV return secure_getenv(name); -# else -# ifdef JEMALLOC_HAVE_ISSETUGID +# else +# ifdef JEMALLOC_HAVE_ISSETUGID if (issetugid() != 0) { return NULL; } -# endif +# endif return getenv(name); -# endif +# endif #endif } @@ -759,16 +771,16 @@ malloc_ncpus(void) { * is available, to avoid using more arenas than necessary. */ { -# if defined(__FreeBSD__) || defined(__DragonFly__) +# if defined(__FreeBSD__) || defined(__DragonFly__) cpuset_t set; -# else +# else cpu_set_t set; -# endif -# if defined(JEMALLOC_HAVE_SCHED_SETAFFINITY) +# endif +# if defined(JEMALLOC_HAVE_SCHED_SETAFFINITY) sched_getaffinity(0, sizeof(set), &set); -# else +# else pthread_getaffinity_np(pthread_self(), sizeof(set), &set); -# endif +# endif result = CPU_COUNT(&set); } #else @@ -785,8 +797,7 @@ malloc_ncpus(void) { * Since otherwise tricky things is possible with percpu arenas in use. */ static bool -malloc_cpu_count_is_deterministic(void) -{ +malloc_cpu_count_is_deterministic(void) { #ifdef _WIN32 return true; #else @@ -795,22 +806,22 @@ malloc_cpu_count_is_deterministic(void) if (cpu_onln != cpu_conf) { return false; } -# if defined(CPU_COUNT) -# if defined(__FreeBSD__) || defined(__DragonFly__) +# if defined(CPU_COUNT) +# if defined(__FreeBSD__) || defined(__DragonFly__) cpuset_t set; -# else +# else cpu_set_t set; -# endif /* __FreeBSD__ */ -# if defined(JEMALLOC_HAVE_SCHED_SETAFFINITY) +# endif /* __FreeBSD__ */ +# if defined(JEMALLOC_HAVE_SCHED_SETAFFINITY) sched_getaffinity(0, sizeof(set), &set); -# else /* !JEMALLOC_HAVE_SCHED_SETAFFINITY */ +# else /* !JEMALLOC_HAVE_SCHED_SETAFFINITY */ pthread_getaffinity_np(pthread_self(), sizeof(set), &set); -# endif /* JEMALLOC_HAVE_SCHED_SETAFFINITY */ +# endif /* JEMALLOC_HAVE_SCHED_SETAFFINITY */ long cpu_affinity = CPU_COUNT(&set); if (cpu_affinity != cpu_conf) { return false; } -# endif /* CPU_COUNT */ +# endif /* CPU_COUNT */ return true; #endif } @@ 
-822,10 +833,13 @@ init_opt_stats_opts(const char *v, size_t vlen, char *dest) { for (size_t i = 0; i < vlen; i++) { switch (v[i]) { -#define OPTION(o, v, d, s) case o: break; +#define OPTION(o, v, d, s) \ + case o: \ + break; STATS_PRINT_OPTIONS #undef OPTION - default: continue; + default: + continue; } if (strchr(dest, v[i]) != NULL) { @@ -851,25 +865,75 @@ malloc_conf_format_error(const char *msg, const char *begin, const char *end) { static bool malloc_conf_next(char const **opts_p, char const **k_p, size_t *klen_p, char const **v_p, size_t *vlen_p) { - bool accept; + bool accept; const char *opts = *opts_p; *k_p = opts; for (accept = false; !accept;) { switch (*opts) { - case 'A': case 'B': case 'C': case 'D': case 'E': case 'F': - case 'G': case 'H': case 'I': case 'J': case 'K': case 'L': - case 'M': case 'N': case 'O': case 'P': case 'Q': case 'R': - case 'S': case 'T': case 'U': case 'V': case 'W': case 'X': - case 'Y': case 'Z': - case 'a': case 'b': case 'c': case 'd': case 'e': case 'f': - case 'g': case 'h': case 'i': case 'j': case 'k': case 'l': - case 'm': case 'n': case 'o': case 'p': case 'q': case 'r': - case 's': case 't': case 'u': case 'v': case 'w': case 'x': - case 'y': case 'z': - case '0': case '1': case '2': case '3': case '4': case '5': - case '6': case '7': case '8': case '9': + case 'A': + case 'B': + case 'C': + case 'D': + case 'E': + case 'F': + case 'G': + case 'H': + case 'I': + case 'J': + case 'K': + case 'L': + case 'M': + case 'N': + case 'O': + case 'P': + case 'Q': + case 'R': + case 'S': + case 'T': + case 'U': + case 'V': + case 'W': + case 'X': + case 'Y': + case 'Z': + case 'a': + case 'b': + case 'c': + case 'd': + case 'e': + case 'f': + case 'g': + case 'h': + case 'i': + case 'j': + case 'k': + case 'l': + case 'm': + case 'n': + case 'o': + case 'p': + case 'q': + case 'r': + case 's': + case 't': + case 'u': + case 'v': + case 'w': + case 'x': + case 'y': + case 'z': + case '0': + case '1': + case '2': + case '3': 
+ case '4': + case '5': + case '6': + case '7': + case '8': + case '9': case '_': opts++; break; @@ -882,8 +946,8 @@ malloc_conf_next(char const **opts_p, char const **k_p, size_t *klen_p, case '\0': if (opts != *opts_p) { malloc_conf_format_error( - "Conf string ends with key", - *opts_p, opts - 1); + "Conf string ends with key", *opts_p, + opts - 1); had_conf_error = true; } return true; @@ -908,8 +972,8 @@ malloc_conf_next(char const **opts_p, char const **k_p, size_t *klen_p, */ if (*opts == '\0') { malloc_conf_format_error( - "Conf string ends with comma", - *opts_p, opts - 1); + "Conf string ends with comma", *opts_p, + opts - 1); had_conf_error = true; } *vlen_p = (uintptr_t)opts - 1 - (uintptr_t)*v_p; @@ -932,16 +996,17 @@ malloc_conf_next(char const **opts_p, char const **k_p, size_t *klen_p, static void malloc_abort_invalid_conf(void) { assert(opt_abort_conf); - malloc_printf(": Abort (abort_conf:true) on invalid conf " + malloc_printf( + ": Abort (abort_conf:true) on invalid conf " "value (see above).\n"); invalid_conf_abort(); } static void -malloc_conf_error(const char *msg, const char *k, size_t klen, const char *v, - size_t vlen) { - malloc_printf(": %s: %.*s:%.*s\n", msg, (int)klen, k, - (int)vlen, v); +malloc_conf_error( + const char *msg, const char *k, size_t klen, const char *v, size_t vlen) { + malloc_printf( + ": %s: %.*s:%.*s\n", msg, (int)klen, k, (int)vlen, v); /* If abort_conf is set, error out after processing all options. 
*/ const char *experimental = "experimental_"; if (strncmp(k, experimental, strlen(experimental)) == 0) { @@ -1002,48 +1067,50 @@ obtain_malloc_conf(unsigned which_source, char readlink_buf[PATH_MAX + 1]) { break; #else ssize_t linklen = 0; -# ifndef _WIN32 - int saved_errno = errno; +# ifndef _WIN32 + int saved_errno = errno; const char *linkname = -# ifdef JEMALLOC_PREFIX - "/etc/"JEMALLOC_PREFIX"malloc.conf" -# else +# ifdef JEMALLOC_PREFIX + "/etc/" JEMALLOC_PREFIX "malloc.conf" +# else "/etc/malloc.conf" -# endif +# endif ; /* * Try to use the contents of the "/etc/malloc.conf" symbolic * link's name. */ -# ifndef JEMALLOC_READLINKAT +# ifndef JEMALLOC_READLINKAT linklen = readlink(linkname, readlink_buf, PATH_MAX); -# else - linklen = readlinkat(AT_FDCWD, linkname, readlink_buf, PATH_MAX); -# endif +# else + linklen = readlinkat( + AT_FDCWD, linkname, readlink_buf, PATH_MAX); +# endif if (linklen == -1) { /* No configuration specified. */ linklen = 0; /* Restore errno. */ set_errno(saved_errno); } -# endif +# endif readlink_buf[linklen] = '\0'; ret = readlink_buf; break; #endif - } case 3: { + } + case 3: { #ifndef JEMALLOC_CONFIG_ENV ret = NULL; break; #else const char *envname = -# ifdef JEMALLOC_PREFIX - JEMALLOC_CPREFIX"MALLOC_CONF" -# else - "MALLOC_CONF" -# endif - ; +# ifdef JEMALLOC_PREFIX + JEMALLOC_CPREFIX "MALLOC_CONF" +# else + "MALLOC_CONF" +# endif + ; if ((ret = jemalloc_getenv(envname)) != NULL) { opt_malloc_conf_env_var = ret; @@ -1053,10 +1120,12 @@ obtain_malloc_conf(unsigned which_source, char readlink_buf[PATH_MAX + 1]) { } break; #endif - } case 4: { + } + case 4: { ret = je_malloc_conf_2_conf_harder; break; - } default: + } + default: not_reached(); ret = NULL; } @@ -1072,15 +1141,16 @@ validate_hpa_settings(void) { had_conf_error = true; malloc_printf( ": huge page size (%zu) greater than expected." 
- "May not be supported or behave as expected.", HUGEPAGE); + "May not be supported or behave as expected.", + HUGEPAGE); } #ifndef JEMALLOC_HAVE_MADVISE_COLLAPSE if (opt_hpa_opts.hugify_sync) { - had_conf_error = true; - malloc_printf( - ": hpa_hugify_sync config option is enabled, " - "but MADV_COLLAPSE support was not detected at build " - "time."); + had_conf_error = true; + malloc_printf( + ": hpa_hugify_sync config option is enabled, " + "but MADV_COLLAPSE support was not detected at build " + "time."); } #endif } @@ -1090,17 +1160,17 @@ malloc_conf_init_helper(sc_data_t *sc_data, unsigned bin_shard_sizes[SC_NBINS], bool initial_call, const char *opts_cache[MALLOC_CONF_NSOURCES], char readlink_buf[PATH_MAX + 1]) { static const char *opts_explain[MALLOC_CONF_NSOURCES] = { - "string specified via --with-malloc-conf", - "string pointed to by the global variable malloc_conf", - "\"name\" of the file referenced by the symbolic link named " - "/etc/malloc.conf", - "value of the environment variable MALLOC_CONF", - "string pointed to by the global variable " - "malloc_conf_2_conf_harder", + "string specified via --with-malloc-conf", + "string pointed to by the global variable malloc_conf", + "\"name\" of the file referenced by the symbolic link named " + "/etc/malloc.conf", + "value of the environment variable MALLOC_CONF", + "string pointed to by the global variable " + "malloc_conf_2_conf_harder", }; - unsigned i; + unsigned i; const char *opts, *k, *v; - size_t klen, vlen; + size_t klen, vlen; for (i = 0; i < MALLOC_CONF_NSOURCES; i++) { /* Get runtime configuration. */ @@ -1110,129 +1180,116 @@ malloc_conf_init_helper(sc_data_t *sc_data, unsigned bin_shard_sizes[SC_NBINS], opts = opts_cache[i]; if (!initial_call && opt_confirm_conf) { malloc_printf( - ": malloc_conf #%u (%s): \"%s\"\n", - i + 1, opts_explain[i], opts != NULL ? opts : ""); + ": malloc_conf #%u (%s): \"%s\"\n", i + 1, + opts_explain[i], opts != NULL ? 
opts : ""); } if (opts == NULL) { continue; } - while (*opts != '\0' && !malloc_conf_next(&opts, &k, &klen, &v, - &vlen)) { - -#define CONF_ERROR(msg, k, klen, v, vlen) \ - if (!initial_call) { \ - malloc_conf_error( \ - msg, k, klen, v, vlen); \ - cur_opt_valid = false; \ - } -#define CONF_CONTINUE { \ - if (!initial_call && opt_confirm_conf \ - && cur_opt_valid) { \ - malloc_printf(": -- " \ - "Set conf value: %.*s:%.*s" \ - "\n", (int)klen, k, \ - (int)vlen, v); \ - } \ - continue; \ - } -#define CONF_MATCH(n) \ - (sizeof(n)-1 == klen && strncmp(n, k, klen) == 0) -#define CONF_MATCH_VALUE(n) \ - (sizeof(n)-1 == vlen && strncmp(n, v, vlen) == 0) -#define CONF_HANDLE_BOOL(o, n) \ - if (CONF_MATCH(n)) { \ - if (CONF_MATCH_VALUE("true")) { \ - o = true; \ - } else if (CONF_MATCH_VALUE("false")) { \ - o = false; \ - } else { \ - CONF_ERROR("Invalid conf value",\ - k, klen, v, vlen); \ - } \ - CONF_CONTINUE; \ - } - /* + while (*opts != '\0' + && !malloc_conf_next(&opts, &k, &klen, &v, &vlen)) { +#define CONF_ERROR(msg, k, klen, v, vlen) \ + if (!initial_call) { \ + malloc_conf_error(msg, k, klen, v, vlen); \ + cur_opt_valid = false; \ + } +#define CONF_CONTINUE \ + { \ + if (!initial_call && opt_confirm_conf && cur_opt_valid) { \ + malloc_printf( \ + ": -- " \ + "Set conf value: %.*s:%.*s" \ + "\n", \ + (int)klen, k, (int)vlen, v); \ + } \ + continue; \ + } +#define CONF_MATCH(n) (sizeof(n) - 1 == klen && strncmp(n, k, klen) == 0) +#define CONF_MATCH_VALUE(n) (sizeof(n) - 1 == vlen && strncmp(n, v, vlen) == 0) +#define CONF_HANDLE_BOOL(o, n) \ + if (CONF_MATCH(n)) { \ + if (CONF_MATCH_VALUE("true")) { \ + o = true; \ + } else if (CONF_MATCH_VALUE("false")) { \ + o = false; \ + } else { \ + CONF_ERROR("Invalid conf value", k, klen, v, vlen); \ + } \ + CONF_CONTINUE; \ + } + /* * One of the CONF_MIN macros below expands, in one of the use points, * to "unsigned integer < 0", which is always false, triggering the * GCC -Wtype-limits warning, which we disable here and 
re-enable below. */ - JEMALLOC_DIAGNOSTIC_PUSH - JEMALLOC_DIAGNOSTIC_IGNORE_TYPE_LIMITS + JEMALLOC_DIAGNOSTIC_PUSH + JEMALLOC_DIAGNOSTIC_IGNORE_TYPE_LIMITS -#define CONF_DONT_CHECK_MIN(um, min) false -#define CONF_CHECK_MIN(um, min) ((um) < (min)) -#define CONF_DONT_CHECK_MAX(um, max) false -#define CONF_CHECK_MAX(um, max) ((um) > (max)) +#define CONF_DONT_CHECK_MIN(um, min) false +#define CONF_CHECK_MIN(um, min) ((um) < (min)) +#define CONF_DONT_CHECK_MAX(um, max) false +#define CONF_CHECK_MAX(um, max) ((um) > (max)) -#define CONF_VALUE_READ(max_t, result) \ - char *end; \ - set_errno(0); \ - result = (max_t)malloc_strtoumax(v, &end, 0); -#define CONF_VALUE_READ_FAIL() \ - (get_errno() != 0 || (uintptr_t)end - (uintptr_t)v != vlen) +#define CONF_VALUE_READ(max_t, result) \ + char *end; \ + set_errno(0); \ + result = (max_t)malloc_strtoumax(v, &end, 0); +#define CONF_VALUE_READ_FAIL() \ + (get_errno() != 0 || (uintptr_t)end - (uintptr_t)v != vlen) -#define CONF_HANDLE_T(t, max_t, o, n, min, max, check_min, check_max, clip) \ - if (CONF_MATCH(n)) { \ - max_t mv; \ - CONF_VALUE_READ(max_t, mv) \ - if (CONF_VALUE_READ_FAIL()) { \ - CONF_ERROR("Invalid conf value",\ - k, klen, v, vlen); \ - } else if (clip) { \ - if (check_min(mv, (t)(min))) { \ - o = (t)(min); \ - } else if ( \ - check_max(mv, (t)(max))) { \ - o = (t)(max); \ - } else { \ - o = (t)mv; \ - } \ - } else { \ - if (check_min(mv, (t)(min)) || \ - check_max(mv, (t)(max))) { \ - CONF_ERROR( \ - "Out-of-range " \ - "conf value", \ - k, klen, v, vlen); \ - } else { \ - o = (t)mv; \ - } \ - } \ - CONF_CONTINUE; \ - } -#define CONF_HANDLE_T_U(t, o, n, min, max, check_min, check_max, clip) \ - CONF_HANDLE_T(t, uintmax_t, o, n, min, max, check_min, \ - check_max, clip) -#define CONF_HANDLE_T_SIGNED(t, o, n, min, max, check_min, check_max, clip)\ - CONF_HANDLE_T(t, intmax_t, o, n, min, max, check_min, \ - check_max, clip) +#define CONF_HANDLE_T(t, max_t, o, n, min, max, check_min, check_max, clip) \ + if 
(CONF_MATCH(n)) { \ + max_t mv; \ + CONF_VALUE_READ(max_t, mv) \ + if (CONF_VALUE_READ_FAIL()) { \ + CONF_ERROR("Invalid conf value", k, klen, v, vlen); \ + } else if (clip) { \ + if (check_min(mv, (t)(min))) { \ + o = (t)(min); \ + } else if (check_max(mv, (t)(max))) { \ + o = (t)(max); \ + } else { \ + o = (t)mv; \ + } \ + } else { \ + if (check_min(mv, (t)(min)) \ + || check_max(mv, (t)(max))) { \ + CONF_ERROR( \ + "Out-of-range " \ + "conf value", \ + k, klen, v, vlen); \ + } else { \ + o = (t)mv; \ + } \ + } \ + CONF_CONTINUE; \ + } +#define CONF_HANDLE_T_U(t, o, n, min, max, check_min, check_max, clip) \ + CONF_HANDLE_T(t, uintmax_t, o, n, min, max, check_min, check_max, clip) +#define CONF_HANDLE_T_SIGNED(t, o, n, min, max, check_min, check_max, clip) \ + CONF_HANDLE_T(t, intmax_t, o, n, min, max, check_min, check_max, clip) -#define CONF_HANDLE_UNSIGNED(o, n, min, max, check_min, check_max, \ - clip) \ - CONF_HANDLE_T_U(unsigned, o, n, min, max, \ - check_min, check_max, clip) -#define CONF_HANDLE_SIZE_T(o, n, min, max, check_min, check_max, clip) \ - CONF_HANDLE_T_U(size_t, o, n, min, max, \ - check_min, check_max, clip) -#define CONF_HANDLE_INT64_T(o, n, min, max, check_min, check_max, clip) \ - CONF_HANDLE_T_SIGNED(int64_t, o, n, min, max, \ - check_min, check_max, clip) -#define CONF_HANDLE_UINT64_T(o, n, min, max, check_min, check_max, clip)\ - CONF_HANDLE_T_U(uint64_t, o, n, min, max, \ - check_min, check_max, clip) -#define CONF_HANDLE_SSIZE_T(o, n, min, max) \ - CONF_HANDLE_T_SIGNED(ssize_t, o, n, min, max, \ - CONF_CHECK_MIN, CONF_CHECK_MAX, false) -#define CONF_HANDLE_CHAR_P(o, n, d) \ - if (CONF_MATCH(n)) { \ - size_t cpylen = (vlen <= \ - sizeof(o)-1) ? 
vlen : \ - sizeof(o)-1; \ - strncpy(o, v, cpylen); \ - o[cpylen] = '\0'; \ - CONF_CONTINUE; \ - } +#define CONF_HANDLE_UNSIGNED(o, n, min, max, check_min, check_max, clip) \ + CONF_HANDLE_T_U(unsigned, o, n, min, max, check_min, check_max, clip) +#define CONF_HANDLE_SIZE_T(o, n, min, max, check_min, check_max, clip) \ + CONF_HANDLE_T_U(size_t, o, n, min, max, check_min, check_max, clip) +#define CONF_HANDLE_INT64_T(o, n, min, max, check_min, check_max, clip) \ + CONF_HANDLE_T_SIGNED( \ + int64_t, o, n, min, max, check_min, check_max, clip) +#define CONF_HANDLE_UINT64_T(o, n, min, max, check_min, check_max, clip) \ + CONF_HANDLE_T_U(uint64_t, o, n, min, max, check_min, check_max, clip) +#define CONF_HANDLE_SSIZE_T(o, n, min, max) \ + CONF_HANDLE_T_SIGNED( \ + ssize_t, o, n, min, max, CONF_CHECK_MIN, CONF_CHECK_MAX, false) +#define CONF_HANDLE_CHAR_P(o, n, d) \ + if (CONF_MATCH(n)) { \ + size_t cpylen = (vlen <= sizeof(o) - 1) ? vlen \ + : sizeof(o) - 1; \ + strncpy(o, v, cpylen); \ + o[cpylen] = '\0'; \ + CONF_CONTINUE; \ + } bool cur_opt_valid = true; @@ -1245,27 +1302,29 @@ malloc_conf_init_helper(sc_data_t *sc_data, unsigned bin_shard_sizes[SC_NBINS], CONF_HANDLE_BOOL(opt_abort_conf, "abort_conf") CONF_HANDLE_BOOL(opt_cache_oblivious, "cache_oblivious") CONF_HANDLE_BOOL(opt_trust_madvise, "trust_madvise") - CONF_HANDLE_BOOL(opt_huge_arena_pac_thp, "huge_arena_pac_thp") + CONF_HANDLE_BOOL( + opt_huge_arena_pac_thp, "huge_arena_pac_thp") if (strncmp("metadata_thp", k, klen) == 0) { - int m; + int m; bool match = false; for (m = 0; m < metadata_thp_mode_limit; m++) { if (strncmp(metadata_thp_mode_names[m], - v, vlen) == 0) { + v, vlen) + == 0) { opt_metadata_thp = m; match = true; break; } } if (!match) { - CONF_ERROR("Invalid conf value", - k, klen, v, vlen); + CONF_ERROR("Invalid conf value", k, + klen, v, vlen); } CONF_CONTINUE; } CONF_HANDLE_BOOL(opt_retain, "retain") if (strncmp("dss", k, klen) == 0) { - int m; + int m; bool match = false; for (m = 0; m < 
dss_prec_limit; m++) { if (strncmp(dss_prec_names[m], v, vlen) @@ -1283,8 +1342,8 @@ malloc_conf_init_helper(sc_data_t *sc_data, unsigned bin_shard_sizes[SC_NBINS], } } if (!match) { - CONF_ERROR("Invalid conf value", - k, klen, v, vlen); + CONF_ERROR("Invalid conf value", k, + klen, v, vlen); } CONF_CONTINUE; } @@ -1301,31 +1360,32 @@ malloc_conf_init_helper(sc_data_t *sc_data, unsigned bin_shard_sizes[SC_NBINS], } if (CONF_MATCH("narenas_ratio")) { char *end; - bool err = fxp_parse(&opt_narenas_ratio, v, - &end); + bool err = fxp_parse( + &opt_narenas_ratio, v, &end); if (err || (size_t)(end - v) != vlen) { - CONF_ERROR("Invalid conf value", - k, klen, v, vlen); + CONF_ERROR("Invalid conf value", k, + klen, v, vlen); } CONF_CONTINUE; } if (CONF_MATCH("bin_shards")) { const char *bin_shards_segment_cur = v; - size_t vlen_left = vlen; + size_t vlen_left = vlen; do { size_t size_start; size_t size_end; size_t nshards; - bool err = multi_setting_parse_next( - &bin_shards_segment_cur, &vlen_left, - &size_start, &size_end, &nshards); - if (err || bin_update_shard_size( - bin_shard_sizes, size_start, - size_end, nshards)) { + bool err = multi_setting_parse_next( + &bin_shards_segment_cur, &vlen_left, + &size_start, &size_end, &nshards); + if (err + || bin_update_shard_size( + bin_shard_sizes, size_start, + size_end, nshards)) { CONF_ERROR( "Invalid settings for " - "bin_shards", k, klen, v, - vlen); + "bin_shards", + k, klen, v, vlen); break; } } while (vlen_left > 0); @@ -1337,12 +1397,11 @@ malloc_conf_init_helper(sc_data_t *sc_data, unsigned bin_shard_sizes[SC_NBINS], /* clip */ true) CONF_HANDLE_SIZE_T(opt_bin_info_remote_free_max_batch, "remote_free_max_batch", 0, - BIN_REMOTE_FREE_ELEMS_MAX, - CONF_DONT_CHECK_MIN, CONF_CHECK_MAX, + BIN_REMOTE_FREE_ELEMS_MAX, CONF_DONT_CHECK_MIN, + CONF_CHECK_MAX, /* clip */ true) CONF_HANDLE_SIZE_T(opt_bin_info_remote_free_max, - "remote_free_max", 0, - BIN_REMOTE_FREE_ELEMS_MAX, + "remote_free_max", 0, BIN_REMOTE_FREE_ELEMS_MAX, 
CONF_DONT_CHECK_MIN, CONF_CHECK_MAX, /* clip */ true) @@ -1350,9 +1409,10 @@ malloc_conf_init_helper(sc_data_t *sc_data, unsigned bin_shard_sizes[SC_NBINS], bool err = tcache_bin_info_default_init( v, vlen); if (err) { - CONF_ERROR("Invalid settings for " - "tcache_ncached_max", k, klen, v, - vlen); + CONF_ERROR( + "Invalid settings for " + "tcache_ncached_max", + k, klen, v, vlen); } CONF_CONTINUE; } @@ -1360,13 +1420,15 @@ malloc_conf_init_helper(sc_data_t *sc_data, unsigned bin_shard_sizes[SC_NBINS], "mutex_max_spin", -1, INT64_MAX, CONF_CHECK_MIN, CONF_DONT_CHECK_MAX, false); CONF_HANDLE_SSIZE_T(opt_dirty_decay_ms, - "dirty_decay_ms", -1, NSTIME_SEC_MAX * KQU(1000) < - QU(SSIZE_MAX) ? NSTIME_SEC_MAX * KQU(1000) : - SSIZE_MAX); + "dirty_decay_ms", -1, + NSTIME_SEC_MAX * KQU(1000) < QU(SSIZE_MAX) + ? NSTIME_SEC_MAX * KQU(1000) + : SSIZE_MAX); CONF_HANDLE_SSIZE_T(opt_muzzy_decay_ms, - "muzzy_decay_ms", -1, NSTIME_SEC_MAX * KQU(1000) < - QU(SSIZE_MAX) ? NSTIME_SEC_MAX * KQU(1000) : - SSIZE_MAX); + "muzzy_decay_ms", -1, + NSTIME_SEC_MAX * KQU(1000) < QU(SSIZE_MAX) + ? 
NSTIME_SEC_MAX * KQU(1000) + : SSIZE_MAX); CONF_HANDLE_SIZE_T(opt_process_madvise_max_batch, "process_madvise_max_batch", 0, PROCESS_MADVISE_MAX_BATCH_LIMIT, @@ -1374,16 +1436,16 @@ malloc_conf_init_helper(sc_data_t *sc_data, unsigned bin_shard_sizes[SC_NBINS], /* clip */ true) CONF_HANDLE_BOOL(opt_stats_print, "stats_print") if (CONF_MATCH("stats_print_opts")) { - init_opt_stats_opts(v, vlen, - opt_stats_print_opts); + init_opt_stats_opts( + v, vlen, opt_stats_print_opts); CONF_CONTINUE; } CONF_HANDLE_INT64_T(opt_stats_interval, - "stats_interval", -1, INT64_MAX, - CONF_CHECK_MIN, CONF_DONT_CHECK_MAX, false) + "stats_interval", -1, INT64_MAX, CONF_CHECK_MIN, + CONF_DONT_CHECK_MAX, false) if (CONF_MATCH("stats_interval_opts")) { - init_opt_stats_opts(v, vlen, - opt_stats_interval_opts); + init_opt_stats_opts( + v, vlen, opt_stats_interval_opts); CONF_CONTINUE; } if (config_fill) { @@ -1405,8 +1467,7 @@ malloc_conf_init_helper(sc_data_t *sc_data, unsigned bin_shard_sizes[SC_NBINS], opt_junk_alloc = false; opt_junk_free = true; } else { - CONF_ERROR( - "Invalid conf value", + CONF_ERROR("Invalid conf value", k, klen, v, vlen); } CONF_CONTINUE; @@ -1428,15 +1489,15 @@ malloc_conf_init_helper(sc_data_t *sc_data, unsigned bin_shard_sizes[SC_NBINS], CONF_HANDLE_BOOL(opt_experimental_tcache_gc, "experimental_tcache_gc") CONF_HANDLE_BOOL(opt_tcache, "tcache") - CONF_HANDLE_SIZE_T(opt_tcache_max, "tcache_max", - 0, TCACHE_MAXCLASS_LIMIT, CONF_DONT_CHECK_MIN, + CONF_HANDLE_SIZE_T(opt_tcache_max, "tcache_max", 0, + TCACHE_MAXCLASS_LIMIT, CONF_DONT_CHECK_MIN, CONF_CHECK_MAX, /* clip */ true) if (CONF_MATCH("lg_tcache_max")) { size_t m; CONF_VALUE_READ(size_t, m) if (CONF_VALUE_READ_FAIL()) { - CONF_ERROR("Invalid conf value", - k, klen, v, vlen); + CONF_ERROR("Invalid conf value", k, + klen, v, vlen); } else { /* clip if necessary */ if (m > TCACHE_LG_MAXCLASS_LIMIT) { @@ -1454,14 +1515,14 @@ malloc_conf_init_helper(sc_data_t *sc_data, unsigned bin_shard_sizes[SC_NBINS], 
"lg_tcache_nslots_mul", -16, 16) /* Ditto with values past 2048. */ CONF_HANDLE_UNSIGNED(opt_tcache_nslots_small_min, - "tcache_nslots_small_min", 1, 2048, - CONF_CHECK_MIN, CONF_CHECK_MAX, /* clip */ true) + "tcache_nslots_small_min", 1, 2048, CONF_CHECK_MIN, + CONF_CHECK_MAX, /* clip */ true) CONF_HANDLE_UNSIGNED(opt_tcache_nslots_small_max, - "tcache_nslots_small_max", 1, 2048, - CONF_CHECK_MIN, CONF_CHECK_MAX, /* clip */ true) + "tcache_nslots_small_max", 1, 2048, CONF_CHECK_MIN, + CONF_CHECK_MAX, /* clip */ true) CONF_HANDLE_UNSIGNED(opt_tcache_nslots_large, - "tcache_nslots_large", 1, 2048, - CONF_CHECK_MIN, CONF_CHECK_MAX, /* clip */ true) + "tcache_nslots_large", 1, 2048, CONF_CHECK_MIN, + CONF_CHECK_MAX, /* clip */ true) CONF_HANDLE_SIZE_T(opt_tcache_gc_incr_bytes, "tcache_gc_incr_bytes", 1024, SIZE_T_MAX, CONF_CHECK_MIN, CONF_DONT_CHECK_MAX, @@ -1471,18 +1532,19 @@ malloc_conf_init_helper(sc_data_t *sc_data, unsigned bin_shard_sizes[SC_NBINS], CONF_DONT_CHECK_MIN, CONF_DONT_CHECK_MAX, /* clip */ false) CONF_HANDLE_UNSIGNED(opt_lg_tcache_flush_small_div, - "lg_tcache_flush_small_div", 1, 16, - CONF_CHECK_MIN, CONF_CHECK_MAX, /* clip */ true) + "lg_tcache_flush_small_div", 1, 16, CONF_CHECK_MIN, + CONF_CHECK_MAX, /* clip */ true) CONF_HANDLE_UNSIGNED(opt_lg_tcache_flush_large_div, - "lg_tcache_flush_large_div", 1, 16, - CONF_CHECK_MIN, CONF_CHECK_MAX, /* clip */ true) + "lg_tcache_flush_large_div", 1, 16, CONF_CHECK_MIN, + CONF_CHECK_MAX, /* clip */ true) CONF_HANDLE_UNSIGNED(opt_debug_double_free_max_scan, "debug_double_free_max_scan", 0, UINT_MAX, CONF_DONT_CHECK_MIN, CONF_DONT_CHECK_MAX, /* clip */ false) CONF_HANDLE_SIZE_T(opt_calloc_madvise_threshold, "calloc_madvise_threshold", 0, SC_LARGE_MAXCLASS, - CONF_DONT_CHECK_MIN, CONF_CHECK_MAX, /* clip */ false) + CONF_DONT_CHECK_MIN, CONF_CHECK_MAX, + /* clip */ false) /* * The runtime option of oversize_threshold remains @@ -1502,10 +1564,11 @@ malloc_conf_init_helper(sc_data_t *sc_data, unsigned 
bin_shard_sizes[SC_NBINS], if (strncmp("percpu_arena", k, klen) == 0) { bool match = false; - for (int m = percpu_arena_mode_names_base; m < - percpu_arena_mode_names_limit; m++) { + for (int m = percpu_arena_mode_names_base; + m < percpu_arena_mode_names_limit; m++) { if (strncmp(percpu_arena_mode_names[m], - v, vlen) == 0) { + v, vlen) + == 0) { if (!have_percpu_arena) { CONF_ERROR( "No getcpu support", @@ -1517,18 +1580,17 @@ malloc_conf_init_helper(sc_data_t *sc_data, unsigned bin_shard_sizes[SC_NBINS], } } if (!match) { - CONF_ERROR("Invalid conf value", - k, klen, v, vlen); + CONF_ERROR("Invalid conf value", k, + klen, v, vlen); } CONF_CONTINUE; } - CONF_HANDLE_BOOL(opt_background_thread, - "background_thread"); + CONF_HANDLE_BOOL( + opt_background_thread, "background_thread"); CONF_HANDLE_SIZE_T(opt_max_background_threads, - "max_background_threads", 1, - opt_max_background_threads, - CONF_CHECK_MIN, CONF_CHECK_MAX, - true); + "max_background_threads", 1, + opt_max_background_threads, CONF_CHECK_MIN, + CONF_CHECK_MAX, true); CONF_HANDLE_BOOL(opt_hpa, "hpa") CONF_HANDLE_SIZE_T(opt_hpa_opts.slab_max_alloc, "hpa_slab_max_alloc", PAGE, HUGEPAGE, @@ -1544,12 +1606,11 @@ malloc_conf_init_helper(sc_data_t *sc_data, unsigned bin_shard_sizes[SC_NBINS], if (CONF_MATCH("hpa_hugification_threshold_ratio")) { fxp_t ratio; char *end; - bool err = fxp_parse(&ratio, v, - &end); + bool err = fxp_parse(&ratio, v, &end); if (err || (size_t)(end - v) != vlen || ratio > FXP_INIT_INT(1)) { - CONF_ERROR("Invalid conf value", - k, klen, v, vlen); + CONF_ERROR("Invalid conf value", k, + klen, v, vlen); } else { opt_hpa_opts.hugification_threshold = fxp_mul_frac(HUGEPAGE, ratio); @@ -1557,16 +1618,14 @@ malloc_conf_init_helper(sc_data_t *sc_data, unsigned bin_shard_sizes[SC_NBINS], CONF_CONTINUE; } - CONF_HANDLE_UINT64_T( - opt_hpa_opts.hugify_delay_ms, "hpa_hugify_delay_ms", - 0, 0, CONF_DONT_CHECK_MIN, CONF_DONT_CHECK_MAX, - false); + 
CONF_HANDLE_UINT64_T(opt_hpa_opts.hugify_delay_ms, + "hpa_hugify_delay_ms", 0, 0, CONF_DONT_CHECK_MIN, + CONF_DONT_CHECK_MAX, false); CONF_HANDLE_BOOL( opt_hpa_opts.hugify_sync, "hpa_hugify_sync"); - CONF_HANDLE_UINT64_T( - opt_hpa_opts.min_purge_interval_ms, + CONF_HANDLE_UINT64_T(opt_hpa_opts.min_purge_interval_ms, "hpa_min_purge_interval_ms", 0, 0, CONF_DONT_CHECK_MIN, CONF_DONT_CHECK_MAX, false); @@ -1581,11 +1640,10 @@ malloc_conf_init_helper(sc_data_t *sc_data, unsigned bin_shard_sizes[SC_NBINS], } fxp_t ratio; char *end; - bool err = fxp_parse(&ratio, v, - &end); + bool err = fxp_parse(&ratio, v, &end); if (err || (size_t)(end - v) != vlen) { - CONF_ERROR("Invalid conf value", - k, klen, v, vlen); + CONF_ERROR("Invalid conf value", k, + klen, v, vlen); } else { opt_hpa_opts.dirty_mult = ratio; } @@ -1596,8 +1654,9 @@ malloc_conf_init_helper(sc_data_t *sc_data, unsigned bin_shard_sizes[SC_NBINS], "hpa_sec_nshards", 0, 0, CONF_CHECK_MIN, CONF_DONT_CHECK_MAX, true); CONF_HANDLE_SIZE_T(opt_hpa_sec_opts.max_alloc, - "hpa_sec_max_alloc", PAGE, USIZE_GROW_SLOW_THRESHOLD, - CONF_CHECK_MIN, CONF_CHECK_MAX, true); + "hpa_sec_max_alloc", PAGE, + USIZE_GROW_SLOW_THRESHOLD, CONF_CHECK_MIN, + CONF_CHECK_MAX, true); CONF_HANDLE_SIZE_T(opt_hpa_sec_opts.max_bytes, "hpa_sec_max_bytes", PAGE, 0, CONF_CHECK_MIN, CONF_DONT_CHECK_MAX, true); @@ -1613,23 +1672,23 @@ malloc_conf_init_helper(sc_data_t *sc_data, unsigned bin_shard_sizes[SC_NBINS], sc_data_init(sc_data); CONF_CONTINUE; } - bool err; + bool err; const char *slab_size_segment_cur = v; - size_t vlen_left = vlen; + size_t vlen_left = vlen; do { size_t slab_start; size_t slab_end; size_t pgs; err = multi_setting_parse_next( - &slab_size_segment_cur, - &vlen_left, &slab_start, &slab_end, - &pgs); + &slab_size_segment_cur, &vlen_left, + &slab_start, &slab_end, &pgs); if (!err) { sc_data_update_slab_size( sc_data, slab_start, slab_end, (int)pgs); } else { - CONF_ERROR("Invalid settings " + CONF_ERROR( + "Invalid settings " 
"for slab_sizes", k, klen, v, vlen); } @@ -1638,22 +1697,24 @@ malloc_conf_init_helper(sc_data_t *sc_data, unsigned bin_shard_sizes[SC_NBINS], } if (config_prof) { CONF_HANDLE_BOOL(opt_prof, "prof") - CONF_HANDLE_CHAR_P(opt_prof_prefix, - "prof_prefix", "jeprof") + CONF_HANDLE_CHAR_P( + opt_prof_prefix, "prof_prefix", "jeprof") CONF_HANDLE_BOOL(opt_prof_active, "prof_active") CONF_HANDLE_BOOL(opt_prof_thread_active_init, "prof_thread_active_init") CONF_HANDLE_SIZE_T(opt_lg_prof_sample, - "lg_prof_sample", 0, (sizeof(uint64_t) << 3) - - 1, CONF_DONT_CHECK_MIN, CONF_CHECK_MAX, - true) - CONF_HANDLE_SIZE_T(opt_experimental_lg_prof_threshold, - "experimental_lg_prof_threshold", 0, (sizeof(uint64_t) << 3) - - 1, CONF_DONT_CHECK_MIN, CONF_CHECK_MAX, - true) + "lg_prof_sample", 0, + (sizeof(uint64_t) << 3) - 1, + CONF_DONT_CHECK_MIN, CONF_CHECK_MAX, true) + CONF_HANDLE_SIZE_T( + opt_experimental_lg_prof_threshold, + "experimental_lg_prof_threshold", 0, + (sizeof(uint64_t) << 3) - 1, + CONF_DONT_CHECK_MIN, CONF_CHECK_MAX, true) CONF_HANDLE_BOOL(opt_prof_accum, "prof_accum") - CONF_HANDLE_UNSIGNED(opt_prof_bt_max, "prof_bt_max", - 1, PROF_BT_MAX_LIMIT, CONF_CHECK_MIN, CONF_CHECK_MAX, + CONF_HANDLE_UNSIGNED(opt_prof_bt_max, + "prof_bt_max", 1, PROF_BT_MAX_LIMIT, + CONF_CHECK_MIN, CONF_CHECK_MAX, /* clip */ true) CONF_HANDLE_SSIZE_T(opt_lg_prof_interval, "lg_prof_interval", -1, @@ -1661,10 +1722,11 @@ malloc_conf_init_helper(sc_data_t *sc_data, unsigned bin_shard_sizes[SC_NBINS], CONF_HANDLE_BOOL(opt_prof_gdump, "prof_gdump") CONF_HANDLE_BOOL(opt_prof_final, "prof_final") CONF_HANDLE_BOOL(opt_prof_leak, "prof_leak") - CONF_HANDLE_BOOL(opt_prof_leak_error, - "prof_leak_error") + CONF_HANDLE_BOOL( + opt_prof_leak_error, "prof_leak_error") CONF_HANDLE_BOOL(opt_prof_log, "prof_log") - CONF_HANDLE_BOOL(opt_prof_pid_namespace, "prof_pid_namespace") + CONF_HANDLE_BOOL(opt_prof_pid_namespace, + "prof_pid_namespace") CONF_HANDLE_SSIZE_T(opt_prof_recent_alloc_max, 
"prof_recent_alloc_max", -1, SSIZE_MAX) CONF_HANDLE_BOOL(opt_prof_stats, "prof_stats") @@ -1703,9 +1765,10 @@ malloc_conf_init_helper(sc_data_t *sc_data, unsigned bin_shard_sizes[SC_NBINS], } if (config_log) { if (CONF_MATCH("log")) { - size_t cpylen = ( - vlen <= sizeof(log_var_names) ? - vlen : sizeof(log_var_names) - 1); + size_t cpylen = (vlen + <= sizeof(log_var_names) + ? vlen + : sizeof(log_var_names) - 1); strncpy(log_var_names, v, cpylen); log_var_names[cpylen] = '\0'; CONF_CONTINUE; @@ -1714,12 +1777,13 @@ malloc_conf_init_helper(sc_data_t *sc_data, unsigned bin_shard_sizes[SC_NBINS], if (CONF_MATCH("thp")) { bool match = false; for (int m = 0; m < thp_mode_names_limit; m++) { - if (strncmp(thp_mode_names[m],v, vlen) + if (strncmp(thp_mode_names[m], v, vlen) == 0) { - if (!have_madvise_huge && !have_memcntl) { + if (!have_madvise_huge + && !have_memcntl) { CONF_ERROR( - "No THP support", - k, klen, v, vlen); + "No THP support", k, + klen, v, vlen); } opt_thp = m; match = true; @@ -1727,34 +1791,34 @@ malloc_conf_init_helper(sc_data_t *sc_data, unsigned bin_shard_sizes[SC_NBINS], } } if (!match) { - CONF_ERROR("Invalid conf value", - k, klen, v, vlen); + CONF_ERROR("Invalid conf value", k, + klen, v, vlen); } CONF_CONTINUE; } if (CONF_MATCH("zero_realloc")) { if (CONF_MATCH_VALUE("alloc")) { - opt_zero_realloc_action - = zero_realloc_action_alloc; + opt_zero_realloc_action = + zero_realloc_action_alloc; } else if (CONF_MATCH_VALUE("free")) { - opt_zero_realloc_action - = zero_realloc_action_free; + opt_zero_realloc_action = + zero_realloc_action_free; } else if (CONF_MATCH_VALUE("abort")) { - opt_zero_realloc_action - = zero_realloc_action_abort; + opt_zero_realloc_action = + zero_realloc_action_abort; } else { - CONF_ERROR("Invalid conf value", - k, klen, v, vlen); + CONF_ERROR("Invalid conf value", k, + klen, v, vlen); } CONF_CONTINUE; } - if (config_uaf_detection && - CONF_MATCH("lg_san_uaf_align")) { + if (config_uaf_detection + && 
CONF_MATCH("lg_san_uaf_align")) { ssize_t a; CONF_VALUE_READ(ssize_t, a) if (CONF_VALUE_READ_FAIL() || a < -1) { - CONF_ERROR("Invalid conf value", - k, klen, v, vlen); + CONF_ERROR("Invalid conf value", k, + klen, v, vlen); } if (a == -1) { opt_lg_san_uaf_align = -1; @@ -1807,8 +1871,8 @@ malloc_conf_init_helper(sc_data_t *sc_data, unsigned bin_shard_sizes[SC_NBINS], #undef CONF_HANDLE_SIZE_T #undef CONF_HANDLE_SSIZE_T #undef CONF_HANDLE_CHAR_P - /* Re-enable diagnostic "-Wtype-limits" */ - JEMALLOC_DIAGNOSTIC_POP + /* Re-enable diagnostic "-Wtype-limits" */ + JEMALLOC_DIAGNOSTIC_POP } validate_hpa_settings(); if (opt_abort_conf && had_conf_error) { @@ -1821,7 +1885,8 @@ malloc_conf_init_helper(sc_data_t *sc_data, unsigned bin_shard_sizes[SC_NBINS], static bool malloc_conf_init_check_deps(void) { if (opt_prof_leak_error && !opt_prof_final) { - malloc_printf(": prof_leak_error is set w/o " + malloc_printf( + ": prof_leak_error is set w/o " "prof_final.\n"); return true; } @@ -1836,13 +1901,13 @@ malloc_conf_init_check_deps(void) { static void malloc_conf_init(sc_data_t *sc_data, unsigned bin_shard_sizes[SC_NBINS], char readlink_buf[PATH_MAX + 1]) { - const char *opts_cache[MALLOC_CONF_NSOURCES] = {NULL, NULL, NULL, NULL, - NULL}; + const char *opts_cache[MALLOC_CONF_NSOURCES] = { + NULL, NULL, NULL, NULL, NULL}; /* The first call only set the confirm_conf option and opts_cache */ malloc_conf_init_helper(NULL, NULL, true, opts_cache, readlink_buf); - malloc_conf_init_helper(sc_data, bin_shard_sizes, false, opts_cache, - NULL); + malloc_conf_init_helper( + sc_data, bin_shard_sizes, false, opts_cache, NULL); if (malloc_conf_init_check_deps()) { /* check_deps does warning msg only; abort below if needed. 
*/ if (opt_abort_conf) { @@ -1855,8 +1920,9 @@ malloc_conf_init(sc_data_t *sc_data, unsigned bin_shard_sizes[SC_NBINS], static bool malloc_init_hard_needed(void) { - if (malloc_initialized() || (IS_INITIALIZER && malloc_init_state == - malloc_init_recursible)) { + if (malloc_initialized() + || (IS_INITIALIZER + && malloc_init_state == malloc_init_recursible)) { /* * Another thread initialized the allocator before this one * acquired init_lock, or this thread is the initializing @@ -1946,7 +2012,8 @@ malloc_init_hard_a0_locked(void) { prof_boot1(); } if (opt_hpa && !hpa_supported()) { - malloc_printf(": HPA not supported in the current " + malloc_printf( + ": HPA not supported in the current " "configuration; %s.", opt_abort_conf ? "aborting" : "disabling"); if (opt_abort_conf) { @@ -1962,7 +2029,7 @@ malloc_init_hard_a0_locked(void) { return true; } if (malloc_mutex_init(&arenas_lock, "arenas", WITNESS_RANK_ARENAS, - malloc_mutex_rank_exclusive)) { + malloc_mutex_rank_exclusive)) { return true; } hook_boot(); @@ -1984,7 +2051,8 @@ malloc_init_hard_a0_locked(void) { a0 = arena_get(TSDN_NULL, 0, false); if (opt_hpa && !hpa_supported()) { - malloc_printf(": HPA not supported in the current " + malloc_printf( + ": HPA not supported in the current " "configuration; %s.", opt_abort_conf ? "aborting" : "disabling"); if (opt_abort_conf) { @@ -2035,7 +2103,8 @@ malloc_init_hard_recursible(void) { */ if (opt_narenas == 0) { opt_percpu_arena = percpu_arena_disabled; - malloc_write(": Number of CPUs " + malloc_write( + ": Number of CPUs " "detected is not deterministic. Per-CPU " "arena disabled.\n"); if (opt_abort_conf) { @@ -2049,11 +2118,12 @@ malloc_init_hard_recursible(void) { } #if (defined(JEMALLOC_HAVE_PTHREAD_ATFORK) && !defined(JEMALLOC_MUTEX_INIT_CB) \ - && !defined(JEMALLOC_ZONE) && !defined(_WIN32) && \ - !defined(__native_client__)) + && !defined(JEMALLOC_ZONE) && !defined(_WIN32) \ + && !defined(__native_client__)) /* LinuxThreads' pthread_atfork() allocates. 
*/ if (pthread_atfork(jemalloc_prefork, jemalloc_postfork_parent, - jemalloc_postfork_child) != 0) { + jemalloc_postfork_child) + != 0) { malloc_write(": Error in pthread_atfork()\n"); if (opt_abort) { abort(); @@ -2077,8 +2147,8 @@ malloc_narenas_default(void) { * default. */ if (ncpus > 1) { - fxp_t fxp_ncpus = FXP_INIT_INT(ncpus); - fxp_t goal = fxp_mul(fxp_ncpus, opt_narenas_ratio); + fxp_t fxp_ncpus = FXP_INIT_INT(ncpus); + fxp_t goal = fxp_mul(fxp_ncpus, opt_narenas_ratio); uint32_t int_goal = fxp_round_nearest(goal); if (int_goal == 0) { return 1; @@ -2108,28 +2178,35 @@ malloc_init_narenas(tsdn_t *tsdn) { if (opt_percpu_arena != percpu_arena_disabled) { if (!have_percpu_arena || malloc_getcpu() < 0) { opt_percpu_arena = percpu_arena_disabled; - malloc_printf(": perCPU arena getcpu() not " - "available. Setting narenas to %u.\n", opt_narenas ? - opt_narenas : malloc_narenas_default()); + malloc_printf( + ": perCPU arena getcpu() not " + "available. Setting narenas to %u.\n", + opt_narenas ? opt_narenas + : malloc_narenas_default()); if (opt_abort) { abort(); } } else { if (ncpus >= MALLOCX_ARENA_LIMIT) { - malloc_printf(": narenas w/ percpu" - "arena beyond limit (%d)\n", ncpus); + malloc_printf( + ": narenas w/ percpu" + "arena beyond limit (%d)\n", + ncpus); if (opt_abort) { abort(); } return true; } /* NB: opt_percpu_arena isn't fully initialized yet. */ - if (percpu_arena_as_initialized(opt_percpu_arena) == - per_phycpu_arena && ncpus % 2 != 0) { - malloc_printf(": invalid " + if (percpu_arena_as_initialized(opt_percpu_arena) + == per_phycpu_arena + && ncpus % 2 != 0) { + malloc_printf( + ": invalid " "configuration -- per physical CPU arena " "with odd number (%u) of CPUs (no hyper " - "threading?).\n", ncpus); + "threading?).\n", + ncpus); if (opt_abort) abort(); } @@ -2217,24 +2294,23 @@ malloc_init_hard(void) { * than LARGE_MINCLASS. It could only happen if some constants * are configured miserably wrong. 
*/ - assert(SC_LG_TINY_MAXCLASS <= - (size_t)1ULL << (LG_PAGE + SC_LG_NGROUP)); + assert(SC_LG_TINY_MAXCLASS <= (size_t)1ULL << (LG_PAGE + SC_LG_NGROUP)); #if defined(_WIN32) && _WIN32_WINNT < 0x0600 _init_init_lock(); #endif malloc_mutex_lock(TSDN_NULL, &init_lock); -#define UNLOCK_RETURN(tsdn, ret, reentrancy) \ - malloc_init_hard_cleanup(tsdn, reentrancy); \ +#define UNLOCK_RETURN(tsdn, ret, reentrancy) \ + malloc_init_hard_cleanup(tsdn, reentrancy); \ return ret; if (!malloc_init_hard_needed()) { UNLOCK_RETURN(TSDN_NULL, false, false) } - if (malloc_init_state != malloc_init_a0_initialized && - malloc_init_hard_a0_locked()) { + if (malloc_init_state != malloc_init_a0_initialized + && malloc_init_hard_a0_locked()) { UNLOCK_RETURN(TSDN_NULL, true, false) } @@ -2262,11 +2338,11 @@ malloc_init_hard(void) { * background_thread_enabled wasn't initialized yet, but we * need it to set correct value for deferral_allowed. */ - arena_t *a0 = arena_get(tsd_tsdn(tsd), 0, false); + arena_t *a0 = arena_get(tsd_tsdn(tsd), 0, false); hpa_shard_opts_t hpa_shard_opts = opt_hpa_opts; hpa_shard_opts.deferral_allowed = background_thread_enabled(); if (pa_shard_enable_hpa(tsd_tsdn(tsd), &a0->pa_shard, - &hpa_shard_opts, &opt_hpa_sec_opts)) { + &hpa_shard_opts, &opt_hpa_sec_opts)) { UNLOCK_RETURN(tsd_tsdn(tsd), true, true) } } @@ -2282,8 +2358,8 @@ malloc_init_hard(void) { post_reentrancy(tsd); malloc_mutex_unlock(tsd_tsdn(tsd), &init_lock); - witness_assert_lockless(witness_tsd_tsdn( - tsd_witness_tsdp_get_unsafe(tsd))); + witness_assert_lockless( + witness_tsd_tsdn(tsd_witness_tsdp_get_unsafe(tsd))); malloc_tsd_boot1(); /* Update TSD after tsd_boot1. 
*/ tsd = tsd_fetch(); @@ -2378,12 +2454,12 @@ static_opts_init(static_opts_t *static_opts) { typedef struct dynamic_opts_s dynamic_opts_t; struct dynamic_opts_s { - void **result; - size_t usize; - size_t num_items; - size_t item_size; - size_t alignment; - bool zero; + void **result; + size_t usize; + size_t num_items; + size_t item_size; + size_t alignment; + bool zero; unsigned tcache_ind; unsigned arena_ind; }; @@ -2414,8 +2490,9 @@ aligned_usize_get(size_t size, size_t alignment, size_t *usize, szind_t *ind, if (unlikely(*ind >= SC_NSIZES)) { return true; } - *usize = sz_large_size_classes_disabled()? sz_s2u(size): - sz_index2size(*ind); + *usize = sz_large_size_classes_disabled() + ? sz_s2u(size) + : sz_index2size(*ind); assert(*usize > 0 && *usize <= SC_LARGE_MAXCLASS); return false; } @@ -2465,8 +2542,8 @@ JEMALLOC_ALWAYS_INLINE void * imalloc_no_sample(static_opts_t *sopts, dynamic_opts_t *dopts, tsd_t *tsd, size_t size, size_t usize, szind_t ind, bool slab) { /* Fill in the tcache. */ - tcache_t *tcache = tcache_get_from_ind(tsd, dopts->tcache_ind, - sopts->slow, /* is_alloc */ true); + tcache_t *tcache = tcache_get_from_ind( + tsd, dopts->tcache_ind, sopts->slow, /* is_alloc */ true); /* Fill in the arena. */ arena_t *arena; @@ -2496,7 +2573,7 @@ imalloc_sample(static_opts_t *sopts, dynamic_opts_t *dopts, tsd_t *tsd, */ if (sz_can_use_slab(usize)) { assert((dopts->alignment & PROF_SAMPLE_ALIGNMENT_MASK) == 0); - size_t bumped_usize = sz_sa2u(usize, dopts->alignment); + size_t bumped_usize = sz_sa2u(usize, dopts->alignment); szind_t bumped_ind = sz_size2index(bumped_usize); dopts->tcache_ind = TCACHE_IND_NONE; ret = imalloc_no_sample(sopts, dopts, tsd, bumped_usize, @@ -2519,8 +2596,8 @@ imalloc_sample(static_opts_t *sopts, dynamic_opts_t *dopts, tsd_t *tsd, * *size to the product either way. 
*/ JEMALLOC_ALWAYS_INLINE bool -compute_size_with_overflow(bool may_overflow, dynamic_opts_t *dopts, - size_t *size) { +compute_size_with_overflow( + bool may_overflow, dynamic_opts_t *dopts, size_t *size) { /* * This function is just num_items * item_size, except that we may have * to check for overflow. @@ -2576,26 +2653,26 @@ imalloc_body(static_opts_t *sopts, dynamic_opts_t *dopts, tsd_t *tsd) { int8_t reentrancy_level; /* Compute the amount of memory the user wants. */ - if (unlikely(compute_size_with_overflow(sopts->may_overflow, dopts, - &size))) { + if (unlikely(compute_size_with_overflow( + sopts->may_overflow, dopts, &size))) { goto label_oom; } if (unlikely(dopts->alignment < sopts->min_alignment - || (dopts->alignment & (dopts->alignment - 1)) != 0)) { + || (dopts->alignment & (dopts->alignment - 1)) != 0)) { goto label_invalid_alignment; } /* This is the beginning of the "core" algorithm. */ dopts->zero = zero_get(dopts->zero, sopts->slow); if (aligned_usize_get(size, dopts->alignment, &usize, &ind, - sopts->bump_empty_aligned_alloc)) { + sopts->bump_empty_aligned_alloc)) { goto label_oom; } dopts->usize = usize; /* Validate the user input. */ if (sopts->assert_nonempty_alloc) { - assert (size != 0); + assert(size != 0); } check_entry_exit_locking(tsd_tsdn(tsd)); @@ -2610,8 +2687,8 @@ imalloc_body(static_opts_t *sopts, dynamic_opts_t *dopts, tsd_t *tsd) { * We should never specify particular arenas or tcaches from * within our internal allocations. */ - assert(dopts->tcache_ind == TCACHE_IND_AUTOMATIC || - dopts->tcache_ind == TCACHE_IND_NONE); + assert(dopts->tcache_ind == TCACHE_IND_AUTOMATIC + || dopts->tcache_ind == TCACHE_IND_NONE); assert(dopts->arena_ind == ARENA_IND_AUTOMATIC); dopts->tcache_ind = TCACHE_IND_NONE; /* We know that arena 0 has already been initialized. 
*/ @@ -2628,15 +2705,14 @@ imalloc_body(static_opts_t *sopts, dynamic_opts_t *dopts, tsd_t *tsd) { if (config_prof && opt_prof) { bool prof_active = prof_active_get_unlocked(); bool sample_event = te_prof_sample_event_lookahead(tsd, usize); - prof_tctx_t *tctx = prof_alloc_prep(tsd, prof_active, - sample_event); + prof_tctx_t *tctx = prof_alloc_prep( + tsd, prof_active, sample_event); emap_alloc_ctx_t alloc_ctx; if (likely(tctx == PROF_TCTX_SENTINEL)) { alloc_ctx.slab = sz_can_use_slab(usize); - allocation = imalloc_no_sample( - sopts, dopts, tsd, usize, usize, ind, - alloc_ctx.slab); + allocation = imalloc_no_sample(sopts, dopts, tsd, usize, + usize, ind, alloc_ctx.slab); } else if (tctx != NULL) { allocation = imalloc_sample( sopts, dopts, tsd, usize, ind); @@ -2780,8 +2856,8 @@ imalloc(static_opts_t *sopts, dynamic_opts_t *dopts) { JEMALLOC_NOINLINE void * malloc_default(size_t size) { - void *ret; - static_opts_t sopts; + void *ret; + static_opts_t sopts; dynamic_opts_t dopts; /* @@ -2819,13 +2895,12 @@ malloc_default(size_t size) { * Begin malloc(3)-compatible functions. 
*/ -JEMALLOC_EXPORT JEMALLOC_ALLOCATOR JEMALLOC_RESTRICT_RETURN -void JEMALLOC_NOTHROW * -JEMALLOC_ATTR(malloc) JEMALLOC_ALLOC_SIZE(1) -je_malloc(size_t size) { +JEMALLOC_EXPORT + JEMALLOC_ALLOCATOR JEMALLOC_RESTRICT_RETURN void JEMALLOC_NOTHROW * + JEMALLOC_ATTR(malloc) JEMALLOC_ALLOC_SIZE(1) je_malloc(size_t size) { LOG("core.malloc.entry", "size: %zu", size); - void * ret = imalloc_fastpath(size, &malloc_default); + void *ret = imalloc_fastpath(size, &malloc_default); LOG("core.malloc.exit", "result: %p", ret); return ret; @@ -2833,13 +2908,15 @@ je_malloc(size_t size) { JEMALLOC_EXPORT int JEMALLOC_NOTHROW JEMALLOC_ATTR(nonnull(1)) -je_posix_memalign(void **memptr, size_t alignment, size_t size) { - int ret; - static_opts_t sopts; + je_posix_memalign(void **memptr, size_t alignment, size_t size) { + int ret; + static_opts_t sopts; dynamic_opts_t dopts; - LOG("core.posix_memalign.entry", "mem ptr: %p, alignment: %zu, " - "size: %zu", memptr, alignment, size); + LOG("core.posix_memalign.entry", + "mem ptr: %p, alignment: %zu, " + "size: %zu", + memptr, alignment, size); static_opts_init(&sopts); dynamic_opts_init(&dopts); @@ -2858,10 +2935,10 @@ je_posix_memalign(void **memptr, size_t alignment, size_t size) { ret = imalloc(&sopts, &dopts); if (sopts.slow) { - uintptr_t args[3] = {(uintptr_t)memptr, (uintptr_t)alignment, - (uintptr_t)size}; - hook_invoke_alloc(hook_alloc_posix_memalign, *memptr, - (uintptr_t)ret, args); + uintptr_t args[3] = { + (uintptr_t)memptr, (uintptr_t)alignment, (uintptr_t)size}; + hook_invoke_alloc( + hook_alloc_posix_memalign, *memptr, (uintptr_t)ret, args); } LOG("core.posix_memalign.exit", "result: %d, alloc ptr: %p", ret, @@ -2870,13 +2947,13 @@ je_posix_memalign(void **memptr, size_t alignment, size_t size) { return ret; } -JEMALLOC_EXPORT JEMALLOC_ALLOCATOR JEMALLOC_RESTRICT_RETURN -void JEMALLOC_NOTHROW * -JEMALLOC_ATTR(malloc) JEMALLOC_ALLOC_SIZE(2) -je_aligned_alloc(size_t alignment, size_t size) { +JEMALLOC_EXPORT + 
JEMALLOC_ALLOCATOR JEMALLOC_RESTRICT_RETURN void JEMALLOC_NOTHROW * + JEMALLOC_ATTR(malloc) JEMALLOC_ALLOC_SIZE(2) + je_aligned_alloc(size_t alignment, size_t size) { void *ret; - static_opts_t sopts; + static_opts_t sopts; dynamic_opts_t dopts; LOG("core.aligned_alloc.entry", "alignment: %zu, size: %zu\n", @@ -2902,8 +2979,8 @@ je_aligned_alloc(size_t alignment, size_t size) { imalloc(&sopts, &dopts); if (sopts.slow) { uintptr_t args[3] = {(uintptr_t)alignment, (uintptr_t)size}; - hook_invoke_alloc(hook_alloc_aligned_alloc, ret, - (uintptr_t)ret, args); + hook_invoke_alloc( + hook_alloc_aligned_alloc, ret, (uintptr_t)ret, args); } LOG("core.aligned_alloc.exit", "result: %p", ret); @@ -2911,12 +2988,12 @@ je_aligned_alloc(size_t alignment, size_t size) { return ret; } -JEMALLOC_EXPORT JEMALLOC_ALLOCATOR JEMALLOC_RESTRICT_RETURN -void JEMALLOC_NOTHROW * -JEMALLOC_ATTR(malloc) JEMALLOC_ALLOC_SIZE2(1, 2) -je_calloc(size_t num, size_t size) { - void *ret; - static_opts_t sopts; +JEMALLOC_EXPORT + JEMALLOC_ALLOCATOR JEMALLOC_RESTRICT_RETURN void JEMALLOC_NOTHROW * + JEMALLOC_ATTR(malloc) JEMALLOC_ALLOC_SIZE2(1, 2) + je_calloc(size_t num, size_t size) { + void *ret; + static_opts_t sopts; dynamic_opts_t dopts; LOG("core.calloc.entry", "num: %zu, size: %zu", num, size); @@ -2959,8 +3036,8 @@ ifree(tsd_t *tsd, void *ptr, tcache_t *tcache, bool slow_path) { assert(malloc_initialized() || IS_INITIALIZER); emap_alloc_ctx_t alloc_ctx; - emap_alloc_ctx_lookup(tsd_tsdn(tsd), &arena_emap_global, ptr, - &alloc_ctx); + emap_alloc_ctx_lookup( + tsd_tsdn(tsd), &arena_emap_global, ptr, &alloc_ctx); assert(alloc_ctx.szind != SC_NSIZES); size_t usize = emap_alloc_ctx_usize_get(&alloc_ctx); @@ -2969,14 +3046,12 @@ ifree(tsd_t *tsd, void *ptr, tcache_t *tcache, bool slow_path) { } if (likely(!slow_path)) { - idalloctm(tsd_tsdn(tsd), ptr, tcache, &alloc_ctx, false, - false); + idalloctm(tsd_tsdn(tsd), ptr, tcache, &alloc_ctx, false, false); } else { if (config_fill && slow_path && 
opt_junk_free) { junk_free_callback(ptr, usize); } - idalloctm(tsd_tsdn(tsd), ptr, tcache, &alloc_ctx, false, - true); + idalloctm(tsd_tsdn(tsd), ptr, tcache, &alloc_ctx, false, true); } thread_dalloc_event(tsd, usize); } @@ -2995,32 +3070,32 @@ isfree(tsd_t *tsd, void *ptr, size_t usize, tcache_t *tcache, bool slow_path) { assert(malloc_initialized() || IS_INITIALIZER); emap_alloc_ctx_t alloc_ctx; - szind_t szind = sz_size2index(usize); + szind_t szind = sz_size2index(usize); if (!config_prof) { - emap_alloc_ctx_init(&alloc_ctx, szind, (szind < SC_NBINS), - usize); + emap_alloc_ctx_init( + &alloc_ctx, szind, (szind < SC_NBINS), usize); } else { if (likely(!prof_sample_aligned(ptr))) { /* * When the ptr is not page aligned, it was not sampled. * usize can be trusted to determine szind and slab. */ - emap_alloc_ctx_init(&alloc_ctx, szind, - (szind < SC_NBINS), usize); + emap_alloc_ctx_init( + &alloc_ctx, szind, (szind < SC_NBINS), usize); } else if (opt_prof) { /* * Small sampled allocs promoted can still get correct * usize here. Check comments in edata_usize_get. */ - emap_alloc_ctx_lookup(tsd_tsdn(tsd), &arena_emap_global, - ptr, &alloc_ctx); + emap_alloc_ctx_lookup( + tsd_tsdn(tsd), &arena_emap_global, ptr, &alloc_ctx); if (config_opt_safety_checks) { /* Small alloc may have !slab (sampled). 
*/ - size_t true_size = - emap_alloc_ctx_usize_get(&alloc_ctx); - if (unlikely(alloc_ctx.szind != - sz_size2index(usize))) { + size_t true_size = emap_alloc_ctx_usize_get( + &alloc_ctx); + if (unlikely(alloc_ctx.szind + != sz_size2index(usize))) { safety_check_fail_sized_dealloc( /* current_dealloc */ true, ptr, /* true_size */ true_size, @@ -3028,8 +3103,8 @@ isfree(tsd_t *tsd, void *ptr, size_t usize, tcache_t *tcache, bool slow_path) { } } } else { - emap_alloc_ctx_init(&alloc_ctx, szind, - (szind < SC_NBINS), usize); + emap_alloc_ctx_init( + &alloc_ctx, szind, (szind < SC_NBINS), usize); } } bool fail = maybe_check_alloc_ctx(tsd, ptr, &alloc_ctx); @@ -3047,14 +3122,12 @@ isfree(tsd_t *tsd, void *ptr, size_t usize, tcache_t *tcache, bool slow_path) { prof_free(tsd, ptr, usize, &alloc_ctx); } if (likely(!slow_path)) { - isdalloct(tsd_tsdn(tsd), ptr, usize, tcache, &alloc_ctx, - false); + isdalloct(tsd_tsdn(tsd), ptr, usize, tcache, &alloc_ctx, false); } else { if (config_fill && slow_path && opt_junk_free) { junk_free_callback(ptr, usize); } - isdalloct(tsd_tsdn(tsd), ptr, usize, tcache, &alloc_ctx, - true); + isdalloct(tsd_tsdn(tsd), ptr, usize, tcache, &alloc_ctx, true); } thread_dalloc_event(tsd, usize); } @@ -3125,12 +3198,11 @@ je_free_aligned_sized(void *ptr, size_t alignment, size_t size) { */ #ifdef JEMALLOC_OVERRIDE_MEMALIGN -JEMALLOC_EXPORT JEMALLOC_ALLOCATOR JEMALLOC_RESTRICT_RETURN -void JEMALLOC_NOTHROW * -JEMALLOC_ATTR(malloc) -je_memalign(size_t alignment, size_t size) { - void *ret; - static_opts_t sopts; +JEMALLOC_EXPORT + JEMALLOC_ALLOCATOR JEMALLOC_RESTRICT_RETURN void JEMALLOC_NOTHROW * + JEMALLOC_ATTR(malloc) je_memalign(size_t alignment, size_t size) { + void *ret; + static_opts_t sopts; dynamic_opts_t dopts; LOG("core.memalign.entry", "alignment: %zu, size: %zu\n", alignment, @@ -3155,8 +3227,8 @@ je_memalign(size_t alignment, size_t size) { imalloc(&sopts, &dopts); if (sopts.slow) { uintptr_t args[3] = {alignment, size}; - 
hook_invoke_alloc(hook_alloc_memalign, ret, (uintptr_t)ret, - args); + hook_invoke_alloc( + hook_alloc_memalign, ret, (uintptr_t)ret, args); } LOG("core.memalign.exit", "result: %p", ret); @@ -3165,13 +3237,12 @@ je_memalign(size_t alignment, size_t size) { #endif #ifdef JEMALLOC_OVERRIDE_VALLOC -JEMALLOC_EXPORT JEMALLOC_ALLOCATOR JEMALLOC_RESTRICT_RETURN -void JEMALLOC_NOTHROW * -JEMALLOC_ATTR(malloc) -je_valloc(size_t size) { +JEMALLOC_EXPORT + JEMALLOC_ALLOCATOR JEMALLOC_RESTRICT_RETURN void JEMALLOC_NOTHROW * + JEMALLOC_ATTR(malloc) je_valloc(size_t size) { void *ret; - static_opts_t sopts; + static_opts_t sopts; dynamic_opts_t dopts; LOG("core.valloc.entry", "size: %zu\n", size); @@ -3203,13 +3274,12 @@ je_valloc(size_t size) { #endif #ifdef JEMALLOC_OVERRIDE_PVALLOC -JEMALLOC_EXPORT JEMALLOC_ALLOCATOR JEMALLOC_RESTRICT_RETURN -void JEMALLOC_NOTHROW * -JEMALLOC_ATTR(malloc) -je_pvalloc(size_t size) { +JEMALLOC_EXPORT + JEMALLOC_ALLOCATOR JEMALLOC_RESTRICT_RETURN void JEMALLOC_NOTHROW * + JEMALLOC_ATTR(malloc) je_pvalloc(size_t size) { void *ret; - static_opts_t sopts; + static_opts_t sopts; dynamic_opts_t dopts; LOG("core.pvalloc.entry", "size: %zu\n", size); @@ -3236,8 +3306,8 @@ je_pvalloc(size_t size) { imalloc(&sopts, &dopts); if (sopts.slow) { uintptr_t args[3] = {size}; - hook_invoke_alloc(hook_alloc_pvalloc, ret, (uintptr_t)ret, - args); + hook_invoke_alloc( + hook_alloc_pvalloc, ret, (uintptr_t)ret, args); } LOG("core.pvalloc.exit", "result: %p\n", ret); @@ -3255,59 +3325,59 @@ je_pvalloc(size_t size) { * passed an extra argument for the caller return address, which will be * ignored. 
*/ -#include // defines __GLIBC__ if we are compiling against glibc +# include // defines __GLIBC__ if we are compiling against glibc JEMALLOC_EXPORT void (*__free_hook)(void *ptr) = je_free; JEMALLOC_EXPORT void *(*__malloc_hook)(size_t size) = je_malloc; JEMALLOC_EXPORT void *(*__realloc_hook)(void *ptr, size_t size) = je_realloc; -# ifdef JEMALLOC_GLIBC_MEMALIGN_HOOK -JEMALLOC_EXPORT void *(*__memalign_hook)(size_t alignment, size_t size) = - je_memalign; -# endif +# ifdef JEMALLOC_GLIBC_MEMALIGN_HOOK +JEMALLOC_EXPORT void *(*__memalign_hook)( + size_t alignment, size_t size) = je_memalign; +# endif -# ifdef __GLIBC__ +# ifdef __GLIBC__ /* * To enable static linking with glibc, the libc specific malloc interface must * be implemented also, so none of glibc's malloc.o functions are added to the * link. */ -# define ALIAS(je_fn) __attribute__((alias (#je_fn), used)) +# define ALIAS(je_fn) __attribute__((alias(#je_fn), used)) /* To force macro expansion of je_ prefix before stringification. 
*/ -# define PREALIAS(je_fn) ALIAS(je_fn) -# ifdef JEMALLOC_OVERRIDE___LIBC_CALLOC +# define PREALIAS(je_fn) ALIAS(je_fn) +# ifdef JEMALLOC_OVERRIDE___LIBC_CALLOC void *__libc_calloc(size_t n, size_t size) PREALIAS(je_calloc); -# endif -# ifdef JEMALLOC_OVERRIDE___LIBC_FREE -void __libc_free(void* ptr) PREALIAS(je_free); -# endif -# ifdef JEMALLOC_OVERRIDE___LIBC_FREE_SIZED -void __libc_free_sized(void* ptr, size_t size) PREALIAS(je_free_sized); -# endif -# ifdef JEMALLOC_OVERRIDE___LIBC_FREE_ALIGNED_SIZED -void __libc_free_aligned_sized( - void* ptr, size_t alignment, size_t size) PREALIAS(je_free_aligned_sized); -# endif -# ifdef JEMALLOC_OVERRIDE___LIBC_MALLOC +# endif +# ifdef JEMALLOC_OVERRIDE___LIBC_FREE +void __libc_free(void *ptr) PREALIAS(je_free); +# endif +# ifdef JEMALLOC_OVERRIDE___LIBC_FREE_SIZED +void __libc_free_sized(void *ptr, size_t size) PREALIAS(je_free_sized); +# endif +# ifdef JEMALLOC_OVERRIDE___LIBC_FREE_ALIGNED_SIZED +void __libc_free_aligned_sized(void *ptr, size_t alignment, size_t size) + PREALIAS(je_free_aligned_sized); +# endif +# ifdef JEMALLOC_OVERRIDE___LIBC_MALLOC void *__libc_malloc(size_t size) PREALIAS(je_malloc); -# endif -# ifdef JEMALLOC_OVERRIDE___LIBC_MEMALIGN +# endif +# ifdef JEMALLOC_OVERRIDE___LIBC_MEMALIGN void *__libc_memalign(size_t align, size_t s) PREALIAS(je_memalign); -# endif -# ifdef JEMALLOC_OVERRIDE___LIBC_REALLOC -void *__libc_realloc(void* ptr, size_t size) PREALIAS(je_realloc); -# endif -# ifdef JEMALLOC_OVERRIDE___LIBC_VALLOC +# endif +# ifdef JEMALLOC_OVERRIDE___LIBC_REALLOC +void *__libc_realloc(void *ptr, size_t size) PREALIAS(je_realloc); +# endif +# ifdef JEMALLOC_OVERRIDE___LIBC_VALLOC void *__libc_valloc(size_t size) PREALIAS(je_valloc); -# endif -# ifdef JEMALLOC_OVERRIDE___LIBC_PVALLOC +# endif +# ifdef JEMALLOC_OVERRIDE___LIBC_PVALLOC void *__libc_pvalloc(size_t size) PREALIAS(je_pvalloc); -# endif -# ifdef JEMALLOC_OVERRIDE___POSIX_MEMALIGN -int __posix_memalign(void** r, size_t a, size_t s) 
PREALIAS(je_posix_memalign); -# endif -# undef PREALIAS -# undef ALIAS -# endif +# endif +# ifdef JEMALLOC_OVERRIDE___POSIX_MEMALIGN +int __posix_memalign(void **r, size_t a, size_t s) PREALIAS(je_posix_memalign); +# endif +# undef PREALIAS +# undef ALIAS +# endif #endif /* @@ -3340,23 +3410,23 @@ mallocx_arena_get(int flags) { #ifdef JEMALLOC_EXPERIMENTAL_SMALLOCX_API -#define JEMALLOC_SMALLOCX_CONCAT_HELPER(x, y) x ## y -#define JEMALLOC_SMALLOCX_CONCAT_HELPER2(x, y) \ - JEMALLOC_SMALLOCX_CONCAT_HELPER(x, y) +# define JEMALLOC_SMALLOCX_CONCAT_HELPER(x, y) x##y +# define JEMALLOC_SMALLOCX_CONCAT_HELPER2(x, y) \ + JEMALLOC_SMALLOCX_CONCAT_HELPER(x, y) typedef struct { - void *ptr; + void *ptr; size_t size; } smallocx_return_t; -JEMALLOC_EXPORT JEMALLOC_ALLOCATOR JEMALLOC_RESTRICT_RETURN -smallocx_return_t JEMALLOC_NOTHROW -/* +JEMALLOC_EXPORT JEMALLOC_ALLOCATOR JEMALLOC_RESTRICT_RETURN smallocx_return_t + JEMALLOC_NOTHROW + /* * The attribute JEMALLOC_ATTR(malloc) cannot be used due to: * - https://gcc.gnu.org/bugzilla/show_bug.cgi?id=86488 */ -JEMALLOC_SMALLOCX_CONCAT_HELPER2(je_smallocx_, JEMALLOC_VERSION_GID_IDENT) - (size_t size, int flags) { + JEMALLOC_SMALLOCX_CONCAT_HELPER2(je_smallocx_, JEMALLOC_VERSION_GID_IDENT)( + size_t size, int flags) { /* * Note: the attribute JEMALLOC_ALLOC_SIZE(1) cannot be * used here because it makes writing beyond the `size` @@ -3365,8 +3435,8 @@ JEMALLOC_SMALLOCX_CONCAT_HELPER2(je_smallocx_, JEMALLOC_VERSION_GID_IDENT) * up to `smallocx_return_t::size`. 
*/ smallocx_return_t ret; - static_opts_t sopts; - dynamic_opts_t dopts; + static_opts_t sopts; + dynamic_opts_t dopts; LOG("core.smallocx.entry", "size: %zu, flags: %d", size, flags); @@ -3395,16 +3465,16 @@ JEMALLOC_SMALLOCX_CONCAT_HELPER2(je_smallocx_, JEMALLOC_VERSION_GID_IDENT) LOG("core.smallocx.exit", "result: %p, size: %zu", ret.ptr, ret.size); return ret; } -#undef JEMALLOC_SMALLOCX_CONCAT_HELPER -#undef JEMALLOC_SMALLOCX_CONCAT_HELPER2 +# undef JEMALLOC_SMALLOCX_CONCAT_HELPER +# undef JEMALLOC_SMALLOCX_CONCAT_HELPER2 #endif -JEMALLOC_EXPORT JEMALLOC_ALLOCATOR JEMALLOC_RESTRICT_RETURN -void JEMALLOC_NOTHROW * -JEMALLOC_ATTR(malloc) JEMALLOC_ALLOC_SIZE(1) -je_mallocx(size_t size, int flags) { - void *ret; - static_opts_t sopts; +JEMALLOC_EXPORT + JEMALLOC_ALLOCATOR JEMALLOC_RESTRICT_RETURN void JEMALLOC_NOTHROW * + JEMALLOC_ATTR(malloc) JEMALLOC_ALLOC_SIZE(1) + je_mallocx(size_t size, int flags) { + void *ret; + static_opts_t sopts; dynamic_opts_t dopts; LOG("core.mallocx.entry", "size: %zu, flags: %d", size, flags); @@ -3429,8 +3499,8 @@ je_mallocx(size_t size, int flags) { imalloc(&sopts, &dopts); if (sopts.slow) { uintptr_t args[3] = {size, flags}; - hook_invoke_alloc(hook_alloc_mallocx, ret, (uintptr_t)ret, - args); + hook_invoke_alloc( + hook_alloc_mallocx, ret, (uintptr_t)ret, args); } LOG("core.mallocx.exit", "result: %p", ret); @@ -3456,8 +3526,8 @@ irallocx_prof_sample(tsdn_t *tsdn, void *old_ptr, size_t old_usize, if (sz_can_use_slab(usize)) { size_t bumped_usize = sz_sa2u(usize, alignment); p = iralloct_explicit_slab(tsdn, old_ptr, old_usize, - bumped_usize, alignment, zero, /* slab */ false, - tcache, arena, hook_args); + bumped_usize, alignment, zero, /* slab */ false, tcache, + arena, hook_args); if (p == NULL) { return NULL; } @@ -3474,15 +3544,14 @@ irallocx_prof_sample(tsdn_t *tsdn, void *old_ptr, size_t old_usize, JEMALLOC_ALWAYS_INLINE void * irallocx_prof(tsd_t *tsd, void *old_ptr, size_t old_usize, size_t size, - size_t alignment, 
size_t usize, bool zero, tcache_t *tcache, - arena_t *arena, emap_alloc_ctx_t *alloc_ctx, - hook_ralloc_args_t *hook_args) { + size_t alignment, size_t usize, bool zero, tcache_t *tcache, arena_t *arena, + emap_alloc_ctx_t *alloc_ctx, hook_ralloc_args_t *hook_args) { prof_info_t old_prof_info; prof_info_get_and_reset_recent(tsd, old_ptr, alloc_ctx, &old_prof_info); - bool prof_active = prof_active_get_unlocked(); - bool sample_event = te_prof_sample_event_lookahead(tsd, usize); + bool prof_active = prof_active_get_unlocked(); + bool sample_event = te_prof_sample_event_lookahead(tsd, usize); prof_tctx_t *tctx = prof_alloc_prep(tsd, prof_active, sample_event); - void *p; + void *p; if (unlikely(tctx != PROF_TCTX_SENTINEL)) { p = irallocx_prof_sample(tsd_tsdn(tsd), old_ptr, old_usize, usize, alignment, zero, tcache, arena, tctx, hook_args); @@ -3495,19 +3564,19 @@ irallocx_prof(tsd_t *tsd, void *old_ptr, size_t old_usize, size_t size, return NULL; } assert(usize == isalloc(tsd_tsdn(tsd), p)); - prof_realloc(tsd, p, size, usize, tctx, prof_active, old_ptr, - old_usize, &old_prof_info, sample_event); + prof_realloc(tsd, p, size, usize, tctx, prof_active, old_ptr, old_usize, + &old_prof_info, sample_event); return p; } static void * do_rallocx(void *ptr, size_t size, int flags, bool is_realloc) { - void *p; - tsd_t *tsd; - size_t usize; - size_t old_usize; - size_t alignment = MALLOCX_ALIGN_GET(flags); + void *p; + tsd_t *tsd; + size_t usize; + size_t old_usize; + size_t alignment = MALLOCX_ALIGN_GET(flags); arena_t *arena; assert(ptr != NULL); @@ -3523,13 +3592,13 @@ do_rallocx(void *ptr, size_t size, int flags, bool is_realloc) { goto label_oom; } - unsigned tcache_ind = mallocx_tcache_get(flags); + unsigned tcache_ind = mallocx_tcache_get(flags); tcache_t *tcache = tcache_get_from_ind(tsd, tcache_ind, /* slow */ true, /* is_alloc */ true); emap_alloc_ctx_t alloc_ctx; - emap_alloc_ctx_lookup(tsd_tsdn(tsd), &arena_emap_global, ptr, - &alloc_ctx); + 
emap_alloc_ctx_lookup( + tsd_tsdn(tsd), &arena_emap_global, ptr, &alloc_ctx); assert(alloc_ctx.szind != SC_NSIZES); old_usize = emap_alloc_ctx_usize_get(&alloc_ctx); assert(old_usize == isalloc(tsd_tsdn(tsd), ptr)); @@ -3537,8 +3606,8 @@ do_rallocx(void *ptr, size_t size, int flags, bool is_realloc) { goto label_oom; } - hook_ralloc_args_t hook_args = {is_realloc, {(uintptr_t)ptr, size, - flags, 0}}; + hook_ralloc_args_t hook_args = { + is_realloc, {(uintptr_t)ptr, size, flags, 0}}; if (config_prof && opt_prof) { p = irallocx_prof(tsd, ptr, old_usize, size, alignment, usize, zero, tcache, arena, &alloc_ctx, &hook_args); @@ -3563,7 +3632,7 @@ do_rallocx(void *ptr, size_t size, int flags, bool is_realloc) { if (config_fill && unlikely(opt_junk_alloc) && usize > old_usize && !zero) { size_t excess_len = usize - old_usize; - void *excess_start = (void *)((byte_t *)p + old_usize); + void *excess_start = (void *)((byte_t *)p + old_usize); junk_alloc_callback(excess_start, excess_len); } @@ -3582,12 +3651,11 @@ label_oom: return NULL; } -JEMALLOC_EXPORT JEMALLOC_ALLOCATOR JEMALLOC_RESTRICT_RETURN -void JEMALLOC_NOTHROW * -JEMALLOC_ALLOC_SIZE(2) -je_rallocx(void *ptr, size_t size, int flags) { - LOG("core.rallocx.entry", "ptr: %p, size: %zu, flags: %d", ptr, - size, flags); +JEMALLOC_EXPORT + JEMALLOC_ALLOCATOR JEMALLOC_RESTRICT_RETURN void JEMALLOC_NOTHROW * + JEMALLOC_ALLOC_SIZE(2) je_rallocx(void *ptr, size_t size, int flags) { + LOG("core.rallocx.entry", "ptr: %p, size: %zu, flags: %d", ptr, size, + flags); void *ret = do_rallocx(ptr, size, flags, false); LOG("core.rallocx.exit", "result: %p", ret); return ret; @@ -3621,7 +3689,8 @@ do_realloc_nonnull_zero(void *ptr) { check_entry_exit_locking(tsd_tsdn(tsd)); return NULL; } else { - safety_check_fail("Called realloc(non-null-ptr, 0) with " + safety_check_fail( + "Called realloc(non-null-ptr, 0) with " "zero_realloc:abort set\n"); /* In real code, this will never run; the safety check failure * will call abort. 
In the unit test, we just want to bail out @@ -3632,10 +3701,9 @@ do_realloc_nonnull_zero(void *ptr) { } } -JEMALLOC_EXPORT JEMALLOC_ALLOCATOR JEMALLOC_RESTRICT_RETURN -void JEMALLOC_NOTHROW * -JEMALLOC_ALLOC_SIZE(2) -je_realloc(void *ptr, size_t size) { +JEMALLOC_EXPORT + JEMALLOC_ALLOCATOR JEMALLOC_RESTRICT_RETURN void JEMALLOC_NOTHROW * + JEMALLOC_ALLOC_SIZE(2) je_realloc(void *ptr, size_t size) { LOG("core.realloc.entry", "ptr: %p, size: %zu\n", ptr, size); if (likely(ptr != NULL && size != 0)) { @@ -3650,7 +3718,7 @@ je_realloc(void *ptr, size_t size) { /* realloc(NULL, size) is equivalent to malloc(size). */ void *ret; - static_opts_t sopts; + static_opts_t sopts; dynamic_opts_t dopts; static_opts_init(&sopts); @@ -3668,8 +3736,8 @@ je_realloc(void *ptr, size_t size) { imalloc(&sopts, &dopts); if (sopts.slow) { uintptr_t args[3] = {(uintptr_t)ptr, size}; - hook_invoke_alloc(hook_alloc_realloc, ret, - (uintptr_t)ret, args); + hook_invoke_alloc( + hook_alloc_realloc, ret, (uintptr_t)ret, args); } LOG("core.realloc.exit", "result: %p", ret); return ret; @@ -3681,8 +3749,8 @@ ixallocx_helper(tsdn_t *tsdn, void *ptr, size_t old_usize, size_t size, size_t extra, size_t alignment, bool zero) { size_t newsize; - if (ixalloc(tsdn, ptr, old_usize, size, extra, alignment, zero, - &newsize)) { + if (ixalloc( + tsdn, ptr, old_usize, size, extra, alignment, zero, &newsize)) { return old_usize; } @@ -3697,8 +3765,8 @@ ixallocx_prof_sample(tsdn_t *tsdn, void *ptr, size_t old_usize, size_t size, return old_usize; } - return ixallocx_helper(tsdn, ptr, old_usize, size, extra, alignment, - zero); + return ixallocx_helper( + tsdn, ptr, old_usize, size, extra, alignment, zero); } JEMALLOC_ALWAYS_INLINE size_t @@ -3718,8 +3786,8 @@ ixallocx_prof(tsd_t *tsd, void *ptr, size_t old_usize, size_t size, * prof_realloc() will use the actual usize to decide whether to sample. 
*/ size_t usize_max; - if (aligned_usize_get(size + extra, alignment, &usize_max, NULL, - false)) { + if (aligned_usize_get( + size + extra, alignment, &usize_max, NULL, false)) { /* * usize_max is out of range, and chances are that allocation * will fail, but use the maximum possible value and carry on @@ -3758,10 +3826,10 @@ ixallocx_prof(tsd_t *tsd, void *ptr, size_t old_usize, size_t size, * to edata has already been done. */ emap_alloc_ctx_t new_alloc_ctx; - emap_alloc_ctx_lookup(tsd_tsdn(tsd), &arena_emap_global, ptr, - &new_alloc_ctx); - prof_info_get_and_reset_recent(tsd, ptr, &new_alloc_ctx, - &prof_info); + emap_alloc_ctx_lookup( + tsd_tsdn(tsd), &arena_emap_global, ptr, &new_alloc_ctx); + prof_info_get_and_reset_recent( + tsd, ptr, &new_alloc_ctx, &prof_info); assert(usize <= usize_max); sample_event = te_prof_sample_event_lookahead(tsd, usize); prof_realloc(tsd, ptr, size, usize, tctx, prof_active, ptr, @@ -3777,10 +3845,12 @@ je_xallocx(void *ptr, size_t size, size_t extra, int flags) { tsd_t *tsd; size_t usize, old_usize; size_t alignment = MALLOCX_ALIGN_GET(flags); - bool zero = zero_get(MALLOCX_ZERO_GET(flags), /* slow */ true); + bool zero = zero_get(MALLOCX_ZERO_GET(flags), /* slow */ true); - LOG("core.xallocx.entry", "ptr: %p, size: %zu, extra: %zu, " - "flags: %d", ptr, size, extra, flags); + LOG("core.xallocx.entry", + "ptr: %p, size: %zu, extra: %zu, " + "flags: %d", + ptr, size, extra, flags); assert(ptr != NULL); assert(size != 0); @@ -3794,12 +3864,12 @@ je_xallocx(void *ptr, size_t size, size_t extra, int flags) { * object associated with the ptr (though the content of the edata_t * object can be changed). 
*/ - edata_t *old_edata = emap_edata_lookup(tsd_tsdn(tsd), - &arena_emap_global, ptr); + edata_t *old_edata = emap_edata_lookup( + tsd_tsdn(tsd), &arena_emap_global, ptr); emap_alloc_ctx_t alloc_ctx; - emap_alloc_ctx_lookup(tsd_tsdn(tsd), &arena_emap_global, ptr, - &alloc_ctx); + emap_alloc_ctx_lookup( + tsd_tsdn(tsd), &arena_emap_global, ptr, &alloc_ctx); assert(alloc_ctx.szind != SC_NSIZES); old_usize = emap_alloc_ctx_usize_get(&alloc_ctx); assert(old_usize == isalloc(tsd_tsdn(tsd), ptr)); @@ -3841,17 +3911,17 @@ je_xallocx(void *ptr, size_t size, size_t extra, int flags) { thread_alloc_event(tsd, usize); thread_dalloc_event(tsd, old_usize); - if (config_fill && unlikely(opt_junk_alloc) && usize > old_usize && - !zero) { + if (config_fill && unlikely(opt_junk_alloc) && usize > old_usize + && !zero) { size_t excess_len = usize - old_usize; - void *excess_start = (void *)((byte_t *)ptr + old_usize); + void *excess_start = (void *)((byte_t *)ptr + old_usize); junk_alloc_callback(excess_start, excess_len); } label_not_resized: if (unlikely(!tsd_fast(tsd))) { uintptr_t args[4] = {(uintptr_t)ptr, size, extra, flags}; - hook_invoke_expand(hook_expand_xallocx, ptr, old_usize, - usize, (uintptr_t)usize, args); + hook_invoke_expand(hook_expand_xallocx, ptr, old_usize, usize, + (uintptr_t)usize, args); } UTRACE(ptr, size, ptr); @@ -3862,9 +3932,8 @@ label_not_resized: } JEMALLOC_EXPORT size_t JEMALLOC_NOTHROW -JEMALLOC_ATTR(pure) -je_sallocx(const void *ptr, int flags) { - size_t usize; +JEMALLOC_ATTR(pure) je_sallocx(const void *ptr, int flags) { + size_t usize; tsdn_t *tsdn; LOG("core.sallocx.entry", "ptr: %p, flags: %d", ptr, flags); @@ -3896,10 +3965,10 @@ je_dallocx(void *ptr, int flags) { assert(malloc_initialized() || IS_INITIALIZER); tsd_t *tsd = tsd_fetch_min(); - bool fast = tsd_fast(tsd); + bool fast = tsd_fast(tsd); check_entry_exit_locking(tsd_tsdn(tsd)); - unsigned tcache_ind = mallocx_tcache_get(flags); + unsigned tcache_ind = mallocx_tcache_get(flags); 
tcache_t *tcache = tcache_get_from_ind(tsd, tcache_ind, !fast, /* is_alloc */ false); @@ -3933,11 +4002,11 @@ sdallocx_default(void *ptr, size_t size, int flags) { assert(malloc_initialized() || IS_INITIALIZER); tsd_t *tsd = tsd_fetch_min(); - bool fast = tsd_fast(tsd); + bool fast = tsd_fast(tsd); size_t usize = inallocx(tsd_tsdn(tsd), size, flags); check_entry_exit_locking(tsd_tsdn(tsd)); - unsigned tcache_ind = mallocx_tcache_get(flags); + unsigned tcache_ind = mallocx_tcache_get(flags); tcache_t *tcache = tcache_get_from_ind(tsd, tcache_ind, !fast, /* is_alloc */ false); @@ -3955,8 +4024,8 @@ sdallocx_default(void *ptr, size_t size, int flags) { JEMALLOC_EXPORT void JEMALLOC_NOTHROW je_sdallocx(void *ptr, size_t size, int flags) { - LOG("core.sdallocx.entry", "ptr: %p, size: %zu, flags: %d", ptr, - size, flags); + LOG("core.sdallocx.entry", "ptr: %p, size: %zu, flags: %d", ptr, size, + flags); je_sdallocx_impl(ptr, size, flags); @@ -3964,9 +4033,8 @@ je_sdallocx(void *ptr, size_t size, int flags) { } JEMALLOC_EXPORT size_t JEMALLOC_NOTHROW -JEMALLOC_ATTR(pure) -je_nallocx(size_t size, int flags) { - size_t usize; +JEMALLOC_ATTR(pure) je_nallocx(size_t size, int flags) { + size_t usize; tsdn_t *tsdn; assert(size != 0); @@ -3991,9 +4059,9 @@ je_nallocx(size_t size, int flags) { } JEMALLOC_EXPORT int JEMALLOC_NOTHROW -je_mallctl(const char *name, void *oldp, size_t *oldlenp, void *newp, - size_t newlen) { - int ret; +je_mallctl( + const char *name, void *oldp, size_t *oldlenp, void *newp, size_t newlen) { + int ret; tsd_t *tsd; LOG("core.mallctl.entry", "name: %s", name); @@ -4034,8 +4102,8 @@ je_mallctlnametomib(const char *name, size_t *mibp, size_t *miblenp) { JEMALLOC_EXPORT int JEMALLOC_NOTHROW je_mallctlbymib(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp, - void *newp, size_t newlen) { - int ret; + void *newp, size_t newlen) { + int ret; tsd_t *tsd; LOG("core.mallctlbymib.entry", ""); @@ -4055,8 +4123,8 @@ je_mallctlbymib(const size_t *mib, 
size_t miblen, void *oldp, size_t *oldlenp, #define STATS_PRINT_BUFSIZE 65536 JEMALLOC_EXPORT void JEMALLOC_NOTHROW -je_malloc_stats_print(void (*write_cb)(void *, const char *), void *cbopaque, - const char *opts) { +je_malloc_stats_print( + void (*write_cb)(void *, const char *), void *cbopaque, const char *opts) { tsdn_t *tsdn; LOG("core.malloc_stats_print.entry", ""); @@ -4127,12 +4195,12 @@ je_malloc_size(const void *ptr) { static void batch_alloc_prof_sample_assert(tsd_t *tsd, size_t batch, size_t usize) { assert(config_prof && opt_prof); - bool prof_sample_event = te_prof_sample_event_lookahead(tsd, - batch * usize); + bool prof_sample_event = te_prof_sample_event_lookahead( + tsd, batch * usize); assert(!prof_sample_event); size_t surplus; - prof_sample_event = te_prof_sample_event_lookahead_surplus(tsd, - (batch + 1) * usize, &surplus); + prof_sample_event = te_prof_sample_event_lookahead_surplus( + tsd, (batch + 1) * usize, &surplus); assert(prof_sample_event); assert(surplus < usize); } @@ -4157,14 +4225,14 @@ batch_alloc(void **ptrs, size_t num, size_t size, int flags) { goto label_done; } szind_t ind = sz_size2index(usize); - bool zero = zero_get(MALLOCX_ZERO_GET(flags), /* slow */ true); + bool zero = zero_get(MALLOCX_ZERO_GET(flags), /* slow */ true); /* * The cache bin and arena will be lazily initialized; it's hard to * know in advance whether each of them needs to be initialized. */ cache_bin_t *bin = NULL; - arena_t *arena = NULL; + arena_t *arena = NULL; size_t nregs = 0; if (likely(ind < SC_NBINS)) { @@ -4175,10 +4243,10 @@ batch_alloc(void **ptrs, size_t num, size_t size, int flags) { while (filled < num) { size_t batch = num - filled; size_t surplus = SIZE_MAX; /* Dead store. 
*/ - bool prof_sample_event = config_prof && opt_prof + bool prof_sample_event = config_prof && opt_prof && prof_active_get_unlocked() - && te_prof_sample_event_lookahead_surplus(tsd, - batch * usize, &surplus); + && te_prof_sample_event_lookahead_surplus( + tsd, batch * usize, &surplus); if (prof_sample_event) { /* @@ -4194,8 +4262,8 @@ batch_alloc(void **ptrs, size_t num, size_t size, int flags) { if (likely(ind < SC_NBINS) && batch >= nregs) { if (arena == NULL) { unsigned arena_ind = mallocx_arena_get(flags); - if (arena_get_from_ind(tsd, arena_ind, - &arena)) { + if (arena_get_from_ind( + tsd, arena_ind, &arena)) { goto label_done; } if (arena == NULL) { @@ -4212,13 +4280,14 @@ batch_alloc(void **ptrs, size_t num, size_t size, int flags) { filled += n; } - unsigned tcache_ind = mallocx_tcache_get(flags); + unsigned tcache_ind = mallocx_tcache_get(flags); tcache_t *tcache = tcache_get_from_ind(tsd, tcache_ind, /* slow */ true, /* is_alloc */ true); - if (likely(tcache != NULL && - ind < tcache_nbins_get(tcache->tcache_slow) && - !tcache_bin_disabled(ind, &tcache->bins[ind], - tcache->tcache_slow)) && progress < batch) { + if (likely(tcache != NULL + && ind < tcache_nbins_get(tcache->tcache_slow) + && !tcache_bin_disabled( + ind, &tcache->bins[ind], tcache->tcache_slow)) + && progress < batch) { if (bin == NULL) { bin = &tcache->bins[ind]; } @@ -4249,22 +4318,22 @@ batch_alloc(void **ptrs, size_t num, size_t size, int flags) { * additional benefit is that the tcache will * not be empty for the next allocation request. 
*/ - size_t n = cache_bin_alloc_batch(bin, bin_batch, - ptrs + filled); + size_t n = cache_bin_alloc_batch( + bin, bin_batch, ptrs + filled); if (config_stats) { bin->tstats.nrequests += n; } if (zero) { for (size_t i = 0; i < n; ++i) { - memset(ptrs[filled + i], 0, - usize); + memset( + ptrs[filled + i], 0, usize); } } if (config_prof && opt_prof && unlikely(ind >= SC_NBINS)) { for (size_t i = 0; i < n; ++i) { - prof_tctx_reset_sampled(tsd, - ptrs[filled + i]); + prof_tctx_reset_sampled( + tsd, ptrs[filled + i]); } } progress += n; @@ -4340,7 +4409,7 @@ JEMALLOC_EXPORT void _malloc_prefork(void) #endif { - tsd_t *tsd; + tsd_t *tsd; unsigned i, j, narenas; arena_t *arena; @@ -4370,8 +4439,8 @@ _malloc_prefork(void) /* Break arena prefork into stages to preserve lock order. */ for (i = 0; i < 9; i++) { for (j = 0; j < narenas; j++) { - if ((arena = arena_get(tsd_tsdn(tsd), j, false)) != - NULL) { + if ((arena = arena_get(tsd_tsdn(tsd), j, false)) + != NULL) { switch (i) { case 0: arena_prefork0(tsd_tsdn(tsd), arena); @@ -4400,11 +4469,11 @@ _malloc_prefork(void) case 8: arena_prefork8(tsd_tsdn(tsd), arena); break; - default: not_reached(); + default: + not_reached(); } } } - } prof_prefork1(tsd_tsdn(tsd)); stats_prefork(tsd_tsdn(tsd)); @@ -4419,7 +4488,7 @@ JEMALLOC_EXPORT void _malloc_postfork(void) #endif { - tsd_t *tsd; + tsd_t *tsd; unsigned i, narenas; #ifdef JEMALLOC_MUTEX_INIT_CB @@ -4454,7 +4523,7 @@ _malloc_postfork(void) void jemalloc_postfork_child(void) { - tsd_t *tsd; + tsd_t *tsd; unsigned i, narenas; assert(malloc_initialized()); diff --git a/src/jemalloc_cpp.cpp b/src/jemalloc_cpp.cpp index fffd6aee..4e838d3b 100644 --- a/src/jemalloc_cpp.cpp +++ b/src/jemalloc_cpp.cpp @@ -24,45 +24,52 @@ extern "C" { // // ... but it needs to work with jemalloc namespaces. 
-void *operator new(std::size_t size); -void *operator new[](std::size_t size); -void *operator new(std::size_t size, const std::nothrow_t &) noexcept; -void *operator new[](std::size_t size, const std::nothrow_t &) noexcept; -void operator delete(void *ptr) noexcept; -void operator delete[](void *ptr) noexcept; -void operator delete(void *ptr, const std::nothrow_t &) noexcept; -void operator delete[](void *ptr, const std::nothrow_t &) noexcept; +void *operator new(std::size_t size); +void *operator new[](std::size_t size); +void *operator new(std::size_t size, const std::nothrow_t &) noexcept; +void *operator new[](std::size_t size, const std::nothrow_t &) noexcept; +void operator delete(void *ptr) noexcept; +void operator delete[](void *ptr) noexcept; +void operator delete(void *ptr, const std::nothrow_t &) noexcept; +void operator delete[](void *ptr, const std::nothrow_t &) noexcept; #if __cpp_sized_deallocation >= 201309 /* C++14's sized-delete operators. */ -void operator delete(void *ptr, std::size_t size) noexcept; -void operator delete[](void *ptr, std::size_t size) noexcept; +void operator delete(void *ptr, std::size_t size) noexcept; +void operator delete[](void *ptr, std::size_t size) noexcept; #endif #if __cpp_aligned_new >= 201606 /* C++17's over-aligned operators. 
*/ -void *operator new(std::size_t size, std::align_val_t); -void *operator new(std::size_t size, std::align_val_t, const std::nothrow_t &) noexcept; -void *operator new[](std::size_t size, std::align_val_t); -void *operator new[](std::size_t size, std::align_val_t, const std::nothrow_t &) noexcept; -void operator delete(void* ptr, std::align_val_t) noexcept; -void operator delete(void* ptr, std::align_val_t, const std::nothrow_t &) noexcept; -void operator delete(void* ptr, std::size_t size, std::align_val_t al) noexcept; -void operator delete[](void* ptr, std::align_val_t) noexcept; -void operator delete[](void* ptr, std::align_val_t, const std::nothrow_t &) noexcept; -void operator delete[](void* ptr, std::size_t size, std::align_val_t al) noexcept; +void *operator new(std::size_t size, std::align_val_t); +void *operator new( + std::size_t size, std::align_val_t, const std::nothrow_t &) noexcept; +void *operator new[](std::size_t size, std::align_val_t); +void *operator new[]( + std::size_t size, std::align_val_t, const std::nothrow_t &) noexcept; +void operator delete(void *ptr, std::align_val_t) noexcept; +void operator delete( + void *ptr, std::align_val_t, const std::nothrow_t &) noexcept; +void operator delete(void *ptr, std::size_t size, std::align_val_t al) noexcept; +void operator delete[](void *ptr, std::align_val_t) noexcept; +void operator delete[]( + void *ptr, std::align_val_t, const std::nothrow_t &) noexcept; +void operator delete[]( + void *ptr, std::size_t size, std::align_val_t al) noexcept; #endif JEMALLOC_NOINLINE static void * handleOOM(std::size_t size, bool nothrow) { if (opt_experimental_infallible_new) { - const char *huge_warning = (size >= ((std::size_t)1 << 30)) ? - "This may be caused by heap corruption, if the large size " - "is unexpected (suggest building with sanitizers for " - "debugging)." : ""; + const char *huge_warning = (size >= ((std::size_t)1 << 30)) + ? 
"This may be caused by heap corruption, if the large size " + "is unexpected (suggest building with sanitizers for " + "debugging)." + : ""; - safety_check_fail(": Allocation of size %zu failed. " + safety_check_fail( + ": Allocation of size %zu failed. " "%s opt.experimental_infallible_new is true. Aborting.\n", size, huge_warning); return nullptr; @@ -74,7 +81,7 @@ handleOOM(std::size_t size, bool nothrow) { std::new_handler handler; // GCC-4.8 and clang 4.0 do not have std::get_new_handler. { - static std::mutex mtx; + static std::mutex mtx; std::lock_guard lock(mtx); handler = std::set_new_handler(nullptr); @@ -98,8 +105,7 @@ handleOOM(std::size_t size, bool nothrow) { } template -JEMALLOC_NOINLINE -static void * +JEMALLOC_NOINLINE static void * fallbackNewImpl(std::size_t size) noexcept(IsNoExcept) { void *ptr = malloc_default(size); if (likely(ptr != nullptr)) { @@ -109,12 +115,11 @@ fallbackNewImpl(std::size_t size) noexcept(IsNoExcept) { } template -JEMALLOC_ALWAYS_INLINE -void * +JEMALLOC_ALWAYS_INLINE void * newImpl(std::size_t size) noexcept(IsNoExcept) { LOG("core.operator_new.entry", "size: %zu", size); - void * ret = imalloc_fastpath(size, &fallbackNewImpl); + void *ret = imalloc_fastpath(size, &fallbackNewImpl); LOG("core.operator_new.exit", "result: %p", ret); return ret; @@ -143,9 +148,9 @@ operator new[](std::size_t size, const std::nothrow_t &) noexcept { #if __cpp_aligned_new >= 201606 template -JEMALLOC_ALWAYS_INLINE -void * -alignedNewImpl(std::size_t size, std::align_val_t alignment) noexcept(IsNoExcept) { +JEMALLOC_ALWAYS_INLINE void * +alignedNewImpl(std::size_t size, std::align_val_t alignment) noexcept( + IsNoExcept) { void *ptr = je_aligned_alloc(static_cast(alignment), size); if (likely(ptr != nullptr)) { return ptr; @@ -165,16 +170,18 @@ operator new[](std::size_t size, std::align_val_t alignment) { } void * -operator new(std::size_t size, std::align_val_t alignment, const std::nothrow_t &) noexcept { +operator new(std::size_t size, 
std::align_val_t alignment, + const std::nothrow_t &) noexcept { return alignedNewImpl(size, alignment); } void * -operator new[](std::size_t size, std::align_val_t alignment, const std::nothrow_t &) noexcept { +operator new[](std::size_t size, std::align_val_t alignment, + const std::nothrow_t &) noexcept { return alignedNewImpl(size, alignment); } -#endif // __cpp_aligned_new +#endif // __cpp_aligned_new void operator delete(void *ptr) noexcept { @@ -203,7 +210,8 @@ operator delete(void *ptr, const std::nothrow_t &) noexcept { LOG("core.operator_delete.exit", ""); } -void operator delete[](void *ptr, const std::nothrow_t &) noexcept { +void +operator delete[](void *ptr, const std::nothrow_t &) noexcept { LOG("core.operator_delete.entry", "ptr: %p", ptr); je_free_impl(ptr); @@ -215,7 +223,7 @@ void operator delete[](void *ptr, const std::nothrow_t &) noexcept { JEMALLOC_ALWAYS_INLINE void -sizedDeleteImpl(void* ptr, std::size_t size) noexcept { +sizedDeleteImpl(void *ptr, std::size_t size) noexcept { if (unlikely(ptr == nullptr)) { return; } @@ -236,14 +244,14 @@ operator delete[](void *ptr, std::size_t size) noexcept { sizedDeleteImpl(ptr, size); } -#endif // __cpp_sized_deallocation +#endif // __cpp_sized_deallocation #if __cpp_aligned_new >= 201606 JEMALLOC_ALWAYS_INLINE void -alignedSizedDeleteImpl(void* ptr, std::size_t size, std::align_val_t alignment) - noexcept { +alignedSizedDeleteImpl( + void *ptr, std::size_t size, std::align_val_t alignment) noexcept { if (config_debug) { assert(((size_t)alignment & ((size_t)alignment - 1)) == 0); } @@ -259,7 +267,7 @@ alignedSizedDeleteImpl(void* ptr, std::size_t size, std::align_val_t alignment) } void -operator delete(void* ptr, std::align_val_t) noexcept { +operator delete(void *ptr, std::align_val_t) noexcept { LOG("core.operator_delete.entry", "ptr: %p", ptr); je_free_impl(ptr); @@ -268,7 +276,7 @@ operator delete(void* ptr, std::align_val_t) noexcept { } void -operator delete[](void* ptr, std::align_val_t) 
noexcept { +operator delete[](void *ptr, std::align_val_t) noexcept { LOG("core.operator_delete.entry", "ptr: %p", ptr); je_free_impl(ptr); @@ -277,7 +285,7 @@ operator delete[](void* ptr, std::align_val_t) noexcept { } void -operator delete(void* ptr, std::align_val_t, const std::nothrow_t&) noexcept { +operator delete(void *ptr, std::align_val_t, const std::nothrow_t &) noexcept { LOG("core.operator_delete.entry", "ptr: %p", ptr); je_free_impl(ptr); @@ -286,7 +294,8 @@ operator delete(void* ptr, std::align_val_t, const std::nothrow_t&) noexcept { } void -operator delete[](void* ptr, std::align_val_t, const std::nothrow_t&) noexcept { +operator delete[]( + void *ptr, std::align_val_t, const std::nothrow_t &) noexcept { LOG("core.operator_delete.entry", "ptr: %p", ptr); je_free_impl(ptr); @@ -295,14 +304,16 @@ operator delete[](void* ptr, std::align_val_t, const std::nothrow_t&) noexcept { } void -operator delete(void* ptr, std::size_t size, std::align_val_t alignment) noexcept { +operator delete( + void *ptr, std::size_t size, std::align_val_t alignment) noexcept { alignedSizedDeleteImpl(ptr, size, alignment); } void -operator delete[](void* ptr, std::size_t size, std::align_val_t alignment) noexcept { +operator delete[]( + void *ptr, std::size_t size, std::align_val_t alignment) noexcept { alignedSizedDeleteImpl(ptr, size, alignment); } -#endif // __cpp_aligned_new +#endif // __cpp_aligned_new // NOLINTEND(misc-use-anonymous-namespace) diff --git a/src/large.c b/src/large.c index d78085f0..7cae61ae 100644 --- a/src/large.c +++ b/src/large.c @@ -18,10 +18,10 @@ large_malloc(tsdn_t *tsdn, arena_t *arena, size_t usize, bool zero) { } void * -large_palloc(tsdn_t *tsdn, arena_t *arena, size_t usize, size_t alignment, - bool zero) { - size_t ausize; - edata_t *edata; +large_palloc( + tsdn_t *tsdn, arena_t *arena, size_t usize, size_t alignment, bool zero) { + size_t ausize; + edata_t *edata; UNUSED bool idump JEMALLOC_CC_SILENCE_INIT(false); assert(!tsdn_null(tsdn) || 
arena != NULL); @@ -34,8 +34,10 @@ large_palloc(tsdn_t *tsdn, arena_t *arena, size_t usize, size_t alignment, if (likely(!tsdn_null(tsdn))) { arena = arena_choose_maybe_huge(tsdn_tsd(tsdn), arena, usize); } - if (unlikely(arena == NULL) || (edata = arena_extent_alloc_large(tsdn, - arena, usize, alignment, zero)) == NULL) { + if (unlikely(arena == NULL) + || (edata = arena_extent_alloc_large( + tsdn, arena, usize, alignment, zero)) + == NULL) { return NULL; } @@ -53,10 +55,10 @@ large_palloc(tsdn_t *tsdn, arena_t *arena, size_t usize, size_t alignment, static bool large_ralloc_no_move_shrink(tsdn_t *tsdn, edata_t *edata, size_t usize) { - arena_t *arena = arena_get_from_edata(edata); + arena_t *arena = arena_get_from_edata(edata); ehooks_t *ehooks = arena_get_ehooks(arena); - size_t old_size = edata_size_get(edata); - size_t old_usize = edata_usize_get(edata); + size_t old_size = edata_size_get(edata); + size_t old_usize = edata_usize_get(edata); assert(old_usize > usize); @@ -80,8 +82,8 @@ large_ralloc_no_move_shrink(tsdn_t *tsdn, edata_t *edata, size_t usize) { } static bool -large_ralloc_no_move_expand(tsdn_t *tsdn, edata_t *edata, size_t usize, - bool zero) { +large_ralloc_no_move_expand( + tsdn_t *tsdn, edata_t *edata, size_t usize, bool zero) { arena_t *arena = arena_get_from_edata(edata); size_t old_size = edata_size_get(edata); @@ -112,10 +114,10 @@ large_ralloc_no_move_expand(tsdn_t *tsdn, edata_t *edata, size_t usize, * offset from the beginning of the extent is a multiple * of CACHELINE in [0 .. PAGE). 
*/ - void *zbase = (void *) - ((byte_t *)edata_addr_get(edata) + old_usize); - void *zpast = PAGE_ADDR2BASE((void *)((byte_t *)zbase + - PAGE)); + void *zbase = (void *)((byte_t *)edata_addr_get(edata) + + old_usize); + void *zpast = PAGE_ADDR2BASE( + (void *)((byte_t *)zbase + PAGE)); size_t nzero = (byte_t *)zpast - (byte_t *)zbase; assert(nzero > 0); memset(zbase, 0, nzero); @@ -134,19 +136,19 @@ large_ralloc_no_move(tsdn_t *tsdn, edata_t *edata, size_t usize_min, /* The following should have been caught by callers. */ assert(usize_min > 0 && usize_max <= SC_LARGE_MAXCLASS); /* Both allocation sizes must be large to avoid a move. */ - assert(oldusize >= SC_LARGE_MINCLASS - && usize_max >= SC_LARGE_MINCLASS); + assert(oldusize >= SC_LARGE_MINCLASS && usize_max >= SC_LARGE_MINCLASS); if (usize_max > oldusize) { /* Attempt to expand the allocation in-place. */ - if (!large_ralloc_no_move_expand(tsdn, edata, usize_max, - zero)) { + if (!large_ralloc_no_move_expand( + tsdn, edata, usize_max, zero)) { arena_decay_tick(tsdn, arena_get_from_edata(edata)); return false; } /* Try again, this time with usize_min. */ - if (usize_min < usize_max && usize_min > oldusize && - large_ralloc_no_move_expand(tsdn, edata, usize_min, zero)) { + if (usize_min < usize_max && usize_min > oldusize + && large_ralloc_no_move_expand( + tsdn, edata, usize_min, zero)) { arena_decay_tick(tsdn, arena_get_from_edata(edata)); return false; } @@ -172,8 +174,8 @@ large_ralloc_no_move(tsdn_t *tsdn, edata_t *edata, size_t usize_min, } static void * -large_ralloc_move_helper(tsdn_t *tsdn, arena_t *arena, size_t usize, - size_t alignment, bool zero) { +large_ralloc_move_helper( + tsdn_t *tsdn, arena_t *arena, size_t usize, size_t alignment, bool zero) { if (alignment <= CACHELINE) { return large_malloc(tsdn, arena, usize, zero); } @@ -190,14 +192,13 @@ large_ralloc(tsdn_t *tsdn, arena_t *arena, void *ptr, size_t usize, /* The following should have been caught by callers. 
*/ assert(usize > 0 && usize <= SC_LARGE_MAXCLASS); /* Both allocation sizes must be large to avoid a move. */ - assert(oldusize >= SC_LARGE_MINCLASS - && usize >= SC_LARGE_MINCLASS); + assert(oldusize >= SC_LARGE_MINCLASS && usize >= SC_LARGE_MINCLASS); /* Try to avoid moving the allocation. */ if (!large_ralloc_no_move(tsdn, edata, usize, usize, zero)) { - hook_invoke_expand(hook_args->is_realloc - ? hook_expand_realloc : hook_expand_rallocx, ptr, oldusize, - usize, (uintptr_t)ptr, hook_args->args); + hook_invoke_expand(hook_args->is_realloc ? hook_expand_realloc + : hook_expand_rallocx, + ptr, oldusize, usize, (uintptr_t)ptr, hook_args->args); return edata_addr_get(edata); } @@ -206,17 +207,18 @@ large_ralloc(tsdn_t *tsdn, arena_t *arena, void *ptr, size_t usize, * different size class. In that case, fall back to allocating new * space and copying. */ - void *ret = large_ralloc_move_helper(tsdn, arena, usize, alignment, - zero); + void *ret = large_ralloc_move_helper( + tsdn, arena, usize, alignment, zero); if (ret == NULL) { return NULL; } - hook_invoke_alloc(hook_args->is_realloc - ? hook_alloc_realloc : hook_alloc_rallocx, ret, (uintptr_t)ret, - hook_args->args); - hook_invoke_dalloc(hook_args->is_realloc - ? hook_dalloc_realloc : hook_dalloc_rallocx, ptr, hook_args->args); + hook_invoke_alloc( + hook_args->is_realloc ? hook_alloc_realloc : hook_alloc_rallocx, + ret, (uintptr_t)ret, hook_args->args); + hook_invoke_dalloc( + hook_args->is_realloc ? hook_dalloc_realloc : hook_dalloc_rallocx, + ptr, hook_args->args); size_t copysize = (usize < oldusize) ? usize : oldusize; memcpy(ret, edata_addr_get(edata), copysize); @@ -228,8 +230,8 @@ large_ralloc(tsdn_t *tsdn, arena_t *arena, void *ptr, size_t usize, * locked indicates whether the arena's large_mtx is currently held. 
*/ static void -large_dalloc_prep_impl(tsdn_t *tsdn, arena_t *arena, edata_t *edata, - bool locked) { +large_dalloc_prep_impl( + tsdn_t *tsdn, arena_t *arena, edata_t *edata, bool locked) { if (!locked) { /* See comments in arena_bin_slabs_full_insert(). */ if (!arena_is_auto(arena)) { @@ -280,16 +282,16 @@ large_salloc(tsdn_t *tsdn, const edata_t *edata) { } void -large_prof_info_get(tsd_t *tsd, edata_t *edata, prof_info_t *prof_info, - bool reset_recent) { +large_prof_info_get( + tsd_t *tsd, edata_t *edata, prof_info_t *prof_info, bool reset_recent) { assert(prof_info != NULL); prof_tctx_t *alloc_tctx = edata_prof_tctx_get(edata); prof_info->alloc_tctx = alloc_tctx; if (prof_tctx_is_valid(alloc_tctx)) { - nstime_copy(&prof_info->alloc_time, - edata_prof_alloc_time_get(edata)); + nstime_copy( + &prof_info->alloc_time, edata_prof_alloc_time_get(edata)); prof_info->alloc_size = edata_prof_alloc_size_get(edata); if (reset_recent) { /* diff --git a/src/log.c b/src/log.c index 778902fb..9b1c6261 100644 --- a/src/log.c +++ b/src/log.c @@ -3,7 +3,7 @@ #include "jemalloc/internal/log.h" -char log_var_names[JEMALLOC_LOG_VAR_BUFSIZE]; +char log_var_names[JEMALLOC_LOG_VAR_BUFSIZE]; atomic_b_t log_init_done = ATOMIC_INIT(false); /* @@ -11,7 +11,7 @@ atomic_b_t log_init_done = ATOMIC_INIT(false); * with a pointer to the first character after the end of the string. 
*/ static const char * -log_var_extract_segment(const char* segment_begin) { +log_var_extract_segment(const char *segment_begin) { const char *end; for (end = segment_begin; *end != '\0' && *end != '|'; end++) { } @@ -30,12 +30,12 @@ log_var_matches_segment(const char *segment_begin, const char *segment_end, if (segment_len == 1 && *segment_begin == '.') { return true; } - if (segment_len == log_var_len) { + if (segment_len == log_var_len) { return strncmp(segment_begin, log_var_begin, segment_len) == 0; } else if (segment_len < log_var_len) { return strncmp(segment_begin, log_var_begin, segment_len) == 0 && log_var_begin[segment_len] == '.'; - } else { + } else { return false; } } @@ -61,9 +61,9 @@ log_var_update_state(log_var_t *log_var) { segment_begin); assert(segment_end < log_var_names + JEMALLOC_LOG_VAR_BUFSIZE); if (log_var_matches_segment(segment_begin, segment_end, - log_var_begin, log_var_end)) { - atomic_store_u(&log_var->state, LOG_ENABLED, - ATOMIC_RELAXED); + log_var_begin, log_var_end)) { + atomic_store_u( + &log_var->state, LOG_ENABLED, ATOMIC_RELAXED); return LOG_ENABLED; } if (*segment_end == '\0') { diff --git a/src/malloc_io.c b/src/malloc_io.c index 192d8208..0c5d6c03 100644 --- a/src/malloc_io.c +++ b/src/malloc_io.c @@ -5,63 +5,68 @@ #include "jemalloc/internal/util.h" #ifdef assert -# undef assert +# undef assert #endif #ifdef not_reached -# undef not_reached +# undef not_reached #endif #ifdef not_implemented -# undef not_implemented +# undef not_implemented #endif #ifdef assert_not_implemented -# undef assert_not_implemented +# undef assert_not_implemented #endif /* * Define simple versions of assertion macros that won't recurse in case * of assertion failures in malloc_*printf(). 
*/ -#define assert(e) do { \ - if (config_debug && !(e)) { \ - malloc_write(": Failed assertion\n"); \ - abort(); \ - } \ -} while (0) +#define assert(e) \ + do { \ + if (config_debug && !(e)) { \ + malloc_write(": Failed assertion\n"); \ + abort(); \ + } \ + } while (0) -#define not_reached() do { \ - if (config_debug) { \ - malloc_write(": Unreachable code reached\n"); \ - abort(); \ - } \ - unreachable(); \ -} while (0) +#define not_reached() \ + do { \ + if (config_debug) { \ + malloc_write( \ + ": Unreachable code reached\n"); \ + abort(); \ + } \ + unreachable(); \ + } while (0) -#define not_implemented() do { \ - if (config_debug) { \ - malloc_write(": Not implemented\n"); \ - abort(); \ - } \ -} while (0) +#define not_implemented() \ + do { \ + if (config_debug) { \ + malloc_write(": Not implemented\n"); \ + abort(); \ + } \ + } while (0) -#define assert_not_implemented(e) do { \ - if (unlikely(config_debug && !(e))) { \ - not_implemented(); \ - } \ -} while (0) +#define assert_not_implemented(e) \ + do { \ + if (unlikely(config_debug && !(e))) { \ + not_implemented(); \ + } \ + } while (0) /******************************************************************************/ /* Function prototypes for non-inline static functions. 
*/ #define U2S_BUFSIZE ((1U << (LG_SIZEOF_INTMAX_T + 3)) + 1) -static char *u2s(uintmax_t x, unsigned base, bool uppercase, char *s, - size_t *slen_p); +static char *u2s( + uintmax_t x, unsigned base, bool uppercase, char *s, size_t *slen_p); #define D2S_BUFSIZE (1 + U2S_BUFSIZE) static char *d2s(intmax_t x, char sign, char *s, size_t *slen_p); #define O2S_BUFSIZE (1 + U2S_BUFSIZE) static char *o2s(uintmax_t x, bool alt_form, char *s, size_t *slen_p); #define X2S_BUFSIZE (2 + U2S_BUFSIZE) -static char *x2s(uintmax_t x, bool alt_form, bool uppercase, char *s, - size_t *slen_p); +static char *x2s( + uintmax_t x, bool alt_form, bool uppercase, char *s, size_t *slen_p); /******************************************************************************/ @@ -71,7 +76,7 @@ wrtmessage(void *cbopaque, const char *s) { malloc_write_fd(STDERR_FILENO, s, strlen(s)); } -JEMALLOC_EXPORT void (*je_malloc_message)(void *, const char *s); +JEMALLOC_EXPORT void (*je_malloc_message)(void *, const char *s); /* * Wrapper around malloc_message() that avoids the need for @@ -93,14 +98,15 @@ malloc_write(const char *s) { int buferror(int err, char *buf, size_t buflen) { #ifdef _WIN32 - FormatMessageA(FORMAT_MESSAGE_FROM_SYSTEM, NULL, err, 0, - (LPSTR)buf, (DWORD)buflen, NULL); + FormatMessageA(FORMAT_MESSAGE_FROM_SYSTEM, NULL, err, 0, (LPSTR)buf, + (DWORD)buflen, NULL); return 0; -#elif defined(JEMALLOC_STRERROR_R_RETURNS_CHAR_WITH_GNU_SOURCE) && defined(_GNU_SOURCE) +#elif defined(JEMALLOC_STRERROR_R_RETURNS_CHAR_WITH_GNU_SOURCE) \ + && defined(_GNU_SOURCE) char *b = strerror_r(err, buf, buflen); if (b != buf) { strncpy(buf, b, buflen); - buf[buflen-1] = '\0'; + buf[buflen - 1] = '\0'; } return 0; #else @@ -110,9 +116,9 @@ buferror(int err, char *buf, size_t buflen) { uintmax_t malloc_strtoumax(const char *restrict nptr, char **restrict endptr, int base) { - uintmax_t ret, digit; - unsigned b; - bool neg; + uintmax_t ret, digit; + unsigned b; + bool neg; const char *p, *ns; p = nptr; @@ 
-128,7 +134,12 @@ malloc_strtoumax(const char *restrict nptr, char **restrict endptr, int base) { neg = false; while (true) { switch (*p) { - case '\t': case '\n': case '\v': case '\f': case '\r': case ' ': + case '\t': + case '\n': + case '\v': + case '\f': + case '\r': + case ' ': p++; break; case '-': @@ -142,8 +153,8 @@ malloc_strtoumax(const char *restrict nptr, char **restrict endptr, int base) { } } - /* Get prefix, if any. */ - label_prefix: +/* Get prefix, if any. */ +label_prefix: /* * Note where the first non-whitespace/sign character is so that it is * possible to tell whether any digits are consumed (e.g., " 0" vs. @@ -152,8 +163,14 @@ malloc_strtoumax(const char *restrict nptr, char **restrict endptr, int base) { ns = p; if (*p == '0') { switch (p[1]) { - case '0': case '1': case '2': case '3': case '4': case '5': - case '6': case '7': + case '0': + case '1': + case '2': + case '3': + case '4': + case '5': + case '6': + case '7': if (b == 0) { b = 8; } @@ -161,13 +178,30 @@ malloc_strtoumax(const char *restrict nptr, char **restrict endptr, int base) { p++; } break; - case 'X': case 'x': + case 'X': + case 'x': switch (p[2]) { - case '0': case '1': case '2': case '3': case '4': - case '5': case '6': case '7': case '8': case '9': - case 'A': case 'B': case 'C': case 'D': case 'E': + case '0': + case '1': + case '2': + case '3': + case '4': + case '5': + case '6': + case '7': + case '8': + case '9': + case 'A': + case 'B': + case 'C': + case 'D': + case 'E': case 'F': - case 'a': case 'b': case 'c': case 'd': case 'e': + case 'a': + case 'b': + case 'c': + case 'd': + case 'e': case 'f': if (b == 0) { b = 16; @@ -244,9 +278,8 @@ u2s(uintmax_t x, unsigned base, bool uppercase, char *s, size_t *slen_p) { } while (x > 0); break; case 16: { - const char *digits = (uppercase) - ? "0123456789ABCDEF" - : "0123456789abcdef"; + const char *digits = (uppercase) ? 
"0123456789ABCDEF" + : "0123456789abcdef"; do { i--; @@ -254,7 +287,8 @@ u2s(uintmax_t x, unsigned base, bool uppercase, char *s, size_t *slen_p) { x >>= 4; } while (x > 0); break; - } default: { + } + default: { const char *digits = (uppercase) ? "0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZ" : "0123456789abcdefghijklmnopqrstuvwxyz"; @@ -265,7 +299,8 @@ u2s(uintmax_t x, unsigned base, bool uppercase, char *s, size_t *slen_p) { s[i] = digits[x % (uint64_t)base]; x /= (uint64_t)base; } while (x > 0); - }} + } + } *slen_p = U2S_BUFSIZE - 1 - i; return &s[i]; @@ -294,7 +329,8 @@ d2s(intmax_t x, char sign, char *s, size_t *slen_p) { (*slen_p)++; *s = sign; break; - default: not_reached(); + default: + not_reached(); } return s; } @@ -325,106 +361,112 @@ x2s(uintmax_t x, bool alt_form, bool uppercase, char *s, size_t *slen_p) { JEMALLOC_COLD size_t malloc_vsnprintf(char *str, size_t size, const char *format, va_list ap) { - size_t i; + size_t i; const char *f; -#define APPEND_C(c) do { \ - if (i < size) { \ - str[i] = (c); \ - } \ - i++; \ -} while (0) -#define APPEND_S(s, slen) do { \ - if (i < size) { \ - size_t cpylen = (slen <= size - i) ? slen : size - i; \ - memcpy(&str[i], s, cpylen); \ - } \ - i += slen; \ -} while (0) -#define APPEND_PADDED_S(s, slen, width, left_justify) do { \ - /* Left padding. */ \ - size_t pad_len = (width == -1) ? 0 : ((slen < (size_t)width) ? \ - (size_t)width - slen : 0); \ - if (!left_justify && pad_len != 0) { \ - size_t j; \ - for (j = 0; j < pad_len; j++) { \ - if (pad_zero) { \ - APPEND_C('0'); \ - } else { \ - APPEND_C(' '); \ - } \ - } \ - } \ - /* Value. */ \ - APPEND_S(s, slen); \ - /* Right padding. */ \ - if (left_justify && pad_len != 0) { \ - size_t j; \ - for (j = 0; j < pad_len; j++) { \ - APPEND_C(' '); \ - } \ - } \ -} while (0) -#define GET_ARG_NUMERIC(val, len) do { \ - switch ((unsigned char)len) { \ - case '?': \ - val = va_arg(ap, int); \ - break; \ - case '?' 
| 0x80: \ - val = va_arg(ap, unsigned int); \ - break; \ - case 'l': \ - val = va_arg(ap, long); \ - break; \ - case 'l' | 0x80: \ - val = va_arg(ap, unsigned long); \ - break; \ - case 'q': \ - val = va_arg(ap, long long); \ - break; \ - case 'q' | 0x80: \ - val = va_arg(ap, unsigned long long); \ - break; \ - case 'j': \ - val = va_arg(ap, intmax_t); \ - break; \ - case 'j' | 0x80: \ - val = va_arg(ap, uintmax_t); \ - break; \ - case 't': \ - val = va_arg(ap, ptrdiff_t); \ - break; \ - case 'z': \ - val = va_arg(ap, ssize_t); \ - break; \ - case 'z' | 0x80: \ - val = va_arg(ap, size_t); \ - break; \ - case 'p': /* Synthetic; used for %p. */ \ - val = va_arg(ap, uintptr_t); \ - break; \ - default: \ - not_reached(); \ - val = 0; \ - } \ -} while (0) +#define APPEND_C(c) \ + do { \ + if (i < size) { \ + str[i] = (c); \ + } \ + i++; \ + } while (0) +#define APPEND_S(s, slen) \ + do { \ + if (i < size) { \ + size_t cpylen = (slen <= size - i) ? slen : size - i; \ + memcpy(&str[i], s, cpylen); \ + } \ + i += slen; \ + } while (0) +#define APPEND_PADDED_S(s, slen, width, left_justify) \ + do { \ + /* Left padding. */ \ + size_t pad_len = (width == -1) \ + ? 0 \ + : ((slen < (size_t)width) ? (size_t)width - slen : 0); \ + if (!left_justify && pad_len != 0) { \ + size_t j; \ + for (j = 0; j < pad_len; j++) { \ + if (pad_zero) { \ + APPEND_C('0'); \ + } else { \ + APPEND_C(' '); \ + } \ + } \ + } \ + /* Value. */ \ + APPEND_S(s, slen); \ + /* Right padding. */ \ + if (left_justify && pad_len != 0) { \ + size_t j; \ + for (j = 0; j < pad_len; j++) { \ + APPEND_C(' '); \ + } \ + } \ + } while (0) +#define GET_ARG_NUMERIC(val, len) \ + do { \ + switch ((unsigned char)len) { \ + case '?': \ + val = va_arg(ap, int); \ + break; \ + case '?' 
| 0x80: \ + val = va_arg(ap, unsigned int); \ + break; \ + case 'l': \ + val = va_arg(ap, long); \ + break; \ + case 'l' | 0x80: \ + val = va_arg(ap, unsigned long); \ + break; \ + case 'q': \ + val = va_arg(ap, long long); \ + break; \ + case 'q' | 0x80: \ + val = va_arg(ap, unsigned long long); \ + break; \ + case 'j': \ + val = va_arg(ap, intmax_t); \ + break; \ + case 'j' | 0x80: \ + val = va_arg(ap, uintmax_t); \ + break; \ + case 't': \ + val = va_arg(ap, ptrdiff_t); \ + break; \ + case 'z': \ + val = va_arg(ap, ssize_t); \ + break; \ + case 'z' | 0x80: \ + val = va_arg(ap, size_t); \ + break; \ + case 'p': /* Synthetic; used for %p. */ \ + val = va_arg(ap, uintptr_t); \ + break; \ + default: \ + not_reached(); \ + val = 0; \ + } \ + } while (0) i = 0; f = format; while (true) { switch (*f) { - case '\0': goto label_out; + case '\0': + goto label_out; case '%': { - bool alt_form = false; - bool left_justify = false; - bool plus_space = false; - bool plus_plus = false; - int prec = -1; - int width = -1; + bool alt_form = false; + bool left_justify = false; + bool plus_space = false; + bool plus_plus = false; + int prec = -1; + int width = -1; unsigned char len = '?'; - char *s; - size_t slen; - bool pad_zero = false; + char *s; + size_t slen; + bool pad_zero = false; f++; /* Flags. */ @@ -446,12 +488,13 @@ malloc_vsnprintf(char *str, size_t size, const char *format, va_list ap) { assert(!plus_plus); plus_plus = true; break; - default: goto label_width; + default: + goto label_width; } f++; } - /* Width. */ - label_width: + /* Width. 
*/ + label_width: switch (*f) { case '*': width = va_arg(ap, int); @@ -464,16 +507,24 @@ malloc_vsnprintf(char *str, size_t size, const char *format, va_list ap) { case '0': pad_zero = true; JEMALLOC_FALLTHROUGH; - case '1': case '2': case '3': case '4': - case '5': case '6': case '7': case '8': case '9': { + case '1': + case '2': + case '3': + case '4': + case '5': + case '6': + case '7': + case '8': + case '9': { uintmax_t uwidth; set_errno(0); uwidth = malloc_strtoumax(f, (char **)&f, 10); - assert(uwidth != UINTMAX_MAX || get_errno() != - ERANGE); + assert(uwidth != UINTMAX_MAX + || get_errno() != ERANGE); width = (int)uwidth; break; - } default: + } + default: break; } /* Width/precision separator. */ @@ -488,20 +539,29 @@ malloc_vsnprintf(char *str, size_t size, const char *format, va_list ap) { prec = va_arg(ap, int); f++; break; - case '0': case '1': case '2': case '3': case '4': - case '5': case '6': case '7': case '8': case '9': { + case '0': + case '1': + case '2': + case '3': + case '4': + case '5': + case '6': + case '7': + case '8': + case '9': { uintmax_t uprec; set_errno(0); uprec = malloc_strtoumax(f, (char **)&f, 10); - assert(uprec != UINTMAX_MAX || get_errno() != - ERANGE); + assert(uprec != UINTMAX_MAX + || get_errno() != ERANGE); prec = (int)uprec; break; } - default: break; + default: + break; } - /* Length. */ - label_length: + /* Length. */ + label_length: switch (*f) { case 'l': f++; @@ -512,11 +572,15 @@ malloc_vsnprintf(char *str, size_t size, const char *format, va_list ap) { len = 'l'; } break; - case 'q': case 'j': case 't': case 'z': + case 'q': + case 'j': + case 't': + case 'z': len = *f; f++; break; - default: break; + default: + break; } /* Conversion specifier. 
*/ switch (*f) { @@ -525,9 +589,10 @@ malloc_vsnprintf(char *str, size_t size, const char *format, va_list ap) { APPEND_C(*f); f++; break; - case 'd': case 'i': { + case 'd': + case 'i': { intmax_t val JEMALLOC_CC_SILENCE_INIT(0); - char buf[D2S_BUFSIZE]; + char buf[D2S_BUFSIZE]; /* * Outputting negative, zero-padded numbers @@ -542,41 +607,48 @@ malloc_vsnprintf(char *str, size_t size, const char *format, va_list ap) { assert(!pad_zero); GET_ARG_NUMERIC(val, len); - s = d2s(val, (plus_plus ? '+' : (plus_space ? - ' ' : '-')), buf, &slen); + s = d2s(val, + (plus_plus ? '+' + : (plus_space ? ' ' : '-')), + buf, &slen); APPEND_PADDED_S(s, slen, width, left_justify); f++; break; - } case 'o': { + } + case 'o': { uintmax_t val JEMALLOC_CC_SILENCE_INIT(0); - char buf[O2S_BUFSIZE]; + char buf[O2S_BUFSIZE]; GET_ARG_NUMERIC(val, len | 0x80); s = o2s(val, alt_form, buf, &slen); APPEND_PADDED_S(s, slen, width, left_justify); f++; break; - } case 'u': { + } + case 'u': { uintmax_t val JEMALLOC_CC_SILENCE_INIT(0); - char buf[U2S_BUFSIZE]; + char buf[U2S_BUFSIZE]; GET_ARG_NUMERIC(val, len | 0x80); s = u2s(val, 10, false, buf, &slen); APPEND_PADDED_S(s, slen, width, left_justify); f++; break; - } case 'x': case 'X': { + } + case 'x': + case 'X': { uintmax_t val JEMALLOC_CC_SILENCE_INIT(0); - char buf[X2S_BUFSIZE]; + char buf[X2S_BUFSIZE]; GET_ARG_NUMERIC(val, len | 0x80); s = x2s(val, alt_form, *f == 'X', buf, &slen); APPEND_PADDED_S(s, slen, width, left_justify); f++; break; - } case 'c': { + } + case 'c': { unsigned char val; - char buf[2]; + char buf[2]; assert(len == '?' || len == 'l'); assert_not_implemented(len != 'l'); @@ -586,7 +658,8 @@ malloc_vsnprintf(char *str, size_t size, const char *format, va_list ap) { APPEND_PADDED_S(buf, 1, width, left_justify); f++; break; - } case 's': + } + case 's': assert(len == '?' 
|| len == 'l'); assert_not_implemented(len != 'l'); s = va_arg(ap, char *); @@ -596,23 +669,27 @@ malloc_vsnprintf(char *str, size_t size, const char *format, va_list ap) { break; case 'p': { uintmax_t val; - char buf[X2S_BUFSIZE]; + char buf[X2S_BUFSIZE]; GET_ARG_NUMERIC(val, 'p'); s = x2s(val, true, false, buf, &slen); APPEND_PADDED_S(s, slen, width, left_justify); f++; break; - } default: not_reached(); + } + default: + not_reached(); } break; - } default: { + } + default: { APPEND_C(*f); f++; break; - }} + } + } } - label_out: +label_out: if (i < size) { str[i] = '\0'; } else { @@ -629,7 +706,7 @@ malloc_vsnprintf(char *str, size_t size, const char *format, va_list ap) { JEMALLOC_FORMAT_PRINTF(3, 4) size_t malloc_snprintf(char *str, size_t size, const char *format, ...) { - size_t ret; + size_t ret; va_list ap; va_start(ap, format); @@ -640,8 +717,8 @@ malloc_snprintf(char *str, size_t size, const char *format, ...) { } void -malloc_vcprintf(write_cb_t *write_cb, void *cbopaque, const char *format, - va_list ap) { +malloc_vcprintf( + write_cb_t *write_cb, void *cbopaque, const char *format, va_list ap) { char buf[MALLOC_PRINTF_BUFSIZE]; if (write_cb == NULL) { @@ -650,8 +727,8 @@ malloc_vcprintf(write_cb_t *write_cb, void *cbopaque, const char *format, * function, so use the default one. malloc_write() is an * inline function, so use malloc_message() directly here. */ - write_cb = (je_malloc_message != NULL) ? je_malloc_message : - wrtmessage; + write_cb = (je_malloc_message != NULL) ? 
je_malloc_message + : wrtmessage; } malloc_vsnprintf(buf, sizeof(buf), format, ap); diff --git a/src/mutex.c b/src/mutex.c index 5655100d..aa2ab665 100644 --- a/src/mutex.c +++ b/src/mutex.c @@ -6,7 +6,7 @@ #include "jemalloc/internal/spin.h" #if defined(_WIN32) && !defined(_CRT_SPINCOUNT) -#define _CRT_SPINCOUNT 4000 +# define _CRT_SPINCOUNT 4000 #endif /* @@ -22,8 +22,8 @@ int64_t opt_mutex_max_spin = 600; bool isthreaded = false; #endif #ifdef JEMALLOC_MUTEX_INIT_CB -static bool postpone_init = true; -static malloc_mutex_t *postponed_mutexes = NULL; +static bool postpone_init = true; +static malloc_mutex_t *postponed_mutexes = NULL; #endif /******************************************************************************/ @@ -44,14 +44,14 @@ pthread_create(pthread_t *__restrict thread, /******************************************************************************/ #ifdef JEMALLOC_MUTEX_INIT_CB -JEMALLOC_EXPORT int _pthread_mutex_init_calloc_cb(pthread_mutex_t *mutex, - void *(calloc_cb)(size_t, size_t)); +JEMALLOC_EXPORT int _pthread_mutex_init_calloc_cb( + pthread_mutex_t *mutex, void *(calloc_cb)(size_t, size_t)); #endif void malloc_mutex_lock_slow(malloc_mutex_t *mutex) { mutex_prof_data_t *data = &mutex->prof_data; - nstime_t before; + nstime_t before; if (ncpus == 1) { goto label_spin_done; @@ -61,7 +61,7 @@ malloc_mutex_lock_slow(malloc_mutex_t *mutex) { do { spin_cpu_spinwait(); if (!atomic_load_b(&mutex->locked, ATOMIC_RELAXED) - && !malloc_mutex_trylock_final(mutex)) { + && !malloc_mutex_trylock_final(mutex)) { data->n_spin_acquired++; return; } @@ -77,8 +77,9 @@ label_spin_done: /* Copy before to after to avoid clock skews. */ nstime_t after; nstime_copy(&after, &before); - uint32_t n_thds = atomic_fetch_add_u32(&data->n_waiting_thds, 1, - ATOMIC_RELAXED) + 1; + uint32_t n_thds = atomic_fetch_add_u32( + &data->n_waiting_thds, 1, ATOMIC_RELAXED) + + 1; /* One last try as above two calls may take quite some cycles. 
*/ if (!malloc_mutex_trylock_final(mutex)) { atomic_fetch_sub_u32(&data->n_waiting_thds, 1, ATOMIC_RELAXED); @@ -137,27 +138,28 @@ mutex_addr_comp(const witness_t *witness1, void *mutex1, } bool -malloc_mutex_init(malloc_mutex_t *mutex, const char *name, - witness_rank_t rank, malloc_mutex_lock_order_t lock_order) { +malloc_mutex_init(malloc_mutex_t *mutex, const char *name, witness_rank_t rank, + malloc_mutex_lock_order_t lock_order) { mutex_prof_data_init(&mutex->prof_data); #ifdef _WIN32 -# if _WIN32_WINNT >= 0x0600 +# if _WIN32_WINNT >= 0x0600 InitializeSRWLock(&mutex->lock); -# else - if (!InitializeCriticalSectionAndSpinCount(&mutex->lock, - _CRT_SPINCOUNT)) { +# else + if (!InitializeCriticalSectionAndSpinCount( + &mutex->lock, _CRT_SPINCOUNT)) { return true; } -# endif +# endif #elif (defined(JEMALLOC_OS_UNFAIR_LOCK)) - mutex->lock = OS_UNFAIR_LOCK_INIT; + mutex->lock = OS_UNFAIR_LOCK_INIT; #elif (defined(JEMALLOC_MUTEX_INIT_CB)) if (postpone_init) { mutex->postponed_next = postponed_mutexes; postponed_mutexes = mutex; } else { - if (_pthread_mutex_init_calloc_cb(&mutex->lock, - bootstrap_calloc) != 0) { + if (_pthread_mutex_init_calloc_cb( + &mutex->lock, bootstrap_calloc) + != 0) { return true; } } @@ -201,9 +203,10 @@ malloc_mutex_postfork_child(tsdn_t *tsdn, malloc_mutex_t *mutex) { #ifdef JEMALLOC_MUTEX_INIT_CB malloc_mutex_unlock(tsdn, mutex); #else - if (malloc_mutex_init(mutex, mutex->witness.name, - mutex->witness.rank, mutex->lock_order)) { - malloc_printf(": Error re-initializing mutex in " + if (malloc_mutex_init(mutex, mutex->witness.name, mutex->witness.rank, + mutex->lock_order)) { + malloc_printf( + ": Error re-initializing mutex in " "child\n"); if (opt_abort) { abort(); @@ -217,8 +220,9 @@ malloc_mutex_boot(void) { #ifdef JEMALLOC_MUTEX_INIT_CB postpone_init = false; while (postponed_mutexes != NULL) { - if (_pthread_mutex_init_calloc_cb(&postponed_mutexes->lock, - bootstrap_calloc) != 0) { + if (_pthread_mutex_init_calloc_cb( + 
&postponed_mutexes->lock, bootstrap_calloc) + != 0) { return true; } postponed_mutexes = postponed_mutexes->postponed_next; diff --git a/src/nstime.c b/src/nstime.c index 894753aa..ee2ddc51 100644 --- a/src/nstime.c +++ b/src/nstime.c @@ -5,8 +5,8 @@ #include "jemalloc/internal/assert.h" -#define BILLION UINT64_C(1000000000) -#define MILLION UINT64_C(1000000) +#define BILLION UINT64_C(1000000000) +#define MILLION UINT64_C(1000000) static void nstime_set_initialized(nstime_t *time) { @@ -22,8 +22,8 @@ nstime_assert_initialized(const nstime_t *time) { * Some parts (e.g. stats) rely on memset to zero initialize. Treat * these as valid initialization. */ - assert(time->magic == NSTIME_MAGIC || - (time->magic == 0 && time->ns == 0)); + assert( + time->magic == NSTIME_MAGIC || (time->magic == 0 && time->ns == 0)); #endif } @@ -133,8 +133,10 @@ nstime_isubtract(nstime_t *time, uint64_t subtrahend) { void nstime_imultiply(nstime_t *time, uint64_t multiplier) { nstime_assert_initialized(time); - assert((((time->ns | multiplier) & (UINT64_MAX << (sizeof(uint64_t) << - 2))) == 0) || ((time->ns * multiplier) / multiplier == time->ns)); + assert( + (((time->ns | multiplier) & (UINT64_MAX << (sizeof(uint64_t) << 2))) + == 0) + || ((time->ns * multiplier) / multiplier == time->ns)); nstime_initialize_operand(time); time->ns *= multiplier; @@ -178,7 +180,7 @@ nstime_ms_since(const nstime_t *past) { } #ifdef _WIN32 -# define NSTIME_MONOTONIC false +# define NSTIME_MONOTONIC false static void nstime_get(nstime_t *time) { FILETIME ft; @@ -190,7 +192,7 @@ nstime_get(nstime_t *time) { nstime_init(time, ticks_100ns * 100); } #elif defined(JEMALLOC_HAVE_CLOCK_MONOTONIC_COARSE) -# define NSTIME_MONOTONIC true +# define NSTIME_MONOTONIC true static void nstime_get(nstime_t *time) { struct timespec ts; @@ -199,7 +201,7 @@ nstime_get(nstime_t *time) { nstime_init2(time, ts.tv_sec, ts.tv_nsec); } #elif defined(JEMALLOC_HAVE_CLOCK_MONOTONIC) -# define NSTIME_MONOTONIC true +# define 
NSTIME_MONOTONIC true static void nstime_get(nstime_t *time) { struct timespec ts; @@ -208,24 +210,24 @@ nstime_get(nstime_t *time) { nstime_init2(time, ts.tv_sec, ts.tv_nsec); } #elif defined(JEMALLOC_HAVE_CLOCK_GETTIME_NSEC_NP) -# define NSTIME_MONOTONIC true +# define NSTIME_MONOTONIC true static void nstime_get(nstime_t *time) { nstime_init(time, clock_gettime_nsec_np(CLOCK_UPTIME_RAW)); } #elif defined(JEMALLOC_HAVE_MACH_ABSOLUTE_TIME) -# define NSTIME_MONOTONIC true +# define NSTIME_MONOTONIC true static void nstime_get(nstime_t *time) { static mach_timebase_info_data_t sTimebaseInfo; if (sTimebaseInfo.denom == 0) { - (void) mach_timebase_info(&sTimebaseInfo); + (void)mach_timebase_info(&sTimebaseInfo); } - nstime_init(time, mach_absolute_time() * sTimebaseInfo.numer - / sTimebaseInfo.denom); + nstime_init(time, + mach_absolute_time() * sTimebaseInfo.numer / sTimebaseInfo.denom); } #else -# define NSTIME_MONOTONIC false +# define NSTIME_MONOTONIC false static void nstime_get(nstime_t *time) { struct timeval tv; @@ -242,15 +244,13 @@ nstime_monotonic_impl(void) { } nstime_monotonic_t *JET_MUTABLE nstime_monotonic = nstime_monotonic_impl; -prof_time_res_t opt_prof_time_res = - prof_time_res_default; +prof_time_res_t opt_prof_time_res = prof_time_res_default; const char *const prof_time_res_mode_names[] = { - "default", - "high", + "default", + "high", }; - static void nstime_get_realtime(nstime_t *time) { #if defined(JEMALLOC_HAVE_CLOCK_REALTIME) && !defined(_WIN32) @@ -302,5 +302,3 @@ nstime_prof_init_update(nstime_t *time) { nstime_init_zero(time); nstime_prof_update(time); } - - diff --git a/src/pa.c b/src/pa.c index 7a24ae65..becf69b1 100644 --- a/src/pa.c +++ b/src/pa.c @@ -41,8 +41,8 @@ pa_shard_init(tsdn_t *tsdn, pa_shard_t *shard, pa_central_t *central, } if (pac_init(tsdn, &shard->pac, base, emap, &shard->edata_cache, - cur_time, pac_oversize_threshold, dirty_decay_ms, muzzy_decay_ms, - &stats->pac_stats, stats_mtx)) { + cur_time, 
pac_oversize_threshold, dirty_decay_ms, + muzzy_decay_ms, &stats->pac_stats, stats_mtx)) { return true; } @@ -68,11 +68,11 @@ bool pa_shard_enable_hpa(tsdn_t *tsdn, pa_shard_t *shard, const hpa_shard_opts_t *hpa_opts, const sec_opts_t *hpa_sec_opts) { if (hpa_shard_init(&shard->hpa_shard, &shard->central->hpa, shard->emap, - shard->base, &shard->edata_cache, shard->ind, hpa_opts)) { + shard->base, &shard->edata_cache, shard->ind, hpa_opts)) { return true; } if (sec_init(tsdn, &shard->hpa_sec, shard->base, &shard->hpa_shard.pai, - hpa_sec_opts)) { + hpa_sec_opts)) { return true; } shard->ever_used_hpa = true; @@ -114,16 +114,16 @@ pa_shard_destroy(tsdn_t *tsdn, pa_shard_t *shard) { static pai_t * pa_get_pai(pa_shard_t *shard, edata_t *edata) { - return (edata_pai_get(edata) == EXTENT_PAI_PAC - ? &shard->pac.pai : &shard->hpa_sec.pai); + return (edata_pai_get(edata) == EXTENT_PAI_PAC ? &shard->pac.pai + : &shard->hpa_sec.pai); } edata_t * pa_alloc(tsdn_t *tsdn, pa_shard_t *shard, size_t size, size_t alignment, bool slab, szind_t szind, bool zero, bool guarded, bool *deferred_work_generated) { - witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn), - WITNESS_RANK_CORE, 0); + witness_assert_depth_to_rank( + tsdn_witness_tsdp_get(tsdn), WITNESS_RANK_CORE, 0); assert(!guarded || alignment <= PAGE); edata_t *edata = NULL; @@ -190,8 +190,8 @@ pa_shrink(tsdn_t *tsdn, pa_shard_t *shard, edata_t *edata, size_t old_size, size_t shrink_amount = old_size - new_size; pai_t *pai = pa_get_pai(shard, edata); - bool error = pai_shrink(tsdn, pai, edata, old_size, new_size, - deferred_work_generated); + bool error = pai_shrink( + tsdn, pai, edata, old_size, new_size, deferred_work_generated); if (error) { return true; } @@ -232,11 +232,11 @@ pa_decay_ms_get(pa_shard_t *shard, extent_state_t state) { } void -pa_shard_set_deferral_allowed(tsdn_t *tsdn, pa_shard_t *shard, - bool deferral_allowed) { +pa_shard_set_deferral_allowed( + tsdn_t *tsdn, pa_shard_t *shard, bool 
deferral_allowed) { if (pa_shard_uses_hpa(shard)) { - hpa_shard_set_deferral_allowed(tsdn, &shard->hpa_shard, - deferral_allowed); + hpa_shard_set_deferral_allowed( + tsdn, &shard->hpa_shard, deferral_allowed); } } @@ -260,8 +260,8 @@ pa_shard_time_until_deferred_work(tsdn_t *tsdn, pa_shard_t *shard) { } if (pa_shard_uses_hpa(shard)) { - uint64_t hpa = - pai_time_until_deferred_work(tsdn, &shard->hpa_shard.pai); + uint64_t hpa = pai_time_until_deferred_work( + tsdn, &shard->hpa_shard.pai); if (hpa < time) { time = hpa; } diff --git a/src/pa_extra.c b/src/pa_extra.c index 76507039..7c2498b7 100644 --- a/src/pa_extra.c +++ b/src/pa_extra.c @@ -94,8 +94,8 @@ pa_shard_nmuzzy(pa_shard_t *shard) { } void -pa_shard_basic_stats_merge(pa_shard_t *shard, size_t *nactive, size_t *ndirty, - size_t *nmuzzy) { +pa_shard_basic_stats_merge( + pa_shard_t *shard, size_t *nactive, size_t *ndirty, size_t *nmuzzy) { *nactive += pa_shard_nactive(shard); *ndirty += pa_shard_ndirty(shard); *nmuzzy += pa_shard_nmuzzy(shard); @@ -122,29 +122,29 @@ pa_shard_stats_merge(tsdn_t *tsdn, pa_shard_t *shard, locked_inc_u64_unsynchronized( &pa_shard_stats_out->pac_stats.decay_dirty.npurge, locked_read_u64(tsdn, LOCKEDINT_MTX(*shard->stats_mtx), - &shard->pac.stats->decay_dirty.npurge)); + &shard->pac.stats->decay_dirty.npurge)); locked_inc_u64_unsynchronized( &pa_shard_stats_out->pac_stats.decay_dirty.nmadvise, locked_read_u64(tsdn, LOCKEDINT_MTX(*shard->stats_mtx), - &shard->pac.stats->decay_dirty.nmadvise)); + &shard->pac.stats->decay_dirty.nmadvise)); locked_inc_u64_unsynchronized( &pa_shard_stats_out->pac_stats.decay_dirty.purged, locked_read_u64(tsdn, LOCKEDINT_MTX(*shard->stats_mtx), - &shard->pac.stats->decay_dirty.purged)); + &shard->pac.stats->decay_dirty.purged)); /* Muzzy decay stats */ locked_inc_u64_unsynchronized( &pa_shard_stats_out->pac_stats.decay_muzzy.npurge, locked_read_u64(tsdn, LOCKEDINT_MTX(*shard->stats_mtx), - &shard->pac.stats->decay_muzzy.npurge)); + 
&shard->pac.stats->decay_muzzy.npurge)); locked_inc_u64_unsynchronized( &pa_shard_stats_out->pac_stats.decay_muzzy.nmadvise, locked_read_u64(tsdn, LOCKEDINT_MTX(*shard->stats_mtx), - &shard->pac.stats->decay_muzzy.nmadvise)); + &shard->pac.stats->decay_muzzy.nmadvise)); locked_inc_u64_unsynchronized( &pa_shard_stats_out->pac_stats.decay_muzzy.purged, locked_read_u64(tsdn, LOCKEDINT_MTX(*shard->stats_mtx), - &shard->pac.stats->decay_muzzy.purged)); + &shard->pac.stats->decay_muzzy.purged)); atomic_load_add_store_zu(&pa_shard_stats_out->pac_stats.abandoned_vm, atomic_load_zu(&shard->pac.stats->abandoned_vm, ATOMIC_RELAXED)); @@ -157,8 +157,8 @@ pa_shard_stats_merge(tsdn_t *tsdn, pa_shard_t *shard, retained = ecache_nextents_get(&shard->pac.ecache_retained, i); dirty_bytes = ecache_nbytes_get(&shard->pac.ecache_dirty, i); muzzy_bytes = ecache_nbytes_get(&shard->pac.ecache_muzzy, i); - retained_bytes = ecache_nbytes_get(&shard->pac.ecache_retained, - i); + retained_bytes = ecache_nbytes_get( + &shard->pac.ecache_retained, i); estats_out[i].ndirty = dirty; estats_out[i].nmuzzy = muzzy; diff --git a/src/pac.c b/src/pac.c index 0e435717..361816e9 100644 --- a/src/pac.c +++ b/src/pac.c @@ -7,18 +7,18 @@ static edata_t *pac_alloc_impl(tsdn_t *tsdn, pai_t *self, size_t size, size_t alignment, bool zero, bool guarded, bool frequent_reuse, bool *deferred_work_generated); -static bool pac_expand_impl(tsdn_t *tsdn, pai_t *self, edata_t *edata, - size_t old_size, size_t new_size, bool zero, bool *deferred_work_generated); -static bool pac_shrink_impl(tsdn_t *tsdn, pai_t *self, edata_t *edata, - size_t old_size, size_t new_size, bool *deferred_work_generated); -static void pac_dalloc_impl(tsdn_t *tsdn, pai_t *self, edata_t *edata, - bool *deferred_work_generated); +static bool pac_expand_impl(tsdn_t *tsdn, pai_t *self, edata_t *edata, + size_t old_size, size_t new_size, bool zero, bool *deferred_work_generated); +static bool pac_shrink_impl(tsdn_t *tsdn, pai_t *self, edata_t 
*edata, + size_t old_size, size_t new_size, bool *deferred_work_generated); +static void pac_dalloc_impl( + tsdn_t *tsdn, pai_t *self, edata_t *edata, bool *deferred_work_generated); static uint64_t pac_time_until_deferred_work(tsdn_t *tsdn, pai_t *self); static inline void -pac_decay_data_get(pac_t *pac, extent_state_t state, - decay_t **r_decay, pac_decay_stats_t **r_decay_stats, ecache_t **r_ecache) { - switch(state) { +pac_decay_data_get(pac_t *pac, extent_state_t state, decay_t **r_decay, + pac_decay_stats_t **r_decay_stats, ecache_t **r_ecache) { + switch (state) { case extent_state_dirty: *r_decay = &pac->decay_dirty; *r_decay_stats = &pac->stats->decay_dirty; @@ -51,7 +51,7 @@ pac_init(tsdn_t *tsdn, pac_t *pac, base_t *base, emap_t *emap, * merging/splitting extents is non-trivial. */ if (ecache_init(tsdn, &pac->ecache_dirty, extent_state_dirty, ind, - /* delay_coalesce */ true)) { + /* delay_coalesce */ true)) { return true; } /* @@ -59,7 +59,7 @@ pac_init(tsdn_t *tsdn, pac_t *pac, base_t *base, emap_t *emap, * the critical path much less often than for dirty extents. */ if (ecache_init(tsdn, &pac->ecache_muzzy, extent_state_muzzy, ind, - /* delay_coalesce */ false)) { + /* delay_coalesce */ false)) { return true; } /* @@ -68,17 +68,17 @@ pac_init(tsdn_t *tsdn, pac_t *pac, base_t *base, emap_t *emap, * coalescing), but also because operations on retained extents are not * in the critical path. 
*/ - if (ecache_init(tsdn, &pac->ecache_retained, extent_state_retained, - ind, /* delay_coalesce */ false)) { + if (ecache_init(tsdn, &pac->ecache_retained, extent_state_retained, ind, + /* delay_coalesce */ false)) { return true; } exp_grow_init(&pac->exp_grow); if (malloc_mutex_init(&pac->grow_mtx, "extent_grow", - WITNESS_RANK_EXTENT_GROW, malloc_mutex_rank_exclusive)) { + WITNESS_RANK_EXTENT_GROW, malloc_mutex_rank_exclusive)) { return true; } - atomic_store_zu(&pac->oversize_threshold, pac_oversize_threshold, - ATOMIC_RELAXED); + atomic_store_zu( + &pac->oversize_threshold, pac_oversize_threshold, ATOMIC_RELAXED); if (decay_init(&pac->decay_dirty, cur_time, dirty_decay_ms)) { return true; } @@ -112,7 +112,8 @@ pac_may_have_muzzy(pac_t *pac) { return pac_decay_ms_get(pac, extent_state_muzzy) != 0; } -static size_t pac_alloc_retained_batched_size(size_t size) { +static size_t +pac_alloc_retained_batched_size(size_t size) { if (size > SC_LARGE_MAXCLASS) { /* * A valid input with usize SC_LARGE_MAXCLASS could still @@ -124,8 +125,8 @@ static size_t pac_alloc_retained_batched_size(size_t size) { } size_t batched_size = sz_s2u_compute_using_delta(size); size_t next_hugepage_size = HUGEPAGE_CEILING(size); - return batched_size > next_hugepage_size? next_hugepage_size: - batched_size; + return batched_size > next_hugepage_size ? next_hugepage_size + : batched_size; } static edata_t * @@ -162,8 +163,8 @@ pac_alloc_real(tsdn_t *tsdn, pac_t *pac, ehooks_t *ehooks, size_t size, * limits. This choice should be reevaluated if * pac_alloc_retained_batched_size is changed to be more aggressive. 
*/ - if (sz_large_size_classes_disabled() && edata == NULL && - (maps_coalesce || opt_retain)) { + if (sz_large_size_classes_disabled() && edata == NULL + && (maps_coalesce || opt_retain)) { size_t batched_size = pac_alloc_retained_batched_size(size); /* * Note that ecache_alloc_grow will try to retrieve virtual @@ -173,12 +174,12 @@ pac_alloc_real(tsdn_t *tsdn, pac_t *pac, ehooks_t *ehooks, size_t size, * with opt_retain off. */ edata = ecache_alloc_grow(tsdn, pac, ehooks, - &pac->ecache_retained, NULL, batched_size, - alignment, zero, guarded); + &pac->ecache_retained, NULL, batched_size, alignment, zero, + guarded); if (edata != NULL && batched_size > size) { - edata_t *trail = extent_split_wrapper(tsdn, pac, - ehooks, edata, size, batched_size - size, + edata_t *trail = extent_split_wrapper(tsdn, pac, ehooks, + edata, size, batched_size - size, /* holding_core_locks */ false); if (trail == NULL) { ecache_dalloc(tsdn, pac, ehooks, @@ -203,8 +204,8 @@ pac_alloc_real(tsdn_t *tsdn, pac_t *pac, ehooks_t *ehooks, size_t size, } if (config_stats && newly_mapped_size != 0) { - atomic_fetch_add_zu(&pac->stats->pac_mapped, - newly_mapped_size, ATOMIC_RELAXED); + atomic_fetch_add_zu( + &pac->stats->pac_mapped, newly_mapped_size, ATOMIC_RELAXED); } return edata; @@ -217,8 +218,8 @@ pac_alloc_new_guarded(tsdn_t *tsdn, pac_t *pac, ehooks_t *ehooks, size_t size, edata_t *edata; if (san_bump_enabled() && frequent_reuse) { - edata = san_bump_alloc(tsdn, &pac->sba, pac, ehooks, size, - zero); + edata = san_bump_alloc( + tsdn, &pac->sba, pac, ehooks, size, zero); } else { size_t size_with_guards = san_two_side_guarded_sz(size); /* Alloc a non-guarded extent first.*/ @@ -227,12 +228,12 @@ pac_alloc_new_guarded(tsdn_t *tsdn, pac_t *pac, ehooks_t *ehooks, size_t size, if (edata != NULL) { /* Add guards around it. 
*/ assert(edata_size_get(edata) == size_with_guards); - san_guard_pages_two_sided(tsdn, ehooks, edata, - pac->emap, true); + san_guard_pages_two_sided( + tsdn, ehooks, edata, pac->emap, true); } } - assert(edata == NULL || (edata_guarded_get(edata) && - edata_size_get(edata) == size)); + assert(edata == NULL + || (edata_guarded_get(edata) && edata_size_get(edata) == size)); return edata; } @@ -241,7 +242,7 @@ static edata_t * pac_alloc_impl(tsdn_t *tsdn, pai_t *self, size_t size, size_t alignment, bool zero, bool guarded, bool frequent_reuse, bool *deferred_work_generated) { - pac_t *pac = (pac_t *)self; + pac_t *pac = (pac_t *)self; ehooks_t *ehooks = pac_ehooks_get(pac); edata_t *edata = NULL; @@ -252,13 +253,13 @@ pac_alloc_impl(tsdn_t *tsdn, pai_t *self, size_t size, size_t alignment, * for such allocations would always return NULL. * */ if (!guarded || frequent_reuse) { - edata = pac_alloc_real(tsdn, pac, ehooks, size, alignment, - zero, guarded); + edata = pac_alloc_real( + tsdn, pac, ehooks, size, alignment, zero, guarded); } if (edata == NULL && guarded) { /* No cached guarded extents; creating a new one. 
*/ - edata = pac_alloc_new_guarded(tsdn, pac, ehooks, size, - alignment, zero, frequent_reuse); + edata = pac_alloc_new_guarded( + tsdn, pac, ehooks, size, alignment, zero, frequent_reuse); } return edata; @@ -267,7 +268,7 @@ pac_alloc_impl(tsdn_t *tsdn, pai_t *self, size_t size, size_t alignment, static bool pac_expand_impl(tsdn_t *tsdn, pai_t *self, edata_t *edata, size_t old_size, size_t new_size, bool zero, bool *deferred_work_generated) { - pac_t *pac = (pac_t *)self; + pac_t *pac = (pac_t *)self; ehooks_t *ehooks = pac_ehooks_get(pac); size_t mapped_add = 0; @@ -296,8 +297,8 @@ pac_expand_impl(tsdn_t *tsdn, pai_t *self, edata_t *edata, size_t old_size, return true; } if (config_stats && mapped_add > 0) { - atomic_fetch_add_zu(&pac->stats->pac_mapped, mapped_add, - ATOMIC_RELAXED); + atomic_fetch_add_zu( + &pac->stats->pac_mapped, mapped_add, ATOMIC_RELAXED); } return false; } @@ -305,7 +306,7 @@ pac_expand_impl(tsdn_t *tsdn, pai_t *self, edata_t *edata, size_t old_size, static bool pac_shrink_impl(tsdn_t *tsdn, pai_t *self, edata_t *edata, size_t old_size, size_t new_size, bool *deferred_work_generated) { - pac_t *pac = (pac_t *)self; + pac_t *pac = (pac_t *)self; ehooks_t *ehooks = pac_ehooks_get(pac); size_t shrink_amount = old_size - new_size; @@ -325,9 +326,9 @@ pac_shrink_impl(tsdn_t *tsdn, pai_t *self, edata_t *edata, size_t old_size, } static void -pac_dalloc_impl(tsdn_t *tsdn, pai_t *self, edata_t *edata, - bool *deferred_work_generated) { - pac_t *pac = (pac_t *)self; +pac_dalloc_impl( + tsdn_t *tsdn, pai_t *self, edata_t *edata, bool *deferred_work_generated) { + pac_t *pac = (pac_t *)self; ehooks_t *ehooks = pac_ehooks_get(pac); if (edata_guarded_get(edata)) { @@ -344,10 +345,10 @@ pac_dalloc_impl(tsdn_t *tsdn, pai_t *self, edata_t *edata, * guarded). 
*/ if (!edata_slab_get(edata) || !maps_coalesce) { - assert(edata_size_get(edata) >= SC_LARGE_MINCLASS || - !maps_coalesce); - san_unguard_pages_two_sided(tsdn, ehooks, edata, - pac->emap); + assert(edata_size_get(edata) >= SC_LARGE_MINCLASS + || !maps_coalesce); + san_unguard_pages_two_sided( + tsdn, ehooks, edata, pac->emap); } } @@ -362,8 +363,8 @@ pac_ns_until_purge(tsdn_t *tsdn, decay_t *decay, size_t npages) { /* Use minimal interval if decay is contended. */ return BACKGROUND_THREAD_DEFERRED_MIN; } - uint64_t result = decay_ns_until_purge(decay, npages, - ARENA_DEFERRED_PURGE_NPAGES_THRESHOLD); + uint64_t result = decay_ns_until_purge( + decay, npages, ARENA_DEFERRED_PURGE_NPAGES_THRESHOLD); malloc_mutex_unlock(tsdn, &decay->mtx); return result; @@ -372,18 +373,16 @@ pac_ns_until_purge(tsdn_t *tsdn, decay_t *decay, size_t npages) { static uint64_t pac_time_until_deferred_work(tsdn_t *tsdn, pai_t *self) { uint64_t time; - pac_t *pac = (pac_t *)self; + pac_t *pac = (pac_t *)self; - time = pac_ns_until_purge(tsdn, - &pac->decay_dirty, - ecache_npages_get(&pac->ecache_dirty)); + time = pac_ns_until_purge( + tsdn, &pac->decay_dirty, ecache_npages_get(&pac->ecache_dirty)); if (time == BACKGROUND_THREAD_DEFERRED_MIN) { return time; } - uint64_t muzzy = pac_ns_until_purge(tsdn, - &pac->decay_muzzy, - ecache_npages_get(&pac->ecache_muzzy)); + uint64_t muzzy = pac_ns_until_purge( + tsdn, &pac->decay_muzzy, ecache_npages_get(&pac->ecache_muzzy)); if (muzzy < time) { time = muzzy; } @@ -391,8 +390,8 @@ pac_time_until_deferred_work(tsdn_t *tsdn, pai_t *self) { } bool -pac_retain_grow_limit_get_set(tsdn_t *tsdn, pac_t *pac, size_t *old_limit, - size_t *new_limit) { +pac_retain_grow_limit_get_set( + tsdn_t *tsdn, pac_t *pac, size_t *old_limit, size_t *new_limit) { pszind_t new_ind JEMALLOC_CC_SILENCE_INIT(0); if (new_limit != NULL) { size_t limit = *new_limit; @@ -418,15 +417,15 @@ static size_t pac_stash_decayed(tsdn_t *tsdn, pac_t *pac, ecache_t *ecache, size_t 
npages_limit, size_t npages_decay_max, edata_list_inactive_t *result) { - witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn), - WITNESS_RANK_CORE, 0); + witness_assert_depth_to_rank( + tsdn_witness_tsdp_get(tsdn), WITNESS_RANK_CORE, 0); ehooks_t *ehooks = pac_ehooks_get(pac); /* Stash extents according to npages_limit. */ size_t nstashed = 0; while (nstashed < npages_decay_max) { - edata_t *edata = ecache_evict(tsdn, pac, ehooks, ecache, - npages_limit); + edata_t *edata = ecache_evict( + tsdn, pac, ehooks, ecache, npages_limit); if (edata == NULL) { break; } @@ -443,8 +442,8 @@ decay_with_process_madvise(edata_list_inactive_t *decay_extents) { #ifndef JEMALLOC_HAVE_PROCESS_MADVISE return true; #else - assert(opt_process_madvise_max_batch <= - PROCESS_MADVISE_MAX_BATCH_LIMIT); + assert( + opt_process_madvise_max_batch <= PROCESS_MADVISE_MAX_BATCH_LIMIT); size_t len = opt_process_madvise_max_batch; VARIABLE_ARRAY(struct iovec, vec, len); @@ -458,8 +457,8 @@ decay_with_process_madvise(edata_list_inactive_t *decay_extents) { total_bytes += pages_bytes; cur++; if (cur == len) { - bool err = pages_purge_process_madvise(vec, len, - total_bytes); + bool err = pages_purge_process_madvise( + vec, len, total_bytes); if (err) { return true; } @@ -489,14 +488,14 @@ pac_decay_stashed(tsdn_t *tsdn, pac_t *pac, decay_t *decay, bool try_muzzy = !fully_decay && pac_decay_ms_get(pac, extent_state_muzzy) != 0; - bool purge_to_retained = !try_muzzy || - ecache->state == extent_state_muzzy; + bool purge_to_retained = !try_muzzy + || ecache->state == extent_state_muzzy; /* * Attempt process_madvise only if 1) enabled, 2) purging to retained, * and 3) not using custom hooks. 
*/ - bool try_process_madvise = (opt_process_madvise_max_batch > 0) && - purge_to_retained && ehooks_dalloc_will_fail(ehooks); + bool try_process_madvise = (opt_process_madvise_max_batch > 0) + && purge_to_retained && ehooks_dalloc_will_fail(ehooks); bool already_purged; if (try_process_madvise) { @@ -511,8 +510,8 @@ pac_decay_stashed(tsdn_t *tsdn, pac_t *pac, decay_t *decay, already_purged = false; } - for (edata_t *edata = edata_list_inactive_first(decay_extents); edata != - NULL; edata = edata_list_inactive_first(decay_extents)) { + for (edata_t *edata = edata_list_inactive_first(decay_extents); + edata != NULL; edata = edata_list_inactive_first(decay_extents)) { edata_list_inactive_remove(decay_extents, edata); size_t size = edata_size_get(edata); @@ -524,8 +523,8 @@ pac_decay_stashed(tsdn_t *tsdn, pac_t *pac, decay_t *decay, switch (ecache->state) { case extent_state_dirty: if (try_muzzy) { - err = extent_purge_lazy_wrapper(tsdn, ehooks, - edata, /* offset */ 0, size); + err = extent_purge_lazy_wrapper( + tsdn, ehooks, edata, /* offset */ 0, size); if (!err) { ecache_dalloc(tsdn, pac, ehooks, &pac->ecache_muzzy, edata); @@ -535,8 +534,8 @@ pac_decay_stashed(tsdn_t *tsdn, pac_t *pac, decay_t *decay, JEMALLOC_FALLTHROUGH; case extent_state_muzzy: if (already_purged) { - extent_dalloc_wrapper_purged(tsdn, pac, ehooks, - edata); + extent_dalloc_wrapper_purged( + tsdn, pac, ehooks, edata); } else { extent_dalloc_wrapper(tsdn, pac, ehooks, edata); } @@ -578,8 +577,8 @@ static void pac_decay_to_limit(tsdn_t *tsdn, pac_t *pac, decay_t *decay, pac_decay_stats_t *decay_stats, ecache_t *ecache, bool fully_decay, size_t npages_limit, size_t npages_decay_max) { - witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn), - WITNESS_RANK_CORE, 1); + witness_assert_depth_to_rank( + tsdn_witness_tsdp_get(tsdn), WITNESS_RANK_CORE, 1); if (decay->purging || npages_decay_max == 0) { return; @@ -589,8 +588,8 @@ pac_decay_to_limit(tsdn_t *tsdn, pac_t *pac, decay_t *decay, 
edata_list_inactive_t decay_extents; edata_list_inactive_init(&decay_extents); - size_t npurge = pac_stash_decayed(tsdn, pac, ecache, npages_limit, - npages_decay_max, &decay_extents); + size_t npurge = pac_stash_decayed( + tsdn, pac, ecache, npages_limit, npages_decay_max, &decay_extents); if (npurge != 0) { size_t npurged = pac_decay_stashed(tsdn, pac, decay, decay_stats, ecache, fully_decay, &decay_extents); @@ -611,8 +610,8 @@ pac_decay_all(tsdn_t *tsdn, pac_t *pac, decay_t *decay, static void pac_decay_try_purge(tsdn_t *tsdn, pac_t *pac, decay_t *decay, - pac_decay_stats_t *decay_stats, ecache_t *ecache, - size_t current_npages, size_t npages_limit) { + pac_decay_stats_t *decay_stats, ecache_t *ecache, size_t current_npages, + size_t npages_limit) { if (current_npages > npages_limit) { pac_decay_to_limit(tsdn, pac, decay, decay_stats, ecache, /* fully_decay */ false, npages_limit, @@ -647,8 +646,8 @@ pac_maybe_decay_purge(tsdn_t *tsdn, pac_t *pac, decay_t *decay, nstime_t time; nstime_init_update(&time); size_t npages_current = ecache_npages_get(ecache); - bool epoch_advanced = decay_maybe_advance_epoch(decay, &time, - npages_current); + bool epoch_advanced = decay_maybe_advance_epoch( + decay, &time, npages_current); if (eagerness == PAC_PURGE_ALWAYS || (epoch_advanced && eagerness == PAC_PURGE_ON_EPOCH_ADVANCE)) { size_t npages_limit = decay_npages_limit_get(decay); @@ -662,9 +661,9 @@ pac_maybe_decay_purge(tsdn_t *tsdn, pac_t *pac, decay_t *decay, bool pac_decay_ms_set(tsdn_t *tsdn, pac_t *pac, extent_state_t state, ssize_t decay_ms, pac_purge_eagerness_t eagerness) { - decay_t *decay; + decay_t *decay; pac_decay_stats_t *decay_stats; - ecache_t *ecache; + ecache_t *ecache; pac_decay_data_get(pac, state, &decay, &decay_stats, &ecache); if (!decay_ms_valid(decay_ms)) { @@ -691,9 +690,9 @@ pac_decay_ms_set(tsdn_t *tsdn, pac_t *pac, extent_state_t state, ssize_t pac_decay_ms_get(pac_t *pac, extent_state_t state) { - decay_t *decay; + decay_t *decay; 
pac_decay_stats_t *decay_stats; - ecache_t *ecache; + ecache_t *ecache; pac_decay_data_get(pac, state, &decay, &decay_stats, &ecache); return decay_ms_read(decay); } @@ -722,9 +721,10 @@ pac_destroy(tsdn_t *tsdn, pac_t *pac) { * dss-based extents for later reuse. */ ehooks_t *ehooks = pac_ehooks_get(pac); - edata_t *edata; - while ((edata = ecache_evict(tsdn, pac, ehooks, - &pac->ecache_retained, 0)) != NULL) { + edata_t *edata; + while ( + (edata = ecache_evict(tsdn, pac, ehooks, &pac->ecache_retained, 0)) + != NULL) { extent_destroy_wrapper(tsdn, pac, ehooks, edata); } } diff --git a/src/pages.c b/src/pages.c index d53e0fef..88301c2b 100644 --- a/src/pages.c +++ b/src/pages.c @@ -8,46 +8,42 @@ #include "jemalloc/internal/malloc_io.h" #ifdef JEMALLOC_SYSCTL_VM_OVERCOMMIT -#include -#ifdef __FreeBSD__ -#include -#endif +# include +# ifdef __FreeBSD__ +# include +# endif #endif #ifdef __NetBSD__ -#include /* ilog2 */ +# include /* ilog2 */ #endif #ifdef JEMALLOC_HAVE_VM_MAKE_TAG -#define PAGES_FD_TAG VM_MAKE_TAG(254U) +# define PAGES_FD_TAG VM_MAKE_TAG(254U) #else -#define PAGES_FD_TAG -1 +# define PAGES_FD_TAG -1 #endif #if defined(JEMALLOC_HAVE_PRCTL) && defined(JEMALLOC_PAGEID) -#include -#ifndef PR_SET_VMA -#define PR_SET_VMA 0x53564d41 -#define PR_SET_VMA_ANON_NAME 0 -#endif +# include +# ifndef PR_SET_VMA +# define PR_SET_VMA 0x53564d41 +# define PR_SET_VMA_ANON_NAME 0 +# endif #endif /******************************************************************************/ /* Data. */ /* Actual operating system page size, detected during bootstrap, <= PAGE. 
*/ -size_t os_page; +size_t os_page; #ifndef _WIN32 -# define PAGES_PROT_COMMIT (PROT_READ | PROT_WRITE) -# define PAGES_PROT_DECOMMIT (PROT_NONE) -static int mmap_flags; +# define PAGES_PROT_COMMIT (PROT_READ | PROT_WRITE) +# define PAGES_PROT_DECOMMIT (PROT_NONE) +static int mmap_flags; #endif -static bool os_overcommits; +static bool os_overcommits; const char *const thp_mode_names[] = { - "default", - "always", - "never", - "not supported" -}; + "default", "always", "never", "not supported"}; thp_mode_t opt_thp = THP_MODE_DEFAULT; thp_mode_t init_system_thp_mode; @@ -66,15 +62,16 @@ static int madvise_dont_need_zeros_is_faulty = -1; * * [1]: https://patchwork.kernel.org/patch/10576637/ */ -static int madvise_MADV_DONTNEED_zeroes_pages(void) -{ +static int +madvise_MADV_DONTNEED_zeroes_pages(void) { size_t size = PAGE; - void * addr = mmap(NULL, size, PROT_READ|PROT_WRITE, - MAP_PRIVATE|MAP_ANONYMOUS, -1, 0); + void *addr = mmap(NULL, size, PROT_READ | PROT_WRITE, + MAP_PRIVATE | MAP_ANONYMOUS, -1, 0); if (addr == MAP_FAILED) { - malloc_write(": Cannot allocate memory for " + malloc_write( + ": Cannot allocate memory for " "MADV_DONTNEED check\n"); if (opt_abort) { abort(); @@ -94,7 +91,8 @@ static int madvise_MADV_DONTNEED_zeroes_pages(void) } if (munmap(addr, size) != 0) { - malloc_write(": Cannot deallocate memory for " + malloc_write( + ": Cannot deallocate memory for " "MADV_DONTNEED check\n"); if (opt_abort) { abort(); @@ -106,18 +104,18 @@ static int madvise_MADV_DONTNEED_zeroes_pages(void) #endif #ifdef JEMALLOC_PAGEID -static int os_page_id(void *addr, size_t size, const char *name) -{ -#ifdef JEMALLOC_HAVE_PRCTL +static int +os_page_id(void *addr, size_t size, const char *name) { +# ifdef JEMALLOC_HAVE_PRCTL /* * While parsing `/proc//maps` file, the block could appear as * 7f4836000000-7f4836800000 rw-p 00000000 00:00 0 [anon:jemalloc_pg_overcommit]` */ return prctl(PR_SET_VMA, PR_SET_VMA_ANON_NAME, (uintptr_t)addr, size, (uintptr_t)name); -#else +# 
else return 0; -#endif +# endif } #endif @@ -156,7 +154,7 @@ os_pages_map(void *addr, size_t size, size_t alignment, bool *commit) { */ { int flags = mmap_flags; -#ifdef __NetBSD__ +# ifdef __NetBSD__ /* * On NetBSD PAGE for a platform is defined to the * maximum page size of all machine architectures @@ -167,7 +165,7 @@ os_pages_map(void *addr, size_t size, size_t alignment, bool *commit) { unsigned int a = ilog2(MAX(alignment, PAGE)); flags |= MAP_ALIGNED(a); } -#endif +# endif int prot = *commit ? PAGES_PROT_COMMIT : PAGES_PROT_DECOMMIT; ret = mmap(addr, size, prot, flags, PAGES_FD_TAG, 0); @@ -184,8 +182,8 @@ os_pages_map(void *addr, size_t size, size_t alignment, bool *commit) { ret = NULL; } #endif - assert(ret == NULL || (addr == NULL && ret != addr) || (addr != NULL && - ret == addr)); + assert(ret == NULL || (addr == NULL && ret != addr) + || (addr != NULL && ret == addr)); #ifdef JEMALLOC_PAGEID int n = os_page_id(ret, size, os_overcommits ? "jemalloc_pg_overcommit" : "jemalloc_pg"); @@ -195,8 +193,8 @@ os_pages_map(void *addr, size_t size, size_t alignment, bool *commit) { } static void * -os_pages_trim(void *addr, size_t alloc_size, size_t leadsize, size_t size, - bool *commit) { +os_pages_trim( + void *addr, size_t alloc_size, size_t leadsize, size_t size, bool *commit) { void *ret = (void *)((byte_t *)addr + leadsize); assert(alloc_size >= leadsize + size); @@ -237,13 +235,15 @@ os_pages_unmap(void *addr, size_t size) { char buf[BUFERROR_BUF]; buferror(get_errno(), buf, sizeof(buf)); - malloc_printf(": Error in " + malloc_printf( + ": Error in " #ifdef _WIN32 "VirtualFree" #else "munmap" #endif - "(): %s\n", buf); + "(): %s\n", + buf); if (opt_abort) { abort(); } @@ -350,13 +350,14 @@ os_pages_commit(void *addr, size_t size, bool commit) { assert(PAGE_CEILING(size) == size); #ifdef _WIN32 - return (commit ? (addr != VirtualAlloc(addr, size, MEM_COMMIT, - PAGE_READWRITE)) : (!VirtualFree(addr, size, MEM_DECOMMIT))); + return (commit + ? 
(addr != VirtualAlloc(addr, size, MEM_COMMIT, PAGE_READWRITE)) + : (!VirtualFree(addr, size, MEM_DECOMMIT))); #else { - int prot = commit ? PAGES_PROT_COMMIT : PAGES_PROT_DECOMMIT; - void *result = mmap(addr, size, prot, mmap_flags | MAP_FIXED, - PAGES_FD_TAG, 0); + int prot = commit ? PAGES_PROT_COMMIT : PAGES_PROT_DECOMMIT; + void *result = mmap( + addr, size, prot, mmap_flags | MAP_FIXED, PAGES_FD_TAG, 0); if (result == MAP_FAILED) { return true; } @@ -395,8 +396,8 @@ pages_decommit(void *addr, size_t size) { void pages_mark_guards(void *head, void *tail) { assert(head != NULL || tail != NULL); - assert(head == NULL || tail == NULL || - (uintptr_t)head < (uintptr_t)tail); + assert( + head == NULL || tail == NULL || (uintptr_t)head < (uintptr_t)tail); #ifdef JEMALLOC_HAVE_MPROTECT if (head != NULL) { mprotect(head, PAGE, PROT_NONE); @@ -418,13 +419,12 @@ pages_mark_guards(void *head, void *tail) { void pages_unmark_guards(void *head, void *tail) { assert(head != NULL || tail != NULL); - assert(head == NULL || tail == NULL || - (uintptr_t)head < (uintptr_t)tail); + assert( + head == NULL || tail == NULL || (uintptr_t)head < (uintptr_t)tail); #ifdef JEMALLOC_HAVE_MPROTECT - bool head_and_tail = (head != NULL) && (tail != NULL); - size_t range = head_and_tail ? - (uintptr_t)tail - (uintptr_t)head + PAGE : - SIZE_T_MAX; + bool head_and_tail = (head != NULL) && (tail != NULL); + size_t range = head_and_tail ? (uintptr_t)tail - (uintptr_t)head + PAGE + : SIZE_T_MAX; /* * The amount of work that the kernel does in mprotect depends on the * range argument. 
SC_LARGE_MINCLASS is an arbitrary threshold chosen @@ -473,17 +473,18 @@ pages_purge_lazy(void *addr, size_t size) { return false; #elif defined(JEMALLOC_PURGE_MADVISE_FREE) return (madvise(addr, size, -# ifdef MADV_FREE - MADV_FREE -# else - JEMALLOC_MADV_FREE -# endif - ) != 0); -#elif defined(JEMALLOC_PURGE_MADVISE_DONTNEED) && \ - !defined(JEMALLOC_PURGE_MADVISE_DONTNEED_ZEROS) +# ifdef MADV_FREE + MADV_FREE +# else + JEMALLOC_MADV_FREE +# endif + ) + != 0); +#elif defined(JEMALLOC_PURGE_MADVISE_DONTNEED) \ + && !defined(JEMALLOC_PURGE_MADVISE_DONTNEED_ZEROS) return (madvise(addr, size, MADV_DONTNEED) != 0); -#elif defined(JEMALLOC_PURGE_POSIX_MADVISE_DONTNEED) && \ - !defined(JEMALLOC_PURGE_POSIX_MADVISE_DONTNEED_ZEROS) +#elif defined(JEMALLOC_PURGE_POSIX_MADVISE_DONTNEED) \ + && !defined(JEMALLOC_PURGE_POSIX_MADVISE_DONTNEED_ZEROS) return (posix_madvise(addr, size, POSIX_MADV_DONTNEED) != 0); #else not_reached(); @@ -499,14 +500,14 @@ pages_purge_forced(void *addr, size_t size) { return true; } -#if defined(JEMALLOC_PURGE_MADVISE_DONTNEED) && \ - defined(JEMALLOC_PURGE_MADVISE_DONTNEED_ZEROS) - return (unlikely(madvise_dont_need_zeros_is_faulty) || - madvise(addr, size, MADV_DONTNEED) != 0); -#elif defined(JEMALLOC_PURGE_POSIX_MADVISE_DONTNEED) && \ - defined(JEMALLOC_PURGE_POSIX_MADVISE_DONTNEED_ZEROS) - return (unlikely(madvise_dont_need_zeros_is_faulty) || - posix_madvise(addr, size, POSIX_MADV_DONTNEED) != 0); +#if defined(JEMALLOC_PURGE_MADVISE_DONTNEED) \ + && defined(JEMALLOC_PURGE_MADVISE_DONTNEED_ZEROS) + return (unlikely(madvise_dont_need_zeros_is_faulty) + || madvise(addr, size, MADV_DONTNEED) != 0); +#elif defined(JEMALLOC_PURGE_POSIX_MADVISE_DONTNEED) \ + && defined(JEMALLOC_PURGE_POSIX_MADVISE_DONTNEED_ZEROS) + return (unlikely(madvise_dont_need_zeros_is_faulty) + || posix_madvise(addr, size, POSIX_MADV_DONTNEED) != 0); #elif defined(JEMALLOC_MAPS_COALESCE) /* Try to overlay a new demand-zeroed mapping. 
*/ return pages_commit(addr, size); @@ -579,13 +580,13 @@ pages_collapse(void *addr, size_t size) { * means we can't call pages_collapse on freshly mapped memory region. * See madvise(2) man page for more details. */ -#if defined(JEMALLOC_HAVE_MADVISE_COLLAPSE) && \ - (defined(MADV_COLLAPSE) || defined(JEMALLOC_MADV_COLLAPSE)) -# if defined(MADV_COLLAPSE) +#if defined(JEMALLOC_HAVE_MADVISE_COLLAPSE) \ + && (defined(MADV_COLLAPSE) || defined(JEMALLOC_MADV_COLLAPSE)) +# if defined(MADV_COLLAPSE) return (madvise(addr, size, MADV_COLLAPSE) != 0); -# elif defined(JEMALLOC_MADV_COLLAPSE) +# elif defined(JEMALLOC_MADV_COLLAPSE) return (madvise(addr, size, JEMALLOC_MADV_COLLAPSE) != 0); -# endif +# endif #else return true; #endif @@ -618,8 +619,8 @@ pages_dodump(void *addr, size_t size) { } #ifdef JEMALLOC_HAVE_PROCESS_MADVISE -#include -#include +# include +# include static int pidfd; static bool @@ -640,15 +641,16 @@ init_process_madvise(void) { return false; } -#ifdef SYS_process_madvise -#define JE_SYS_PROCESS_MADVISE_NR SYS_process_madvise -#else -#define JE_SYS_PROCESS_MADVISE_NR EXPERIMENTAL_SYS_PROCESS_MADVISE_NR -#endif +# ifdef SYS_process_madvise +# define JE_SYS_PROCESS_MADVISE_NR SYS_process_madvise +# else +# define JE_SYS_PROCESS_MADVISE_NR \ + EXPERIMENTAL_SYS_PROCESS_MADVISE_NR +# endif static bool -pages_purge_process_madvise_impl(void *vec, size_t vec_len, - size_t total_bytes) { +pages_purge_process_madvise_impl( + void *vec, size_t vec_len, size_t total_bytes) { size_t purged_bytes = (size_t)syscall(JE_SYS_PROCESS_MADVISE_NR, pidfd, (struct iovec *)vec, vec_len, MADV_DONTNEED, 0); @@ -663,8 +665,8 @@ init_process_madvise(void) { } static bool -pages_purge_process_madvise_impl(void *vec, size_t vec_len, - size_t total_bytes) { +pages_purge_process_madvise_impl( + void *vec, size_t vec_len, size_t total_bytes) { not_reached(); return true; } @@ -700,11 +702,11 @@ os_page_detect(void) { #ifdef JEMALLOC_SYSCTL_VM_OVERCOMMIT static bool 
os_overcommits_sysctl(void) { - int vm_overcommit; + int vm_overcommit; size_t sz; sz = sizeof(vm_overcommit); -#if defined(__FreeBSD__) && defined(VM_OVERCOMMIT) +# if defined(__FreeBSD__) && defined(VM_OVERCOMMIT) int mib[2]; mib[0] = CTL_VM; @@ -712,11 +714,11 @@ os_overcommits_sysctl(void) { if (sysctl(mib, 2, &vm_overcommit, &sz, NULL, 0) != 0) { return false; /* Error. */ } -#else +# else if (sysctlbyname("vm.overcommit", &vm_overcommit, &sz, NULL, 0) != 0) { return false; /* Error. */ } -#endif +# endif return ((vm_overcommit & 0x3) == 0); } @@ -730,17 +732,18 @@ os_overcommits_sysctl(void) { */ static bool os_overcommits_proc(void) { - int fd; + int fd; char buf[1]; -#if defined(O_CLOEXEC) - fd = malloc_open("/proc/sys/vm/overcommit_memory", O_RDONLY | O_CLOEXEC); -#else +# if defined(O_CLOEXEC) + fd = malloc_open( + "/proc/sys/vm/overcommit_memory", O_RDONLY | O_CLOEXEC); +# else fd = malloc_open("/proc/sys/vm/overcommit_memory", O_RDONLY); if (fd != -1) { fcntl(fd, F_SETFD, fcntl(fd, F_GETFD) | FD_CLOEXEC); } -#endif +# endif if (fd == -1) { return false; /* Error. 
*/ @@ -763,20 +766,20 @@ os_overcommits_proc(void) { #endif void -pages_set_thp_state (void *ptr, size_t size) { +pages_set_thp_state(void *ptr, size_t size) { if (opt_thp == thp_mode_default || opt_thp == init_system_thp_mode) { return; } - assert(opt_thp != thp_mode_not_supported && - init_system_thp_mode != thp_mode_not_supported); + assert(opt_thp != thp_mode_not_supported + && init_system_thp_mode != thp_mode_not_supported); if (opt_thp == thp_mode_always && init_system_thp_mode != thp_mode_never) { assert(init_system_thp_mode == thp_mode_default); pages_huge_unaligned(ptr, size); } else if (opt_thp == thp_mode_never) { - assert(init_system_thp_mode == thp_mode_default || - init_system_thp_mode == thp_mode_always); + assert(init_system_thp_mode == thp_mode_default + || init_system_thp_mode == thp_mode_always); pages_nohuge_unaligned(ptr, size); } } @@ -794,7 +797,7 @@ init_thp_state(void) { static const char sys_state_madvise[] = "always [madvise] never\n"; static const char sys_state_always[] = "[always] madvise never\n"; static const char sys_state_never[] = "always madvise [never]\n"; - char buf[sizeof(sys_state_madvise)]; + char buf[sizeof(sys_state_madvise)]; int fd = malloc_open( "/sys/kernel/mm/transparent_hugepage/enabled", O_RDONLY); @@ -839,10 +842,13 @@ pages_boot(void) { #ifdef JEMALLOC_PURGE_MADVISE_DONTNEED_ZEROS if (!opt_trust_madvise) { - madvise_dont_need_zeros_is_faulty = !madvise_MADV_DONTNEED_zeroes_pages(); + madvise_dont_need_zeros_is_faulty = + !madvise_MADV_DONTNEED_zeroes_pages(); if (madvise_dont_need_zeros_is_faulty) { - malloc_write(": MADV_DONTNEED does not work (memset will be used instead)\n"); - malloc_write(": (This is the expected behaviour if you are running under QEMU)\n"); + malloc_write( + ": MADV_DONTNEED does not work (memset will be used instead)\n"); + malloc_write( + ": (This is the expected behaviour if you are running under QEMU)\n"); } } else { /* In case opt_trust_madvise is disable, @@ -859,11 +865,11 @@ 
pages_boot(void) { os_overcommits = os_overcommits_sysctl(); #elif defined(JEMALLOC_PROC_SYS_VM_OVERCOMMIT_MEMORY) os_overcommits = os_overcommits_proc(); -# ifdef MAP_NORESERVE +# ifdef MAP_NORESERVE if (os_overcommits) { mmap_flags |= MAP_NORESERVE; } -# endif +# endif #elif defined(__NetBSD__) os_overcommits = true; #else @@ -879,8 +885,9 @@ pages_boot(void) { #else /* Detect lazy purge runtime support. */ if (pages_can_purge_lazy) { - bool committed = false; - void *madv_free_page = os_pages_map(NULL, PAGE, PAGE, &committed); + bool committed = false; + void *madv_free_page = os_pages_map( + NULL, PAGE, PAGE, &committed); if (madv_free_page == NULL) { return true; } diff --git a/src/pai.c b/src/pai.c index e8cddfc3..3114e658 100644 --- a/src/pai.c +++ b/src/pai.c @@ -6,7 +6,7 @@ pai_alloc_batch_default(tsdn_t *tsdn, pai_t *self, size_t size, size_t nallocs, edata_list_active_t *results, bool frequent_reuse, bool *deferred_work_generated) { for (size_t i = 0; i < nallocs; i++) { - bool deferred_by_alloc = false; + bool deferred_by_alloc = false; edata_t *edata = pai_alloc(tsdn, self, size, PAGE, /* zero */ false, /* guarded */ false, frequent_reuse, &deferred_by_alloc); @@ -20,8 +20,8 @@ pai_alloc_batch_default(tsdn_t *tsdn, pai_t *self, size_t size, size_t nallocs, } void -pai_dalloc_batch_default(tsdn_t *tsdn, pai_t *self, - edata_list_active_t *list, bool *deferred_work_generated) { +pai_dalloc_batch_default(tsdn_t *tsdn, pai_t *self, edata_list_active_t *list, + bool *deferred_work_generated) { edata_t *edata; while ((edata = edata_list_active_first(list)) != NULL) { bool deferred_by_dalloc = false; diff --git a/src/peak_event.c b/src/peak_event.c index e7f3ced6..e7f54dba 100644 --- a/src/peak_event.c +++ b/src/peak_event.c @@ -12,7 +12,7 @@ void peak_event_update(tsd_t *tsd) { uint64_t alloc = tsd_thread_allocated_get(tsd); uint64_t dalloc = tsd_thread_deallocated_get(tsd); - peak_t *peak = tsd_peakp_get(tsd); + peak_t *peak = tsd_peakp_get(tsd); 
peak_update(peak, alloc, dalloc); } @@ -32,7 +32,7 @@ void peak_event_zero(tsd_t *tsd) { uint64_t alloc = tsd_thread_allocated_get(tsd); uint64_t dalloc = tsd_thread_deallocated_get(tsd); - peak_t *peak = tsd_peakp_get(tsd); + peak_t *peak = tsd_peakp_get(tsd); peak_set_zero(peak, alloc, dalloc); } @@ -65,8 +65,8 @@ peak_event_enabled(void) { /* Handles alloc and dalloc */ te_base_cb_t peak_te_handler = { - .enabled = &peak_event_enabled, - .new_event_wait = &peak_event_new_event_wait, - .postponed_event_wait = &peak_event_postponed_event_wait, - .event_handler = &peak_event_handler, + .enabled = &peak_event_enabled, + .new_event_wait = &peak_event_new_event_wait, + .postponed_event_wait = &peak_event_postponed_event_wait, + .event_handler = &peak_event_handler, }; diff --git a/src/prof.c b/src/prof.c index ec13afbd..a833fed5 100644 --- a/src/prof.c +++ b/src/prof.c @@ -24,21 +24,21 @@ /* Data. */ -bool opt_prof = false; -bool opt_prof_active = true; -bool opt_prof_thread_active_init = true; +bool opt_prof = false; +bool opt_prof_active = true; +bool opt_prof_thread_active_init = true; unsigned opt_prof_bt_max = PROF_BT_MAX_DEFAULT; -size_t opt_lg_prof_sample = LG_PROF_SAMPLE_DEFAULT; -ssize_t opt_lg_prof_interval = LG_PROF_INTERVAL_DEFAULT; -bool opt_prof_gdump = false; -bool opt_prof_final = false; -bool opt_prof_leak = false; -bool opt_prof_leak_error = false; -bool opt_prof_accum = false; -bool opt_prof_pid_namespace = false; -char opt_prof_prefix[PROF_DUMP_FILENAME_LEN]; -bool opt_prof_sys_thread_name = false; -bool opt_prof_unbias = true; +size_t opt_lg_prof_sample = LG_PROF_SAMPLE_DEFAULT; +ssize_t opt_lg_prof_interval = LG_PROF_INTERVAL_DEFAULT; +bool opt_prof_gdump = false; +bool opt_prof_final = false; +bool opt_prof_leak = false; +bool opt_prof_leak_error = false; +bool opt_prof_accum = false; +bool opt_prof_pid_namespace = false; +char opt_prof_prefix[PROF_DUMP_FILENAME_LEN]; +bool opt_prof_sys_thread_name = false; +bool opt_prof_unbias = true; /* 
Accessed via prof_sample_event_handler(). */ static counter_accum_t prof_idump_accumulated; @@ -47,28 +47,28 @@ static counter_accum_t prof_idump_accumulated; * Initialized as opt_prof_active, and accessed via * prof_active_[gs]et{_unlocked,}(). */ -bool prof_active_state; +bool prof_active_state; static malloc_mutex_t prof_active_mtx; /* * Initialized as opt_prof_thread_active_init, and accessed via * prof_thread_active_init_[gs]et(). */ -static bool prof_thread_active_init; +static bool prof_thread_active_init; static malloc_mutex_t prof_thread_active_init_mtx; /* * Initialized as opt_prof_gdump, and accessed via * prof_gdump_[gs]et{_unlocked,}(). */ -bool prof_gdump_val; +bool prof_gdump_val; static malloc_mutex_t prof_gdump_mtx; uint64_t prof_interval = 0; size_t lg_prof_sample; -static uint64_t next_thr_uid; +static uint64_t next_thr_uid; static malloc_mutex_t next_thr_uid_mtx; /* Do not dump any profiles until bootstrapping is complete. */ @@ -113,16 +113,16 @@ prof_alloc_rollback(tsd_t *tsd, prof_tctx_t *tctx) { } void -prof_malloc_sample_object(tsd_t *tsd, const void *ptr, size_t size, - size_t usize, prof_tctx_t *tctx) { +prof_malloc_sample_object( + tsd_t *tsd, const void *ptr, size_t size, size_t usize, prof_tctx_t *tctx) { cassert(config_prof); if (opt_prof_sys_thread_name) { prof_sys_thread_name_fetch(tsd); } - edata_t *edata = emap_edata_lookup(tsd_tsdn(tsd), &arena_emap_global, - ptr); + edata_t *edata = emap_edata_lookup( + tsd_tsdn(tsd), &arena_emap_global, ptr); prof_info_set(tsd, edata, tctx, size); szind_t szind = sz_size2index(usize); @@ -173,8 +173,8 @@ prof_malloc_sample_object(tsd_t *tsd, const void *ptr, size_t size, } void -prof_free_sampled_object(tsd_t *tsd, const void *ptr, size_t usize, - prof_info_t *prof_info) { +prof_free_sampled_object( + tsd_t *tsd, const void *ptr, size_t usize, prof_info_t *prof_info) { cassert(config_prof); assert(prof_info != NULL); @@ -279,10 +279,12 @@ prof_sample_new_event_wait(tsd_t *tsd) { * otherwise 
bytes_until_sample would be 0 if u is exactly 1.0. */ uint64_t r = prng_lg_range_u64(tsd_prng_statep_get(tsd), 53); - double u = (r == 0U) ? 1.0 : (double)((long double)r * - (1.0L/9007199254740992.0L)); - return (uint64_t)(log(u) / - log(1.0 - (1.0 / (double)((uint64_t)1U << lg_prof_sample)))) + double u = (r == 0U) + ? 1.0 + : (double)((long double)r * (1.0L / 9007199254740992.0L)); + return (uint64_t)(log(u) + / log( + 1.0 - (1.0 / (double)((uint64_t)1U << lg_prof_sample)))) + (uint64_t)1U; #else not_reached(); @@ -322,9 +324,9 @@ prof_sample_enabled(void) { } te_base_cb_t prof_sample_te_handler = { - .enabled = &prof_sample_enabled, - .new_event_wait = &prof_sample_new_event_wait, - /* + .enabled = &prof_sample_enabled, + .new_event_wait = &prof_sample_new_event_wait, + /* * The postponed wait time for prof sample event is computed as if we * want a new wait time (i.e. as if the event were triggered). If we * instead postpone to the immediate next allocation, like how we're @@ -332,8 +334,8 @@ te_base_cb_t prof_sample_te_handler = { * the allocation immediately following a reentrancy always comes from * the same stack trace. */ - .postponed_event_wait = &prof_sample_new_event_wait, - .event_handler = &prof_sample_event_handler, + .postponed_event_wait = &prof_sample_new_event_wait, + .event_handler = &prof_sample_event_handler, }; static void @@ -361,7 +363,7 @@ prof_idump_accum_init(void) { void prof_idump(tsdn_t *tsdn) { - tsd_t *tsd; + tsd_t *tsd; prof_tdata_t *tdata; cassert(config_prof); @@ -400,7 +402,7 @@ prof_mdump(tsd_t *tsd, const char *filename) { void prof_gdump(tsdn_t *tsdn) { - tsd_t *tsd; + tsd_t *tsd; prof_tdata_t *tdata; cassert(config_prof); @@ -447,7 +449,7 @@ prof_tdata_t * prof_tdata_reinit(tsd_t *tsd, prof_tdata_t *tdata) { uint64_t thr_uid = tdata->thr_uid; uint64_t thr_discrim = tdata->thr_discrim + 1; - bool active = tdata->active; + bool active = tdata->active; /* Keep a local copy of the thread name, before detaching. 
*/ prof_thread_name_assert(tdata); @@ -455,8 +457,8 @@ prof_tdata_reinit(tsd_t *tsd, prof_tdata_t *tdata) { strncpy(thread_name, tdata->thread_name, PROF_THREAD_NAME_MAX_LEN); prof_tdata_detach(tsd, tdata); - return prof_tdata_init_impl(tsd, thr_uid, thr_discrim, thread_name, - active); + return prof_tdata_init_impl( + tsd, thr_uid, thr_discrim, thread_name, active); } void @@ -595,8 +597,8 @@ prof_backtrace_hook_set(prof_backtrace_hook_t hook) { prof_backtrace_hook_t prof_backtrace_hook_get(void) { - return (prof_backtrace_hook_t)atomic_load_p(&prof_backtrace_hook, - ATOMIC_ACQUIRE); + return (prof_backtrace_hook_t)atomic_load_p( + &prof_backtrace_hook, ATOMIC_ACQUIRE); } void @@ -606,8 +608,7 @@ prof_dump_hook_set(prof_dump_hook_t hook) { prof_dump_hook_t prof_dump_hook_get(void) { - return (prof_dump_hook_t)atomic_load_p(&prof_dump_hook, - ATOMIC_ACQUIRE); + return (prof_dump_hook_t)atomic_load_p(&prof_dump_hook, ATOMIC_ACQUIRE); } void @@ -617,8 +618,8 @@ prof_sample_hook_set(prof_sample_hook_t hook) { prof_sample_hook_t prof_sample_hook_get(void) { - return (prof_sample_hook_t)atomic_load_p(&prof_sample_hook, - ATOMIC_ACQUIRE); + return (prof_sample_hook_t)atomic_load_p( + &prof_sample_hook, ATOMIC_ACQUIRE); } void @@ -628,16 +629,16 @@ prof_sample_free_hook_set(prof_sample_free_hook_t hook) { prof_sample_free_hook_t prof_sample_free_hook_get(void) { - return (prof_sample_free_hook_t)atomic_load_p(&prof_sample_free_hook, - ATOMIC_ACQUIRE); + return (prof_sample_free_hook_t)atomic_load_p( + &prof_sample_free_hook, ATOMIC_ACQUIRE); } void prof_boot0(void) { cassert(config_prof); - memcpy(opt_prof_prefix, PROF_PREFIX_DEFAULT, - sizeof(PROF_PREFIX_DEFAULT)); + memcpy( + opt_prof_prefix, PROF_PREFIX_DEFAULT, sizeof(PROF_PREFIX_DEFAULT)); } void @@ -661,8 +662,8 @@ prof_boot1(void) { opt_prof_gdump = false; } else if (opt_prof) { if (opt_lg_prof_interval >= 0) { - prof_interval = (((uint64_t)1U) << - opt_lg_prof_interval); + prof_interval = (((uint64_t)1U) + << 
opt_lg_prof_interval); } } } @@ -676,41 +677,40 @@ prof_boot2(tsd_t *tsd, base_t *base) { * stats when opt_prof is false. */ if (malloc_mutex_init(&prof_active_mtx, "prof_active", - WITNESS_RANK_PROF_ACTIVE, malloc_mutex_rank_exclusive)) { + WITNESS_RANK_PROF_ACTIVE, malloc_mutex_rank_exclusive)) { return true; } if (malloc_mutex_init(&prof_gdump_mtx, "prof_gdump", - WITNESS_RANK_PROF_GDUMP, malloc_mutex_rank_exclusive)) { + WITNESS_RANK_PROF_GDUMP, malloc_mutex_rank_exclusive)) { return true; } if (malloc_mutex_init(&prof_thread_active_init_mtx, - "prof_thread_active_init", WITNESS_RANK_PROF_THREAD_ACTIVE_INIT, - malloc_mutex_rank_exclusive)) { + "prof_thread_active_init", WITNESS_RANK_PROF_THREAD_ACTIVE_INIT, + malloc_mutex_rank_exclusive)) { return true; } if (malloc_mutex_init(&bt2gctx_mtx, "prof_bt2gctx", - WITNESS_RANK_PROF_BT2GCTX, malloc_mutex_rank_exclusive)) { + WITNESS_RANK_PROF_BT2GCTX, malloc_mutex_rank_exclusive)) { return true; } if (malloc_mutex_init(&tdatas_mtx, "prof_tdatas", - WITNESS_RANK_PROF_TDATAS, malloc_mutex_rank_exclusive)) { + WITNESS_RANK_PROF_TDATAS, malloc_mutex_rank_exclusive)) { return true; } if (malloc_mutex_init(&next_thr_uid_mtx, "prof_next_thr_uid", - WITNESS_RANK_PROF_NEXT_THR_UID, malloc_mutex_rank_exclusive)) { + WITNESS_RANK_PROF_NEXT_THR_UID, malloc_mutex_rank_exclusive)) { return true; } if (malloc_mutex_init(&prof_stats_mtx, "prof_stats", - WITNESS_RANK_PROF_STATS, malloc_mutex_rank_exclusive)) { + WITNESS_RANK_PROF_STATS, malloc_mutex_rank_exclusive)) { return true; } - if (malloc_mutex_init(&prof_dump_filename_mtx, - "prof_dump_filename", WITNESS_RANK_PROF_DUMP_FILENAME, - malloc_mutex_rank_exclusive)) { + if (malloc_mutex_init(&prof_dump_filename_mtx, "prof_dump_filename", + WITNESS_RANK_PROF_DUMP_FILENAME, malloc_mutex_rank_exclusive)) { return true; } if (malloc_mutex_init(&prof_dump_mtx, "prof_dump", - WITNESS_RANK_PROF_DUMP, malloc_mutex_rank_exclusive)) { + WITNESS_RANK_PROF_DUMP, malloc_mutex_rank_exclusive)) { 
return true; } @@ -730,8 +730,8 @@ prof_boot2(tsd_t *tsd, base_t *base) { return true; } - if (opt_prof_final && opt_prof_prefix[0] != '\0' && - atexit(prof_fdump) != 0) { + if (opt_prof_final && opt_prof_prefix[0] != '\0' + && atexit(prof_fdump) != 0) { malloc_write(": Error in atexit()\n"); if (opt_abort) { abort(); @@ -755,8 +755,8 @@ prof_boot2(tsd_t *tsd, base_t *base) { } for (unsigned i = 0; i < PROF_NCTX_LOCKS; i++) { if (malloc_mutex_init(&gctx_locks[i], "prof_gctx", - WITNESS_RANK_PROF_GCTX, - malloc_mutex_rank_exclusive)) { + WITNESS_RANK_PROF_GCTX, + malloc_mutex_rank_exclusive)) { return true; } } @@ -768,8 +768,8 @@ prof_boot2(tsd_t *tsd, base_t *base) { } for (unsigned i = 0; i < PROF_NTDATA_LOCKS; i++) { if (malloc_mutex_init(&tdata_locks[i], "prof_tdata", - WITNESS_RANK_PROF_TDATA, - malloc_mutex_rank_exclusive)) { + WITNESS_RANK_PROF_TDATA, + malloc_mutex_rank_exclusive)) { return true; } } @@ -820,8 +820,8 @@ prof_postfork_parent(tsdn_t *tsdn) { if (config_prof && opt_prof) { unsigned i; - malloc_mutex_postfork_parent(tsdn, - &prof_thread_active_init_mtx); + malloc_mutex_postfork_parent( + tsdn, &prof_thread_active_init_mtx); malloc_mutex_postfork_parent(tsdn, &next_thr_uid_mtx); malloc_mutex_postfork_parent(tsdn, &prof_stats_mtx); malloc_mutex_postfork_parent(tsdn, &prof_recent_alloc_mtx); diff --git a/src/prof_data.c b/src/prof_data.c index edc5c558..7aa047ac 100644 --- a/src/prof_data.c +++ b/src/prof_data.c @@ -36,7 +36,7 @@ malloc_mutex_t prof_dump_mtx; * and destroying mutexes causes complications for systems that allocate when * creating/destroying mutexes. */ -malloc_mutex_t *gctx_locks; +malloc_mutex_t *gctx_locks; static atomic_u_t cum_gctxs; /* Atomic counter. 
*/ /* @@ -69,33 +69,32 @@ static int prof_tctx_comp(const prof_tctx_t *a, const prof_tctx_t *b) { uint64_t a_thr_uid = a->thr_uid; uint64_t b_thr_uid = b->thr_uid; - int ret = (a_thr_uid > b_thr_uid) - (a_thr_uid < b_thr_uid); + int ret = (a_thr_uid > b_thr_uid) - (a_thr_uid < b_thr_uid); if (ret == 0) { uint64_t a_thr_discrim = a->thr_discrim; uint64_t b_thr_discrim = b->thr_discrim; - ret = (a_thr_discrim > b_thr_discrim) - (a_thr_discrim < - b_thr_discrim); + ret = (a_thr_discrim > b_thr_discrim) + - (a_thr_discrim < b_thr_discrim); if (ret == 0) { uint64_t a_tctx_uid = a->tctx_uid; uint64_t b_tctx_uid = b->tctx_uid; - ret = (a_tctx_uid > b_tctx_uid) - (a_tctx_uid < - b_tctx_uid); + ret = (a_tctx_uid > b_tctx_uid) + - (a_tctx_uid < b_tctx_uid); } } return ret; } /* NOLINTBEGIN(performance-no-int-to-ptr) */ -rb_gen(static UNUSED, tctx_tree_, prof_tctx_tree_t, prof_tctx_t, - tctx_link, prof_tctx_comp) -/* NOLINTEND(performance-no-int-to-ptr) */ +rb_gen(static UNUSED, tctx_tree_, prof_tctx_tree_t, prof_tctx_t, tctx_link, + prof_tctx_comp) + /* NOLINTEND(performance-no-int-to-ptr) */ -static int -prof_gctx_comp(const prof_gctx_t *a, const prof_gctx_t *b) { + static int prof_gctx_comp(const prof_gctx_t *a, const prof_gctx_t *b) { unsigned a_len = a->bt.len; unsigned b_len = b->bt.len; unsigned comp_len = (a_len < b_len) ? 
a_len : b_len; - int ret = memcmp(a->bt.vec, b->bt.vec, comp_len * sizeof(void *)); + int ret = memcmp(a->bt.vec, b->bt.vec, comp_len * sizeof(void *)); if (ret == 0) { ret = (a_len > b_len) - (a_len < b_len); } @@ -105,11 +104,10 @@ prof_gctx_comp(const prof_gctx_t *a, const prof_gctx_t *b) { /* NOLINTBEGIN(performance-no-int-to-ptr) */ rb_gen(static UNUSED, gctx_tree_, prof_gctx_tree_t, prof_gctx_t, dump_link, prof_gctx_comp) -/* NOLINTEND(performance-no-int-to-ptr) */ + /* NOLINTEND(performance-no-int-to-ptr) */ -static int -prof_tdata_comp(const prof_tdata_t *a, const prof_tdata_t *b) { - int ret; + static int prof_tdata_comp(const prof_tdata_t *a, const prof_tdata_t *b) { + int ret; uint64_t a_uid = a->thr_uid; uint64_t b_uid = b->thr_uid; @@ -126,12 +124,11 @@ prof_tdata_comp(const prof_tdata_t *a, const prof_tdata_t *b) { /* NOLINTBEGIN(performance-no-int-to-ptr) */ rb_gen(static UNUSED, tdata_tree_, prof_tdata_tree_t, prof_tdata_t, tdata_link, prof_tdata_comp) -/* NOLINTEND(performance-no-int-to-ptr) */ + /* NOLINTEND(performance-no-int-to-ptr) */ -/******************************************************************************/ + /******************************************************************************/ -static malloc_mutex_t * -prof_gctx_mutex_choose(void) { + static malloc_mutex_t *prof_gctx_mutex_choose(void) { unsigned ngctxs = atomic_fetch_add_u(&cum_gctxs, 1, ATOMIC_RELAXED); return &gctx_locks[(ngctxs - 1) % PROF_NCTX_LOCKS]; @@ -145,8 +142,8 @@ prof_tdata_mutex_choose(uint64_t thr_uid) { bool prof_data_init(tsd_t *tsd) { tdata_tree_new(&tdatas); - return ckh_new(tsd, &bt2gctx, PROF_CKH_MINITEMS, - prof_bt_hash, prof_bt_keycomp); + return ckh_new( + tsd, &bt2gctx, PROF_CKH_MINITEMS, prof_bt_hash, prof_bt_keycomp); } static void @@ -195,8 +192,8 @@ prof_gctx_create(tsdn_t *tsdn, prof_bt_t *bt) { */ size_t size = offsetof(prof_gctx_t, vec) + (bt->len * sizeof(void *)); prof_gctx_t *gctx = (prof_gctx_t *)iallocztm(tsdn, size, - 
sz_size2index(size), false, NULL, true, arena_get(TSDN_NULL, 0, true), - true); + sz_size2index(size), false, NULL, true, + arena_get(TSDN_NULL, 0, true), true); if (gctx == NULL) { return NULL; } @@ -215,8 +212,7 @@ prof_gctx_create(tsdn_t *tsdn, prof_bt_t *bt) { } static void -prof_gctx_try_destroy(tsd_t *tsd, prof_tdata_t *tdata_self, - prof_gctx_t *gctx) { +prof_gctx_try_destroy(tsd_t *tsd, prof_tdata_t *tdata_self, prof_gctx_t *gctx) { cassert(config_prof); /* @@ -267,12 +263,12 @@ static bool prof_lookup_global(tsd_t *tsd, prof_bt_t *bt, prof_tdata_t *tdata, void **p_btkey, prof_gctx_t **p_gctx, bool *p_new_gctx) { union { - prof_gctx_t *p; - void *v; + prof_gctx_t *p; + void *v; } gctx, tgctx; union { - prof_bt_t *p; - void *v; + prof_bt_t *p; + void *v; } btkey; bool new_gctx; @@ -316,8 +312,8 @@ prof_lookup_global(tsd_t *tsd, prof_bt_t *bt, prof_tdata_t *tdata, if (tgctx.v != NULL) { /* Lost race to insert. */ - idalloctm(tsd_tsdn(tsd), tgctx.v, NULL, NULL, true, - true); + idalloctm( + tsd_tsdn(tsd), tgctx.v, NULL, NULL, true, true); } } prof_leave(tsd, tdata); @@ -331,11 +327,11 @@ prof_lookup_global(tsd_t *tsd, prof_bt_t *bt, prof_tdata_t *tdata, prof_tctx_t * prof_lookup(tsd_t *tsd, prof_bt_t *bt) { union { - prof_tctx_t *p; - void *v; + prof_tctx_t *p; + void *v; } ret; prof_tdata_t *tdata; - bool not_found; + bool not_found; cassert(config_prof); @@ -349,16 +345,16 @@ prof_lookup(tsd_t *tsd, prof_bt_t *bt) { } malloc_mutex_unlock(tsd_tsdn(tsd), tdata->lock); if (not_found) { - void *btkey; + void *btkey; prof_gctx_t *gctx; - bool new_gctx, error; + bool new_gctx, error; /* * This thread's cache lacks bt. Look for it in the global * cache. */ - if (prof_lookup_global(tsd, bt, tdata, &btkey, &gctx, - &new_gctx)) { + if (prof_lookup_global( + tsd, bt, tdata, &btkey, &gctx, &new_gctx)) { return NULL; } @@ -403,8 +399,8 @@ prof_lookup(tsd_t *tsd, prof_bt_t *bt) { /* Used in unit tests. 
*/ static prof_tdata_t * -prof_tdata_count_iter(prof_tdata_tree_t *tdatas_ptr, prof_tdata_t *tdata, - void *arg) { +prof_tdata_count_iter( + prof_tdata_tree_t *tdatas_ptr, prof_tdata_t *tdata, void *arg) { size_t *tdata_count = (size_t *)arg; (*tdata_count)++; @@ -415,13 +411,13 @@ prof_tdata_count_iter(prof_tdata_tree_t *tdatas_ptr, prof_tdata_t *tdata, /* Used in unit tests. */ size_t prof_tdata_count(void) { - size_t tdata_count = 0; + size_t tdata_count = 0; tsdn_t *tsdn; tsdn = tsdn_fetch(); malloc_mutex_lock(tsdn, &tdatas_mtx); - tdata_tree_iter(&tdatas, NULL, prof_tdata_count_iter, - (void *)&tdata_count); + tdata_tree_iter( + &tdatas, NULL, prof_tdata_count_iter, (void *)&tdata_count); malloc_mutex_unlock(tsdn, &tdatas_mtx); return tdata_count; @@ -430,8 +426,8 @@ prof_tdata_count(void) { /* Used in unit tests. */ size_t prof_bt_count(void) { - size_t bt_count; - tsd_t *tsd; + size_t bt_count; + tsd_t *tsd; prof_tdata_t *tdata; tsd = tsd_fetch(); @@ -477,10 +473,10 @@ prof_thread_name_set_impl(tsd_t *tsd, const char *thread_name) { JEMALLOC_FORMAT_PRINTF(3, 4) static void -prof_dump_printf(write_cb_t *prof_dump_write, void *cbopaque, - const char *format, ...) { +prof_dump_printf( + write_cb_t *prof_dump_write, void *cbopaque, const char *format, ...) 
{ va_list ap; - char buf[PROF_PRINTF_BUFSIZE]; + char buf[PROF_PRINTF_BUFSIZE]; va_start(ap, format); malloc_vsnprintf(buf, sizeof(buf), format, ap); @@ -509,7 +505,8 @@ prof_double_uint64_cast(double d) { } #endif -void prof_unbias_map_init(void) { +void +prof_unbias_map_init(void) { /* See the comment in prof_sample_new_event_wait */ #ifdef JEMALLOC_PROF for (szind_t i = 0; i < SC_NSIZES; i++) { @@ -621,8 +618,8 @@ prof_do_unbias(uint64_t c_out_shifted_i, uint64_t s_out_i, uint64_t *r_c_in, } static void -prof_dump_print_cnts(write_cb_t *prof_dump_write, void *cbopaque, - const prof_cnt_t *cnts) { +prof_dump_print_cnts( + write_cb_t *prof_dump_write, void *cbopaque, const prof_cnt_t *cnts) { uint64_t curobjs; uint64_t curbytes; uint64_t accumobjs; @@ -639,8 +636,8 @@ prof_dump_print_cnts(write_cb_t *prof_dump_write, void *cbopaque, accumbytes = cnts->accumbytes; } prof_dump_printf(prof_dump_write, cbopaque, - "%"FMTu64": %"FMTu64" [%"FMTu64": %"FMTu64"]", - curobjs, curbytes, accumobjs, accumbytes); + "%" FMTu64 ": %" FMTu64 " [%" FMTu64 ": %" FMTu64 "]", curobjs, + curbytes, accumobjs, accumbytes); } static void @@ -660,11 +657,11 @@ prof_tctx_merge_tdata(tsdn_t *tsdn, prof_tctx_t *tctx, prof_tdata_t *tdata) { memcpy(&tctx->dump_cnts, &tctx->cnts, sizeof(prof_cnt_t)); tdata->cnt_summed.curobjs += tctx->dump_cnts.curobjs; - tdata->cnt_summed.curobjs_shifted_unbiased - += tctx->dump_cnts.curobjs_shifted_unbiased; + tdata->cnt_summed.curobjs_shifted_unbiased += + tctx->dump_cnts.curobjs_shifted_unbiased; tdata->cnt_summed.curbytes += tctx->dump_cnts.curbytes; - tdata->cnt_summed.curbytes_unbiased - += tctx->dump_cnts.curbytes_unbiased; + tdata->cnt_summed.curbytes_unbiased += + tctx->dump_cnts.curbytes_unbiased; if (opt_prof_accum) { tdata->cnt_summed.accumobjs += tctx->dump_cnts.accumobjs; @@ -687,17 +684,17 @@ prof_tctx_merge_gctx(tsdn_t *tsdn, prof_tctx_t *tctx, prof_gctx_t *gctx) { malloc_mutex_assert_owner(tsdn, gctx->lock); gctx->cnt_summed.curobjs += 
tctx->dump_cnts.curobjs; - gctx->cnt_summed.curobjs_shifted_unbiased - += tctx->dump_cnts.curobjs_shifted_unbiased; + gctx->cnt_summed.curobjs_shifted_unbiased += + tctx->dump_cnts.curobjs_shifted_unbiased; gctx->cnt_summed.curbytes += tctx->dump_cnts.curbytes; gctx->cnt_summed.curbytes_unbiased += tctx->dump_cnts.curbytes_unbiased; if (opt_prof_accum) { gctx->cnt_summed.accumobjs += tctx->dump_cnts.accumobjs; - gctx->cnt_summed.accumobjs_shifted_unbiased - += tctx->dump_cnts.accumobjs_shifted_unbiased; + gctx->cnt_summed.accumobjs_shifted_unbiased += + tctx->dump_cnts.accumobjs_shifted_unbiased; gctx->cnt_summed.accumbytes += tctx->dump_cnts.accumbytes; - gctx->cnt_summed.accumbytes_unbiased - += tctx->dump_cnts.accumbytes_unbiased; + gctx->cnt_summed.accumbytes_unbiased += + tctx->dump_cnts.accumbytes_unbiased; } } @@ -725,9 +722,9 @@ prof_tctx_merge_iter(prof_tctx_tree_t *tctxs, prof_tctx_t *tctx, void *arg) { typedef struct prof_dump_iter_arg_s prof_dump_iter_arg_t; struct prof_dump_iter_arg_s { - tsdn_t *tsdn; + tsdn_t *tsdn; write_cb_t *prof_dump_write; - void *cbopaque; + void *cbopaque; }; static prof_tctx_t * @@ -743,9 +740,9 @@ prof_tctx_dump_iter(prof_tctx_tree_t *tctxs, prof_tctx_t *tctx, void *opaque) { case prof_tctx_state_dumping: case prof_tctx_state_purgatory: prof_dump_printf(arg->prof_dump_write, arg->cbopaque, - " t%"FMTu64": ", tctx->thr_uid); - prof_dump_print_cnts(arg->prof_dump_write, arg->cbopaque, - &tctx->dump_cnts); + " t%" FMTu64 ": ", tctx->thr_uid); + prof_dump_print_cnts( + arg->prof_dump_write, arg->cbopaque, &tctx->dump_cnts); arg->prof_dump_write(arg->cbopaque, "\n"); break; default: @@ -756,7 +753,7 @@ prof_tctx_dump_iter(prof_tctx_tree_t *tctxs, prof_tctx_t *tctx, void *opaque) { static prof_tctx_t * prof_tctx_finish_iter(prof_tctx_tree_t *tctxs, prof_tctx_t *tctx, void *arg) { - tsdn_t *tsdn = (tsdn_t *)arg; + tsdn_t *tsdn = (tsdn_t *)arg; prof_tctx_t *ret; malloc_mutex_assert_owner(tsdn, tctx->gctx->lock); @@ -811,8 +808,8 @@ 
prof_gctx_merge_iter(prof_gctx_tree_t *gctxs, prof_gctx_t *gctx, void *opaque) { prof_gctx_merge_iter_arg_t *arg = (prof_gctx_merge_iter_arg_t *)opaque; malloc_mutex_lock(arg->tsdn, gctx->lock); - tctx_tree_iter(&gctx->tctxs, NULL, prof_tctx_merge_iter, - (void *)arg->tsdn); + tctx_tree_iter( + &gctx->tctxs, NULL, prof_tctx_merge_iter, (void *)arg->tsdn); if (gctx->cnt_summed.curobjs != 0) { (*arg->leak_ngctx)++; } @@ -824,7 +821,7 @@ prof_gctx_merge_iter(prof_gctx_tree_t *gctxs, prof_gctx_t *gctx, void *opaque) { static void prof_gctx_finish(tsd_t *tsd, prof_gctx_tree_t *gctxs) { prof_tdata_t *tdata = prof_tdata_get(tsd, false); - prof_gctx_t *gctx; + prof_gctx_t *gctx; /* * Standard tree iteration won't work here, because as soon as we @@ -840,15 +837,14 @@ prof_gctx_finish(tsd_t *tsd, prof_gctx_tree_t *gctxs) { next = NULL; do { - prof_tctx_t *to_destroy = - tctx_tree_iter(&gctx->tctxs, next, - prof_tctx_finish_iter, + prof_tctx_t *to_destroy = tctx_tree_iter( + &gctx->tctxs, next, prof_tctx_finish_iter, (void *)tsd_tsdn(tsd)); if (to_destroy != NULL) { - next = tctx_tree_next(&gctx->tctxs, - to_destroy); - tctx_tree_remove(&gctx->tctxs, - to_destroy); + next = tctx_tree_next( + &gctx->tctxs, to_destroy); + tctx_tree_remove( + &gctx->tctxs, to_destroy); idalloctm(tsd_tsdn(tsd), to_destroy, NULL, NULL, true, true); } else { @@ -869,41 +865,41 @@ prof_gctx_finish(tsd_t *tsd, prof_gctx_tree_t *gctxs) { typedef struct prof_tdata_merge_iter_arg_s prof_tdata_merge_iter_arg_t; struct prof_tdata_merge_iter_arg_s { - tsdn_t *tsdn; + tsdn_t *tsdn; prof_cnt_t *cnt_all; }; static prof_tdata_t * -prof_tdata_merge_iter(prof_tdata_tree_t *tdatas_ptr, prof_tdata_t *tdata, - void *opaque) { - prof_tdata_merge_iter_arg_t *arg = - (prof_tdata_merge_iter_arg_t *)opaque; +prof_tdata_merge_iter( + prof_tdata_tree_t *tdatas_ptr, prof_tdata_t *tdata, void *opaque) { + prof_tdata_merge_iter_arg_t *arg = (prof_tdata_merge_iter_arg_t *) + opaque; malloc_mutex_lock(arg->tsdn, tdata->lock); 
if (!tdata->expired) { size_t tabind; union { - prof_tctx_t *p; - void *v; + prof_tctx_t *p; + void *v; } tctx; tdata->dumping = true; memset(&tdata->cnt_summed, 0, sizeof(prof_cnt_t)); - for (tabind = 0; !ckh_iter(&tdata->bt2tctx, &tabind, NULL, - &tctx.v);) { + for (tabind = 0; + !ckh_iter(&tdata->bt2tctx, &tabind, NULL, &tctx.v);) { prof_tctx_merge_tdata(arg->tsdn, tctx.p, tdata); } arg->cnt_all->curobjs += tdata->cnt_summed.curobjs; - arg->cnt_all->curobjs_shifted_unbiased - += tdata->cnt_summed.curobjs_shifted_unbiased; + arg->cnt_all->curobjs_shifted_unbiased += + tdata->cnt_summed.curobjs_shifted_unbiased; arg->cnt_all->curbytes += tdata->cnt_summed.curbytes; - arg->cnt_all->curbytes_unbiased - += tdata->cnt_summed.curbytes_unbiased; + arg->cnt_all->curbytes_unbiased += + tdata->cnt_summed.curbytes_unbiased; if (opt_prof_accum) { arg->cnt_all->accumobjs += tdata->cnt_summed.accumobjs; - arg->cnt_all->accumobjs_shifted_unbiased - += tdata->cnt_summed.accumobjs_shifted_unbiased; + arg->cnt_all->accumobjs_shifted_unbiased += + tdata->cnt_summed.accumobjs_shifted_unbiased; arg->cnt_all->accumbytes += tdata->cnt_summed.accumbytes; arg->cnt_all->accumbytes_unbiased += @@ -918,17 +914,17 @@ prof_tdata_merge_iter(prof_tdata_tree_t *tdatas_ptr, prof_tdata_t *tdata, } static prof_tdata_t * -prof_tdata_dump_iter(prof_tdata_tree_t *tdatas_ptr, prof_tdata_t *tdata, - void *opaque) { +prof_tdata_dump_iter( + prof_tdata_tree_t *tdatas_ptr, prof_tdata_t *tdata, void *opaque) { if (!tdata->dumping) { return NULL; } prof_dump_iter_arg_t *arg = (prof_dump_iter_arg_t *)opaque; - prof_dump_printf(arg->prof_dump_write, arg->cbopaque, " t%"FMTu64": ", - tdata->thr_uid); - prof_dump_print_cnts(arg->prof_dump_write, arg->cbopaque, - &tdata->cnt_summed); + prof_dump_printf(arg->prof_dump_write, arg->cbopaque, + " t%" FMTu64 ": ", tdata->thr_uid); + prof_dump_print_cnts( + arg->prof_dump_write, arg->cbopaque, &tdata->cnt_summed); if (!prof_thread_name_empty(tdata)) { 
arg->prof_dump_write(arg->cbopaque, " "); arg->prof_dump_write(arg->cbopaque, tdata->thread_name); @@ -940,7 +936,7 @@ prof_tdata_dump_iter(prof_tdata_tree_t *tdatas_ptr, prof_tdata_t *tdata, static void prof_dump_header(prof_dump_iter_arg_t *arg, const prof_cnt_t *cnt_all) { prof_dump_printf(arg->prof_dump_write, arg->cbopaque, - "heap_v2/%"FMTu64"\n t*: ", ((uint64_t)1U << lg_prof_sample)); + "heap_v2/%" FMTu64 "\n t*: ", ((uint64_t)1U << lg_prof_sample)); prof_dump_print_cnts(arg->prof_dump_write, arg->cbopaque, cnt_all); arg->prof_dump_write(arg->cbopaque, "\n"); @@ -956,8 +952,8 @@ prof_dump_gctx(prof_dump_iter_arg_t *arg, prof_gctx_t *gctx, malloc_mutex_assert_owner(arg->tsdn, gctx->lock); /* Avoid dumping such gctx's that have no useful data. */ - if ((!opt_prof_accum && gctx->cnt_summed.curobjs == 0) || - (opt_prof_accum && gctx->cnt_summed.accumobjs == 0)) { + if ((!opt_prof_accum && gctx->cnt_summed.curobjs == 0) + || (opt_prof_accum && gctx->cnt_summed.accumobjs == 0)) { assert(gctx->cnt_summed.curobjs == 0); assert(gctx->cnt_summed.curbytes == 0); /* @@ -976,12 +972,12 @@ prof_dump_gctx(prof_dump_iter_arg_t *arg, prof_gctx_t *gctx, arg->prof_dump_write(arg->cbopaque, "@"); for (unsigned i = 0; i < bt->len; i++) { prof_dump_printf(arg->prof_dump_write, arg->cbopaque, - " %#"FMTxPTR, (uintptr_t)bt->vec[i]); + " %#" FMTxPTR, (uintptr_t)bt->vec[i]); } arg->prof_dump_write(arg->cbopaque, "\n t*: "); - prof_dump_print_cnts(arg->prof_dump_write, arg->cbopaque, - &gctx->cnt_summed); + prof_dump_print_cnts( + arg->prof_dump_write, arg->cbopaque, &gctx->cnt_summed); arg->prof_dump_write(arg->cbopaque, "\n"); tctx_tree_iter(&gctx->tctxs, NULL, prof_tctx_dump_iter, arg); @@ -1002,18 +998,21 @@ prof_leakcheck(const prof_cnt_t *cnt_all, size_t leak_ngctx) { */ if (cnt_all->curbytes != 0) { double sample_period = (double)((uint64_t)1 << lg_prof_sample); - double ratio = (((double)cnt_all->curbytes) / - (double)cnt_all->curobjs) / sample_period; - double scale_factor = 
1.0 / (1.0 - exp(-ratio)); - uint64_t curbytes = (uint64_t)round(((double)cnt_all->curbytes) - * scale_factor); - uint64_t curobjs = (uint64_t)round(((double)cnt_all->curobjs) * - scale_factor); + double ratio = (((double)cnt_all->curbytes) + / (double)cnt_all->curobjs) + / sample_period; + double scale_factor = 1.0 / (1.0 - exp(-ratio)); + uint64_t curbytes = (uint64_t)round( + ((double)cnt_all->curbytes) * scale_factor); + uint64_t curobjs = (uint64_t)round( + ((double)cnt_all->curobjs) * scale_factor); - malloc_printf(": Leak approximation summary: ~%"FMTu64 - " byte%s, ~%"FMTu64" object%s, >= %zu context%s\n", - curbytes, (curbytes != 1) ? "s" : "", curobjs, (curobjs != - 1) ? "s" : "", leak_ngctx, (leak_ngctx != 1) ? "s" : ""); + malloc_printf( + ": Leak approximation summary: ~%" FMTu64 + " byte%s, ~%" FMTu64 " object%s, >= %zu context%s\n", + curbytes, (curbytes != 1) ? "s" : "", curobjs, + (curobjs != 1) ? "s" : "", leak_ngctx, + (leak_ngctx != 1) ? "s" : ""); malloc_printf( ": Run jeprof on dump output for leak detail\n"); if (opt_prof_leak_error) { @@ -1044,8 +1043,8 @@ prof_dump_prep(tsd_t *tsd, prof_tdata_t *tdata, prof_cnt_t *cnt_all, size_t *leak_ngctx, prof_gctx_tree_t *gctxs) { size_t tabind; union { - prof_gctx_t *p; - void *v; + prof_gctx_t *p; + void *v; } gctx; prof_enter(tsd, tdata); @@ -1064,19 +1063,19 @@ prof_dump_prep(tsd_t *tsd, prof_tdata_t *tdata, prof_cnt_t *cnt_all, * stats and merge them into the associated gctx's. */ memset(cnt_all, 0, sizeof(prof_cnt_t)); - prof_tdata_merge_iter_arg_t prof_tdata_merge_iter_arg = {tsd_tsdn(tsd), - cnt_all}; + prof_tdata_merge_iter_arg_t prof_tdata_merge_iter_arg = { + tsd_tsdn(tsd), cnt_all}; malloc_mutex_lock(tsd_tsdn(tsd), &tdatas_mtx); - tdata_tree_iter(&tdatas, NULL, prof_tdata_merge_iter, - &prof_tdata_merge_iter_arg); + tdata_tree_iter( + &tdatas, NULL, prof_tdata_merge_iter, &prof_tdata_merge_iter_arg); malloc_mutex_unlock(tsd_tsdn(tsd), &tdatas_mtx); /* Merge tctx stats into gctx's. 
*/ *leak_ngctx = 0; - prof_gctx_merge_iter_arg_t prof_gctx_merge_iter_arg = {tsd_tsdn(tsd), - leak_ngctx}; - gctx_tree_iter(gctxs, NULL, prof_gctx_merge_iter, - &prof_gctx_merge_iter_arg); + prof_gctx_merge_iter_arg_t prof_gctx_merge_iter_arg = { + tsd_tsdn(tsd), leak_ngctx}; + gctx_tree_iter( + gctxs, NULL, prof_gctx_merge_iter, &prof_gctx_merge_iter_arg); prof_leave(tsd, tdata); } @@ -1085,12 +1084,12 @@ void prof_dump_impl(tsd_t *tsd, write_cb_t *prof_dump_write, void *cbopaque, prof_tdata_t *tdata, bool leakcheck) { malloc_mutex_assert_owner(tsd_tsdn(tsd), &prof_dump_mtx); - prof_cnt_t cnt_all; - size_t leak_ngctx; + prof_cnt_t cnt_all; + size_t leak_ngctx; prof_gctx_tree_t gctxs; prof_dump_prep(tsd, tdata, &cnt_all, &leak_ngctx, &gctxs); - prof_dump_iter_arg_t prof_dump_iter_arg = {tsd_tsdn(tsd), - prof_dump_write, cbopaque}; + prof_dump_iter_arg_t prof_dump_iter_arg = { + tsd_tsdn(tsd), prof_dump_write, cbopaque}; prof_dump_header(&prof_dump_iter_arg, &cnt_all); gctx_tree_iter(&gctxs, NULL, prof_gctx_dump_iter, &prof_dump_iter_arg); prof_gctx_finish(tsd, &gctxs); @@ -1102,12 +1101,12 @@ prof_dump_impl(tsd_t *tsd, write_cb_t *prof_dump_write, void *cbopaque, /* Used in unit tests. */ void prof_cnt_all(prof_cnt_t *cnt_all) { - tsd_t *tsd = tsd_fetch(); + tsd_t *tsd = tsd_fetch(); prof_tdata_t *tdata = prof_tdata_get(tsd, false); if (tdata == NULL) { memset(cnt_all, 0, sizeof(prof_cnt_t)); } else { - size_t leak_ngctx; + size_t leak_ngctx; prof_gctx_tree_t gctxs; prof_dump_prep(tsd, tdata, cnt_all, &leak_ngctx, &gctxs); prof_gctx_finish(tsd, &gctxs); @@ -1148,8 +1147,8 @@ prof_tdata_init_impl(tsd_t *tsd, uint64_t thr_uid, uint64_t thr_discrim, /* Initialize an empty cache for this thread. 
*/ size_t tdata_sz = ALIGNMENT_CEILING(sizeof(prof_tdata_t), QUANTUM); size_t total_sz = tdata_sz + sizeof(void *) * opt_prof_bt_max; - tdata = (prof_tdata_t *)iallocztm(tsd_tsdn(tsd), - total_sz, sz_size2index(total_sz), false, NULL, true, + tdata = (prof_tdata_t *)iallocztm(tsd_tsdn(tsd), total_sz, + sz_size2index(total_sz), false, NULL, true, arena_get(TSDN_NULL, 0, true), true); if (tdata == NULL) { return NULL; @@ -1170,7 +1169,7 @@ prof_tdata_init_impl(tsd_t *tsd, uint64_t thr_uid, uint64_t thr_discrim, prof_thread_name_assert(tdata); if (ckh_new(tsd, &tdata->bt2tctx, PROF_CKH_MINITEMS, prof_bt_hash, - prof_bt_keycomp)) { + prof_bt_keycomp)) { idalloctm(tsd_tsdn(tsd), tdata, NULL, NULL, true, true); return NULL; } @@ -1201,16 +1200,16 @@ prof_tdata_should_destroy_unlocked(prof_tdata_t *tdata, bool even_if_attached) { } static bool -prof_tdata_should_destroy(tsdn_t *tsdn, prof_tdata_t *tdata, - bool even_if_attached) { +prof_tdata_should_destroy( + tsdn_t *tsdn, prof_tdata_t *tdata, bool even_if_attached) { malloc_mutex_assert_owner(tsdn, tdata->lock); return prof_tdata_should_destroy_unlocked(tdata, even_if_attached); } static void -prof_tdata_destroy_locked(tsd_t *tsd, prof_tdata_t *tdata, - bool even_if_attached) { +prof_tdata_destroy_locked( + tsd_t *tsd, prof_tdata_t *tdata, bool even_if_attached) { malloc_mutex_assert_owner(tsd_tsdn(tsd), &tdatas_mtx); malloc_mutex_assert_not_owner(tsd_tsdn(tsd), tdata->lock); @@ -1234,8 +1233,8 @@ prof_tdata_detach(tsd_t *tsd, prof_tdata_t *tdata) { malloc_mutex_lock(tsd_tsdn(tsd), tdata->lock); if (tdata->attached) { - destroy_tdata = prof_tdata_should_destroy(tsd_tsdn(tsd), tdata, - true); + destroy_tdata = prof_tdata_should_destroy( + tsd_tsdn(tsd), tdata, true); /* * Only detach if !destroy_tdata, because detaching would allow * another thread to win the race to destroy tdata. 
@@ -1270,8 +1269,8 @@ prof_tdata_expire(tsdn_t *tsdn, prof_tdata_t *tdata) { } static prof_tdata_t * -prof_tdata_reset_iter(prof_tdata_tree_t *tdatas_ptr, prof_tdata_t *tdata, - void *arg) { +prof_tdata_reset_iter( + prof_tdata_tree_t *tdatas_ptr, prof_tdata_t *tdata, void *arg) { tsdn_t *tsdn = (tsdn_t *)arg; return (prof_tdata_expire(tsdn, tdata) ? tdata : NULL); @@ -1291,8 +1290,8 @@ prof_reset(tsd_t *tsd, size_t lg_sample) { next = NULL; do { - prof_tdata_t *to_destroy = tdata_tree_iter(&tdatas, next, - prof_tdata_reset_iter, (void *)tsd); + prof_tdata_t *to_destroy = tdata_tree_iter( + &tdatas, next, prof_tdata_reset_iter, (void *)tsd); if (to_destroy != NULL) { next = tdata_tree_next(&tdatas, to_destroy); prof_tdata_destroy_locked(tsd, to_destroy, false); @@ -1355,8 +1354,8 @@ prof_tctx_destroy(tsd_t *tsd, prof_tctx_t *tctx) { prof_tdata_t *tdata = tctx->tdata; tctx->tdata = NULL; ckh_remove(tsd, &tdata->bt2tctx, &gctx->bt, NULL, NULL); - bool destroy_tdata = prof_tdata_should_destroy(tsd_tsdn(tsd), - tdata, false); + bool destroy_tdata = prof_tdata_should_destroy( + tsd_tsdn(tsd), tdata, false); malloc_mutex_unlock(tsd_tsdn(tsd), tdata->lock); if (destroy_tdata) { prof_tdata_destroy(tsd, tdata, false); diff --git a/src/prof_log.c b/src/prof_log.c index f4000aec..64b363bb 100644 --- a/src/prof_log.c +++ b/src/prof_log.c @@ -12,7 +12,7 @@ #include "jemalloc/internal/prof_log.h" #include "jemalloc/internal/prof_sys.h" -bool opt_prof_log = false; +bool opt_prof_log = false; typedef enum prof_logging_state_e prof_logging_state_t; enum prof_logging_state_e { prof_logging_state_stopped, @@ -32,8 +32,8 @@ static bool prof_log_dummy = false; /* Incremented for every log file that is output. */ static uint64_t log_seq = 0; -static char log_filename[ - /* Minimize memory bloat for non-prof builds. */ +static char log_filename[ +/* Minimize memory bloat for non-prof builds. 
*/ #ifdef JEMALLOC_PROF PATH_MAX + #endif @@ -51,8 +51,8 @@ typedef struct prof_bt_node_s prof_bt_node_t; struct prof_bt_node_s { prof_bt_node_t *next; - size_t index; - prof_bt_t bt; + size_t index; + prof_bt_t bt; /* Variable size backtrace vector pointed to by bt. */ void *vec[1]; }; @@ -61,8 +61,8 @@ typedef struct prof_thr_node_s prof_thr_node_t; struct prof_thr_node_s { prof_thr_node_t *next; - size_t index; - uint64_t thr_uid; + size_t index; + uint64_t thr_uid; /* Variable size based on thr_name_sz. */ char name[1]; }; @@ -91,15 +91,15 @@ struct prof_alloc_node_s { * These are the backtraces and threads that have already been logged by an * allocation. */ -static bool log_tables_initialized = false; +static bool log_tables_initialized = false; static ckh_t log_bt_node_set; static ckh_t log_thr_node_set; /* Store linked lists for logged data. */ -static prof_bt_node_t *log_bt_first = NULL; -static prof_bt_node_t *log_bt_last = NULL; -static prof_thr_node_t *log_thr_first = NULL; -static prof_thr_node_t *log_thr_last = NULL; +static prof_bt_node_t *log_bt_first = NULL; +static prof_bt_node_t *log_bt_last = NULL; +static prof_thr_node_t *log_thr_first = NULL; +static prof_thr_node_t *log_thr_last = NULL; static prof_alloc_node_t *log_alloc_first = NULL; static prof_alloc_node_t *log_alloc_last = NULL; @@ -131,12 +131,12 @@ prof_log_bt_index(tsd_t *tsd, prof_bt_t *bt) { /* See if this backtrace is already cached in the table. 
*/ if (ckh_search(&log_bt_node_set, (void *)(&dummy_node), - (void **)(&node), NULL)) { - size_t sz = offsetof(prof_bt_node_t, vec) + - (bt->len * sizeof(void *)); - prof_bt_node_t *new_node = (prof_bt_node_t *) - iallocztm(tsd_tsdn(tsd), sz, sz_size2index(sz), false, NULL, - true, arena_get(TSDN_NULL, 0, true), true); + (void **)(&node), NULL)) { + size_t sz = offsetof(prof_bt_node_t, vec) + + (bt->len * sizeof(void *)); + prof_bt_node_t *new_node = (prof_bt_node_t *)iallocztm( + tsd_tsdn(tsd), sz, sz_size2index(sz), false, NULL, true, + arena_get(TSDN_NULL, 0, true), true); if (log_bt_first == NULL) { log_bt_first = new_node; log_bt_last = new_node; @@ -174,11 +174,11 @@ prof_log_thr_index(tsd_t *tsd, uint64_t thr_uid, const char *name) { /* See if this thread is already cached in the table. */ if (ckh_search(&log_thr_node_set, (void *)(&dummy_node), - (void **)(&node), NULL)) { + (void **)(&node), NULL)) { size_t sz = offsetof(prof_thr_node_t, name) + strlen(name) + 1; - prof_thr_node_t *new_node = (prof_thr_node_t *) - iallocztm(tsd_tsdn(tsd), sz, sz_size2index(sz), false, NULL, - true, arena_get(TSDN_NULL, 0, true), true); + prof_thr_node_t *new_node = (prof_thr_node_t *)iallocztm( + tsd_tsdn(tsd), sz, sz_size2index(sz), false, NULL, true, + arena_get(TSDN_NULL, 0, true), true); if (log_thr_first == NULL) { log_thr_first = new_node; log_thr_last = new_node; @@ -225,9 +225,9 @@ prof_try_log(tsd_t *tsd, size_t usize, prof_info_t *prof_info) { if (!log_tables_initialized) { bool err1 = ckh_new(tsd, &log_bt_node_set, PROF_CKH_MINITEMS, - prof_bt_node_hash, prof_bt_node_keycomp); + prof_bt_node_hash, prof_bt_node_keycomp); bool err2 = ckh_new(tsd, &log_thr_node_set, PROF_CKH_MINITEMS, - prof_thr_node_hash, prof_thr_node_keycomp); + prof_thr_node_hash, prof_thr_node_keycomp); if (err1 || err2) { goto label_done; } @@ -238,9 +238,9 @@ prof_try_log(tsd_t *tsd, size_t usize, prof_info_t *prof_info) { nstime_t free_time; nstime_prof_init_update(&free_time); - size_t sz 
= sizeof(prof_alloc_node_t); - prof_alloc_node_t *new_node = (prof_alloc_node_t *) - iallocztm(tsd_tsdn(tsd), sz, sz_size2index(sz), false, NULL, true, + size_t sz = sizeof(prof_alloc_node_t); + prof_alloc_node_t *new_node = (prof_alloc_node_t *)iallocztm( + tsd_tsdn(tsd), sz, sz_size2index(sz), false, NULL, true, arena_get(TSDN_NULL, 0, true), true); const char *prod_thr_name = tctx->tdata->thread_name; @@ -256,10 +256,10 @@ prof_try_log(tsd_t *tsd, size_t usize, prof_info_t *prof_info) { prof_bt_t *prod_bt = &tctx->gctx->bt; new_node->next = NULL; - new_node->alloc_thr_ind = prof_log_thr_index(tsd, tctx->tdata->thr_uid, - prod_thr_name); - new_node->free_thr_ind = prof_log_thr_index(tsd, cons_tdata->thr_uid, - cons_thr_name); + new_node->alloc_thr_ind = prof_log_thr_index( + tsd, tctx->tdata->thr_uid, prod_thr_name); + new_node->free_thr_ind = prof_log_thr_index( + tsd, cons_tdata->thr_uid, cons_thr_name); new_node->alloc_bt_ind = prof_log_bt_index(tsd, prod_bt); new_node->free_bt_ind = prof_log_bt_index(tsd, cons_bt); new_node->alloc_time_ns = nstime_ns(&alloc_time); @@ -288,8 +288,8 @@ static bool prof_bt_node_keycomp(const void *k1, const void *k2) { const prof_bt_node_t *bt_node1 = (prof_bt_node_t *)k1; const prof_bt_node_t *bt_node2 = (prof_bt_node_t *)k2; - return prof_bt_keycomp((void *)(&bt_node1->bt), - (void *)(&bt_node2->bt)); + return prof_bt_keycomp( + (void *)(&bt_node1->bt), (void *)(&bt_node2->bt)); } static void @@ -309,7 +309,7 @@ prof_thr_node_keycomp(const void *k1, const void *k2) { size_t prof_log_bt_count(void) { cassert(config_prof); - size_t cnt = 0; + size_t cnt = 0; prof_bt_node_t *node = log_bt_first; while (node != NULL) { cnt++; @@ -322,7 +322,7 @@ prof_log_bt_count(void) { size_t prof_log_alloc_count(void) { cassert(config_prof); - size_t cnt = 0; + size_t cnt = 0; prof_alloc_node_t *node = log_alloc_first; while (node != NULL) { cnt++; @@ -335,7 +335,7 @@ prof_log_alloc_count(void) { size_t prof_log_thr_count(void) { 
cassert(config_prof); - size_t cnt = 0; + size_t cnt = 0; prof_thr_node_t *node = log_thr_first; while (node != NULL) { cnt++; @@ -374,7 +374,6 @@ prof_log_rep_check(void) { size_t thr_count = prof_log_thr_count(); size_t alloc_count = prof_log_alloc_count(); - if (prof_logging_state == prof_logging_state_stopped) { if (bt_count != 0 || thr_count != 0 || alloc_count || 0) { return true; @@ -435,7 +434,8 @@ prof_log_start(tsdn_t *tsdn, const char *filename) { if (!prof_log_atexit_called) { prof_log_atexit_called = true; if (atexit(prof_log_stop_final) != 0) { - malloc_write(": Error in atexit() " + malloc_write( + ": Error in atexit() " "for logging\n"); if (opt_abort) { abort(); @@ -469,14 +469,14 @@ label_done: } struct prof_emitter_cb_arg_s { - int fd; + int fd; ssize_t ret; }; static void prof_emitter_write_cb(void *opaque, const char *to_write) { - struct prof_emitter_cb_arg_s *arg = - (struct prof_emitter_cb_arg_s *)opaque; + struct prof_emitter_cb_arg_s *arg = (struct prof_emitter_cb_arg_s *) + opaque; size_t bytes = strlen(to_write); if (prof_log_dummy) { return; @@ -501,8 +501,8 @@ prof_log_emit_threads(tsd_t *tsd, emitter_t *emitter) { char *thr_name = thr_node->name; - emitter_json_kv(emitter, "thr_name", emitter_type_string, - &thr_name); + emitter_json_kv( + emitter, "thr_name", emitter_type_string, &thr_name); emitter_json_object_end(emitter); thr_old_node = thr_node; @@ -521,7 +521,7 @@ prof_log_emit_traces(tsd_t *tsd, emitter_t *emitter) { * Calculate how many hex digits we need: twice number of bytes, two for * "0x", and then one more for terminating '\0'. 
*/ - char buf[2 * sizeof(intptr_t) + 3]; + char buf[2 * sizeof(intptr_t) + 3]; size_t buf_sz = sizeof(buf); while (bt_node != NULL) { emitter_json_array_begin(emitter); @@ -529,8 +529,8 @@ prof_log_emit_traces(tsd_t *tsd, emitter_t *emitter) { for (i = 0; i < bt_node->bt.len; i++) { malloc_snprintf(buf, buf_sz, "%p", bt_node->bt.vec[i]); char *trace_str = buf; - emitter_json_value(emitter, emitter_type_string, - &trace_str); + emitter_json_value( + emitter, emitter_type_string, &trace_str); } emitter_json_array_end(emitter); @@ -561,21 +561,21 @@ prof_log_emit_allocs(tsd_t *tsd, emitter_t *emitter) { emitter_json_kv(emitter, "free_trace", emitter_type_size, &alloc_node->free_bt_ind); - emitter_json_kv(emitter, "alloc_timestamp", - emitter_type_uint64, &alloc_node->alloc_time_ns); + emitter_json_kv(emitter, "alloc_timestamp", emitter_type_uint64, + &alloc_node->alloc_time_ns); emitter_json_kv(emitter, "free_timestamp", emitter_type_uint64, &alloc_node->free_time_ns); - emitter_json_kv(emitter, "usize", emitter_type_uint64, - &alloc_node->usize); + emitter_json_kv( + emitter, "usize", emitter_type_uint64, &alloc_node->usize); emitter_json_object_end(emitter); alloc_old_node = alloc_node; alloc_node = alloc_node->next; - idalloctm(tsd_tsdn(tsd), alloc_old_node, NULL, NULL, true, - true); + idalloctm( + tsd_tsdn(tsd), alloc_old_node, NULL, NULL, true, true); } emitter_json_array_end(emitter); } @@ -591,15 +591,14 @@ prof_log_emit_metadata(emitter_t *emitter) { emitter_json_kv(emitter, "duration", emitter_type_uint64, &ns); char *vers = JEMALLOC_VERSION; - emitter_json_kv(emitter, "version", - emitter_type_string, &vers); + emitter_json_kv(emitter, "version", emitter_type_string, &vers); - emitter_json_kv(emitter, "lg_sample_rate", - emitter_type_int, &lg_prof_sample); + emitter_json_kv( + emitter, "lg_sample_rate", emitter_type_int, &lg_prof_sample); const char *res_type = prof_time_res_mode_names[opt_prof_time_res]; - emitter_json_kv(emitter, "prof_time_resolution", 
emitter_type_string, - &res_type); + emitter_json_kv( + emitter, "prof_time_resolution", emitter_type_string, &res_type); int pid = prof_getpid(); emitter_json_kv(emitter, "pid", emitter_type_int, &pid); @@ -632,7 +631,6 @@ prof_log_stop(tsdn_t *tsdn) { prof_logging_state = prof_logging_state_dumping; malloc_mutex_unlock(tsdn, &log_mtx); - emitter_t emitter; /* Create a file. */ @@ -645,8 +643,10 @@ prof_log_stop(tsdn_t *tsdn) { } if (fd == -1) { - malloc_printf(": creat() for log file \"%s\" " - " failed with %d\n", log_filename, errno); + malloc_printf( + ": creat() for log file \"%s\" " + " failed with %d\n", + log_filename, errno); if (opt_abort) { abort(); } @@ -659,8 +659,8 @@ prof_log_stop(tsdn_t *tsdn) { buf_writer_t buf_writer; buf_writer_init(tsdn, &buf_writer, prof_emitter_write_cb, &arg, NULL, PROF_LOG_STOP_BUFSIZE); - emitter_init(&emitter, emitter_output_json_compact, buf_writer_cb, - &buf_writer); + emitter_init( + &emitter, emitter_output_json_compact, buf_writer_cb, &buf_writer); emitter_begin(&emitter); prof_log_emit_metadata(&emitter); @@ -701,8 +701,8 @@ JEMALLOC_COLD bool prof_log_init(tsd_t *tsd) { cassert(config_prof); - if (malloc_mutex_init(&log_mtx, "prof_log", - WITNESS_RANK_PROF_LOG, malloc_mutex_rank_exclusive)) { + if (malloc_mutex_init(&log_mtx, "prof_log", WITNESS_RANK_PROF_LOG, + malloc_mutex_rank_exclusive)) { return true; } diff --git a/src/prof_recent.c b/src/prof_recent.c index b5639b4c..f7108bee 100644 --- a/src/prof_recent.c +++ b/src/prof_recent.c @@ -7,18 +7,18 @@ #include "jemalloc/internal/prof_data.h" #include "jemalloc/internal/prof_recent.h" -ssize_t opt_prof_recent_alloc_max = PROF_RECENT_ALLOC_MAX_DEFAULT; -malloc_mutex_t prof_recent_alloc_mtx; /* Protects the fields below */ +ssize_t opt_prof_recent_alloc_max = PROF_RECENT_ALLOC_MAX_DEFAULT; +malloc_mutex_t prof_recent_alloc_mtx; /* Protects the fields below */ static atomic_zd_t prof_recent_alloc_max; -static ssize_t prof_recent_alloc_count = 0; +static ssize_t 
prof_recent_alloc_count = 0; prof_recent_list_t prof_recent_alloc_list; malloc_mutex_t prof_recent_dump_mtx; /* Protects dumping. */ static void prof_recent_alloc_max_init(void) { - atomic_store_zd(&prof_recent_alloc_max, opt_prof_recent_alloc_max, - ATOMIC_RELAXED); + atomic_store_zd( + &prof_recent_alloc_max, opt_prof_recent_alloc_max, ATOMIC_RELAXED); } static inline ssize_t @@ -144,26 +144,26 @@ edata_prof_recent_alloc_get_no_lock_test(const edata_t *edata) { static inline prof_recent_t * edata_prof_recent_alloc_get(tsd_t *tsd, const edata_t *edata) { malloc_mutex_assert_owner(tsd_tsdn(tsd), &prof_recent_alloc_mtx); - prof_recent_t *recent_alloc = - edata_prof_recent_alloc_get_no_lock(edata); - assert(recent_alloc == NULL || - prof_recent_alloc_edata_get(tsd, recent_alloc) == edata); + prof_recent_t *recent_alloc = edata_prof_recent_alloc_get_no_lock( + edata); + assert(recent_alloc == NULL + || prof_recent_alloc_edata_get(tsd, recent_alloc) == edata); return recent_alloc; } static prof_recent_t * -edata_prof_recent_alloc_update_internal(tsd_t *tsd, edata_t *edata, - prof_recent_t *recent_alloc) { +edata_prof_recent_alloc_update_internal( + tsd_t *tsd, edata_t *edata, prof_recent_t *recent_alloc) { malloc_mutex_assert_owner(tsd_tsdn(tsd), &prof_recent_alloc_mtx); - prof_recent_t *old_recent_alloc = - edata_prof_recent_alloc_get(tsd, edata); + prof_recent_t *old_recent_alloc = edata_prof_recent_alloc_get( + tsd, edata); edata_prof_recent_alloc_set_dont_call_directly(edata, recent_alloc); return old_recent_alloc; } static void -edata_prof_recent_alloc_set(tsd_t *tsd, edata_t *edata, - prof_recent_t *recent_alloc) { +edata_prof_recent_alloc_set( + tsd_t *tsd, edata_t *edata, prof_recent_t *recent_alloc) { malloc_mutex_assert_owner(tsd_tsdn(tsd), &prof_recent_alloc_mtx); assert(recent_alloc != NULL); prof_recent_t *old_recent_alloc = @@ -173,8 +173,8 @@ edata_prof_recent_alloc_set(tsd_t *tsd, edata_t *edata, } static void -edata_prof_recent_alloc_reset(tsd_t *tsd, 
edata_t *edata, - prof_recent_t *recent_alloc) { +edata_prof_recent_alloc_reset( + tsd_t *tsd, edata_t *edata, prof_recent_t *recent_alloc) { malloc_mutex_assert_owner(tsd_tsdn(tsd), &prof_recent_alloc_mtx); assert(recent_alloc != NULL); prof_recent_t *old_recent_alloc = @@ -265,14 +265,14 @@ prof_recent_alloc_assert_count(tsd_t *tsd) { if (!config_debug) { return; } - ssize_t count = 0; + ssize_t count = 0; prof_recent_t *n; - ql_foreach(n, &prof_recent_alloc_list, link) { + ql_foreach (n, &prof_recent_alloc_list, link) { ++count; } assert(count == prof_recent_alloc_count); - assert(prof_recent_alloc_max_get(tsd) == -1 || - count <= prof_recent_alloc_max_get(tsd)); + assert(prof_recent_alloc_max_get(tsd) == -1 + || count <= prof_recent_alloc_max_get(tsd)); } void @@ -319,8 +319,8 @@ prof_recent_alloc(tsd_t *tsd, edata_t *edata, size_t size, size_t usize) { * the allocation locks. */ prof_recent_t *reserve = NULL; - if (prof_recent_alloc_max_get(tsd) == -1 || - prof_recent_alloc_count < prof_recent_alloc_max_get(tsd)) { + if (prof_recent_alloc_max_get(tsd) == -1 + || prof_recent_alloc_count < prof_recent_alloc_max_get(tsd)) { assert(prof_recent_alloc_max_get(tsd) != 0); malloc_mutex_unlock(tsd_tsdn(tsd), &prof_recent_alloc_mtx); reserve = prof_recent_allocate_node(tsd_tsdn(tsd)); @@ -346,8 +346,9 @@ prof_recent_alloc(tsd_t *tsd, edata_t *edata, size_t size, size_t usize) { ql_rotate(&prof_recent_alloc_list, link); } else { /* Otherwise make use of the new node. 
*/ - assert(prof_recent_alloc_max_get(tsd) == -1 || - prof_recent_alloc_count < prof_recent_alloc_max_get(tsd)); + assert(prof_recent_alloc_max_get(tsd) == -1 + || prof_recent_alloc_count + < prof_recent_alloc_max_get(tsd)); if (reserve == NULL) { goto label_rollback; } @@ -421,7 +422,7 @@ prof_recent_alloc_restore_locked(tsd_t *tsd, prof_recent_list_t *to_delete) { } prof_recent_t *node; - ql_foreach(node, &prof_recent_alloc_list, link) { + ql_foreach (node, &prof_recent_alloc_list, link) { if (prof_recent_alloc_count == max) { break; } @@ -462,7 +463,7 @@ prof_recent_alloc_max_ctl_write(tsd_t *tsd, ssize_t max) { assert(max >= -1); malloc_mutex_lock(tsd_tsdn(tsd), &prof_recent_alloc_mtx); prof_recent_alloc_assert_count(tsd); - const ssize_t old_max = prof_recent_alloc_max_update(tsd, max); + const ssize_t old_max = prof_recent_alloc_max_update(tsd, max); prof_recent_list_t to_delete; prof_recent_alloc_restore_locked(tsd, &to_delete); malloc_mutex_unlock(tsd_tsdn(tsd), &prof_recent_alloc_mtx); @@ -472,7 +473,7 @@ prof_recent_alloc_max_ctl_write(tsd_t *tsd, ssize_t max) { static void prof_recent_alloc_dump_bt(emitter_t *emitter, prof_tctx_t *tctx) { - char bt_buf[2 * sizeof(intptr_t) + 3]; + char bt_buf[2 * sizeof(intptr_t) + 3]; char *s = bt_buf; assert(tctx != NULL); prof_bt_t *bt = &tctx->gctx->bt; @@ -501,8 +502,8 @@ prof_recent_alloc_dump_node(emitter_t *emitter, prof_recent_t *node) { emitter_type_string, &thread_name); } uint64_t alloc_time_ns = nstime_ns(&node->alloc_time); - emitter_json_kv(emitter, "alloc_time", emitter_type_uint64, - &alloc_time_ns); + emitter_json_kv( + emitter, "alloc_time", emitter_type_uint64, &alloc_time_ns); emitter_json_array_kv_begin(emitter, "alloc_trace"); prof_recent_alloc_dump_bt(emitter, node->alloc_tctx); emitter_json_array_end(emitter); @@ -539,8 +540,8 @@ prof_recent_alloc_dump(tsd_t *tsd, write_cb_t *write_cb, void *cbopaque) { buf_writer_init(tsd_tsdn(tsd), &buf_writer, write_cb, cbopaque, NULL, 
PROF_RECENT_PRINT_BUFSIZE); emitter_t emitter; - emitter_init(&emitter, emitter_output_json_compact, buf_writer_cb, - &buf_writer); + emitter_init( + &emitter, emitter_output_json_compact, buf_writer_cb, &buf_writer); prof_recent_list_t temp_list; malloc_mutex_lock(tsd_tsdn(tsd), &prof_recent_alloc_mtx); @@ -554,13 +555,13 @@ prof_recent_alloc_dump(tsd_t *tsd, write_cb_t *write_cb, void *cbopaque) { emitter_begin(&emitter); uint64_t sample_interval = (uint64_t)1U << lg_prof_sample; - emitter_json_kv(&emitter, "sample_interval", emitter_type_uint64, - &sample_interval); - emitter_json_kv(&emitter, "recent_alloc_max", emitter_type_ssize, - &dump_max); + emitter_json_kv( + &emitter, "sample_interval", emitter_type_uint64, &sample_interval); + emitter_json_kv( + &emitter, "recent_alloc_max", emitter_type_ssize, &dump_max); emitter_json_array_kv_begin(&emitter, "recent_alloc"); prof_recent_t *node; - ql_foreach(node, &temp_list, link) { + ql_foreach (node, &temp_list, link) { prof_recent_alloc_dump_node(&emitter, node); } emitter_json_array_end(&emitter); @@ -587,12 +588,12 @@ prof_recent_init(void) { prof_recent_alloc_max_init(); if (malloc_mutex_init(&prof_recent_alloc_mtx, "prof_recent_alloc", - WITNESS_RANK_PROF_RECENT_ALLOC, malloc_mutex_rank_exclusive)) { + WITNESS_RANK_PROF_RECENT_ALLOC, malloc_mutex_rank_exclusive)) { return true; } if (malloc_mutex_init(&prof_recent_dump_mtx, "prof_recent_dump", - WITNESS_RANK_PROF_RECENT_DUMP, malloc_mutex_rank_exclusive)) { + WITNESS_RANK_PROF_RECENT_DUMP, malloc_mutex_rank_exclusive)) { return true; } diff --git a/src/prof_stack_range.c b/src/prof_stack_range.c index f5e5c044..b167b132 100644 --- a/src/prof_stack_range.c +++ b/src/prof_stack_range.c @@ -6,12 +6,12 @@ #if defined(__linux__) && defined(JEMALLOC_HAVE_GETTID) -# include -# include -# include -# include // strtoul -# include -# include +# include +# include +# include +# include // strtoul +# include +# include /* * Converts a string representing a hexadecimal 
number to an unsigned long long @@ -25,31 +25,31 @@ */ static inline unsigned long long int strtoull_hex(const char *nptr, char **endptr) { - unsigned long long int val = 0; - int ii = 0; - for (; ii < 16; ++ii) { - char c = nptr[ii]; - if (c >= '0' && c <= '9') { - val = (val << 4) + (c - '0'); - } else if (c >= 'a' && c <= 'f') { - val = (val << 4) + (c - 'a' + 10); - } else { - break; - } - } - if (endptr) { - *endptr = (char *)(nptr + ii); - } - return val; + unsigned long long int val = 0; + int ii = 0; + for (; ii < 16; ++ii) { + char c = nptr[ii]; + if (c >= '0' && c <= '9') { + val = (val << 4) + (c - '0'); + } else if (c >= 'a' && c <= 'f') { + val = (val << 4) + (c - 'a' + 10); + } else { + break; + } + } + if (endptr) { + *endptr = (char *)(nptr + ii); + } + return val; } static int prof_mapping_containing_addr(uintptr_t addr, const char *maps_path, - uintptr_t *mm_start, uintptr_t *mm_end) { - int ret = ENOENT; /* not found */ - *mm_start = *mm_end = 0; + uintptr_t *mm_start, uintptr_t *mm_end) { + int ret = ENOENT; /* not found */ + *mm_start = *mm_end = 0; - /* + /* * Each line of /proc//maps is: * - * @@ -57,90 +57,93 @@ prof_mapping_containing_addr(uintptr_t addr, const char *maps_path, * as long as `buf` contains the start of a mapping line it can always be * parsed. 
*/ - static const int kMappingFieldsWidth = 34; + static const int kMappingFieldsWidth = 34; - int fd = -1; - char buf[4096]; - ssize_t remaining = 0; /* actual number of bytes read to buf */ - char *line = NULL; + int fd = -1; + char buf[4096]; + ssize_t remaining = 0; /* actual number of bytes read to buf */ + char *line = NULL; - while (1) { - if (fd < 0) { - /* case 0: initial open of maps file */ - fd = malloc_open(maps_path, O_RDONLY); - if (fd < 0) { - return errno; - } + while (1) { + if (fd < 0) { + /* case 0: initial open of maps file */ + fd = malloc_open(maps_path, O_RDONLY); + if (fd < 0) { + return errno; + } - remaining = malloc_read_fd(fd, buf, sizeof(buf)); - if (remaining <= 0) { - ret = errno; - break; - } - line = buf; - } else if (line == NULL) { - /* case 1: no newline found in buf */ - remaining = malloc_read_fd(fd, buf, sizeof(buf)); - if (remaining <= 0) { - ret = errno; - break; - } - line = memchr(buf, '\n', remaining); - if (line != NULL) { - line++; /* advance to character after newline */ - remaining -= (line - buf); - } - } else if (line != NULL && remaining < kMappingFieldsWidth) { - /* + remaining = malloc_read_fd(fd, buf, sizeof(buf)); + if (remaining <= 0) { + ret = errno; + break; + } + line = buf; + } else if (line == NULL) { + /* case 1: no newline found in buf */ + remaining = malloc_read_fd(fd, buf, sizeof(buf)); + if (remaining <= 0) { + ret = errno; + break; + } + line = memchr(buf, '\n', remaining); + if (line != NULL) { + line++; /* advance to character after newline */ + remaining -= (line - buf); + } + } else if (line != NULL && remaining < kMappingFieldsWidth) { + /* * case 2: found newline but insufficient characters remaining in * buf */ - memcpy(buf, line, - remaining); /* copy remaining characters to start of buf */ - line = buf; + memcpy(buf, line, + remaining); /* copy remaining characters to start of buf */ + line = buf; - size_t count = - malloc_read_fd(fd, buf + remaining, sizeof(buf) - remaining); - if (count 
<= 0) { - ret = errno; - break; - } + size_t count = malloc_read_fd( + fd, buf + remaining, sizeof(buf) - remaining); + if (count <= 0) { + ret = errno; + break; + } - remaining += count; /* actual number of bytes read to buf */ - } else { - /* case 3: found newline and sufficient characters to parse */ + remaining += + count; /* actual number of bytes read to buf */ + } else { + /* case 3: found newline and sufficient characters to parse */ - /* parse - */ - char *tmp = line; - uintptr_t start_addr = (uintptr_t)strtoull_hex(tmp, &tmp); - if (addr >= start_addr) { - tmp++; /* advance to character after '-' */ - uintptr_t end_addr = (uintptr_t)strtoull_hex(tmp, NULL); - if (addr < end_addr) { - *mm_start = start_addr; - *mm_end = end_addr; - ret = 0; - break; - } - } + /* parse - */ + char *tmp = line; + uintptr_t start_addr = (uintptr_t)strtoull_hex( + tmp, &tmp); + if (addr >= start_addr) { + tmp++; /* advance to character after '-' */ + uintptr_t end_addr = (uintptr_t)strtoull_hex( + tmp, NULL); + if (addr < end_addr) { + *mm_start = start_addr; + *mm_end = end_addr; + ret = 0; + break; + } + } - /* Advance to character after next newline in the current buf. */ - char *prev_line = line; - line = memchr(line, '\n', remaining); - if (line != NULL) { - line++; /* advance to character after newline */ - remaining -= (line - prev_line); - } - } - } + /* Advance to character after next newline in the current buf. */ + char *prev_line = line; + line = memchr(line, '\n', remaining); + if (line != NULL) { + line++; /* advance to character after newline */ + remaining -= (line - prev_line); + } + } + } - malloc_close(fd); - return ret; + malloc_close(fd); + return ret; } int prof_thread_stack_range(uintptr_t fp, uintptr_t *low, uintptr_t *high) { - /* + /* * NOTE: Prior to kernel 4.5 an entry for every thread stack was included in * /proc//maps as [STACK:]. Starting with kernel 4.5 only the main * thread stack remains as the [stack] mapping. 
For other thread stacks the @@ -148,19 +151,19 @@ prof_thread_stack_range(uintptr_t fp, uintptr_t *low, uintptr_t *high) { * labeled as [STACK:tid]). * https://lists.ubuntu.com/archives/kernel-team/2016-March/074681.html */ - char maps_path[64]; // "/proc//task//maps" - malloc_snprintf(maps_path, sizeof(maps_path), "/proc/%d/task/%d/maps", - getpid(), gettid()); - return prof_mapping_containing_addr(fp, maps_path, low, high); + char maps_path[64]; // "/proc//task//maps" + malloc_snprintf(maps_path, sizeof(maps_path), "/proc/%d/task/%d/maps", + getpid(), gettid()); + return prof_mapping_containing_addr(fp, maps_path, low, high); } #else int prof_thread_stack_range( - UNUSED uintptr_t addr, uintptr_t *stack_start, uintptr_t *stack_end) { - *stack_start = *stack_end = 0; - return ENOENT; + UNUSED uintptr_t addr, uintptr_t *stack_start, uintptr_t *stack_end) { + *stack_start = *stack_end = 0; + return ENOENT; } -#endif // __linux__ +#endif // __linux__ diff --git a/src/prof_stats.c b/src/prof_stats.c index 5d1a506b..db248be7 100644 --- a/src/prof_stats.c +++ b/src/prof_stats.c @@ -3,8 +3,8 @@ #include "jemalloc/internal/prof_stats.h" -bool opt_prof_stats = false; -malloc_mutex_t prof_stats_mtx; +bool opt_prof_stats = false; +malloc_mutex_t prof_stats_mtx; static prof_stats_t prof_stats_live[PROF_SC_NSIZES]; static prof_stats_t prof_stats_accum[PROF_SC_NSIZES]; diff --git a/src/prof_sys.c b/src/prof_sys.c index e3b7bbcb..be50c0be 100644 --- a/src/prof_sys.c +++ b/src/prof_sys.c @@ -8,8 +8,8 @@ #include "jemalloc/internal/prof_sys.h" #ifdef JEMALLOC_PROF_LIBUNWIND -#define UNW_LOCAL_ONLY -#include +# define UNW_LOCAL_ONLY +# include #endif #ifdef JEMALLOC_PROF_LIBGCC @@ -18,14 +18,15 @@ * use libgcc's unwinding functionality, but after we've included that, we've * already hooked _Unwind_Backtrace. We'll temporarily disable hooking. 
*/ -#undef _Unwind_Backtrace -#include -#define _Unwind_Backtrace JEMALLOC_TEST_HOOK(_Unwind_Backtrace, test_hooks_libc_hook) +# undef _Unwind_Backtrace +# include +# define _Unwind_Backtrace \ + JEMALLOC_TEST_HOOK(_Unwind_Backtrace, test_hooks_libc_hook) #endif #ifdef JEMALLOC_PROF_FRAME_POINTER // execinfo backtrace() as fallback unwinder -#include +# include #endif /******************************************************************************/ @@ -77,7 +78,7 @@ prof_unwind_init_callback(struct _Unwind_Context *context, void *arg) { static _Unwind_Reason_Code prof_unwind_callback(struct _Unwind_Context *context, void *arg) { prof_unwind_data_t *data = (prof_unwind_data_t *)arg; - void *ip; + void *ip; cassert(config_prof); @@ -115,14 +116,15 @@ struct stack_range { struct thread_unwind_info { struct stack_range stack_range; - bool fallback; + bool fallback; }; static __thread struct thread_unwind_info unwind_info = { - .stack_range = { - .start = 0, - .end = 0, - }, - .fallback = false, + .stack_range = + { + .start = 0, + .end = 0, + }, + .fallback = false, }; /* thread local */ static void @@ -142,10 +144,11 @@ prof_backtrace_impl(void **vec, unsigned *len, unsigned max_len) { uintptr_t fp = (uintptr_t)__builtin_frame_address(0); /* new thread - get the stack range */ - if (!unwind_info.fallback && - unwind_info.stack_range.start == unwind_info.stack_range.end) { + if (!unwind_info.fallback + && unwind_info.stack_range.start == unwind_info.stack_range.end) { if (prof_thread_stack_range(fp, &unwind_info.stack_range.start, - &unwind_info.stack_range.end) != 0) { + &unwind_info.stack_range.end) + != 0) { unwind_info.fallback = true; } else { assert(fp >= unwind_info.stack_range.start @@ -159,8 +162,8 @@ prof_backtrace_impl(void **vec, unsigned *len, unsigned max_len) { unsigned ii = 0; while (ii < max_len && fp != 0) { - if (fp < unwind_info.stack_range.start || - fp >= unwind_info.stack_range.end) { + if (fp < unwind_info.stack_range.start + || fp >= 
unwind_info.stack_range.end) { /* * Determining the stack range from procfs can be * relatively expensive especially for programs with @@ -173,7 +176,7 @@ prof_backtrace_impl(void **vec, unsigned *len, unsigned max_len) { unwind_info.fallback = true; goto label_fallback; } - void* ip = ((void **)fp)[1]; + void *ip = ((void **)fp)[1]; if (ip == 0) { break; } @@ -205,21 +208,21 @@ JEMALLOC_DIAGNOSTIC_IGNORE_FRAME_ADDRESS static void prof_backtrace_impl(void **vec, unsigned *len, unsigned max_len) { /* The input arg must be a constant for __builtin_return_address. */ -#define BT_FRAME(i) \ - if ((i) < max_len) { \ - void *p; \ - if (__builtin_frame_address(i) == 0) { \ - return; \ - } \ - p = __builtin_return_address(i); \ - if (p == NULL) { \ - return; \ - } \ - vec[(i)] = p; \ - *len = (i) + 1; \ - } else { \ - return; \ - } +# define BT_FRAME(i) \ + if ((i) < max_len) { \ + void *p; \ + if (__builtin_frame_address(i) == 0) { \ + return; \ + } \ + p = __builtin_return_address(i); \ + if (p == NULL) { \ + return; \ + } \ + vec[(i)] = p; \ + *len = (i) + 1; \ + } else { \ + return; \ + } cassert(config_prof); assert(vec != NULL); @@ -506,8 +509,8 @@ prof_backtrace_impl(void **vec, unsigned *len, unsigned max_len) { BT_FRAME(253) BT_FRAME(254) BT_FRAME(255) -#undef BT_FRAME -JEMALLOC_DIAGNOSTIC_POP +# undef BT_FRAME + JEMALLOC_DIAGNOSTIC_POP } #else static void @@ -568,8 +571,9 @@ prof_sys_thread_name_fetch(tsd_t *tsd) { return; } - if (prof_sys_thread_name_read(tdata->thread_name, - PROF_THREAD_NAME_MAX_LEN) != 0) { + if (prof_sys_thread_name_read( + tdata->thread_name, PROF_THREAD_NAME_MAX_LEN) + != 0) { prof_thread_name_clear(tdata); } @@ -592,32 +596,32 @@ prof_get_pid_namespace(void) { #if defined(_WIN32) || defined(__APPLE__) // Not supported, do nothing. 
#else - char buf[PATH_MAX]; - const char* linkname = -# if defined(__FreeBSD__) || defined(__DragonFly__) + char buf[PATH_MAX]; + const char *linkname = +# if defined(__FreeBSD__) || defined(__DragonFly__) "/proc/curproc/ns/pid" -# else +# else "/proc/self/ns/pid" -# endif +# endif ; ssize_t linklen = -# ifndef JEMALLOC_READLINKAT - readlink(linkname, buf, PATH_MAX) -# else - readlinkat(AT_FDCWD, linkname, buf, PATH_MAX) -# endif +# ifndef JEMALLOC_READLINKAT + readlink(linkname, buf, PATH_MAX) +# else + readlinkat(AT_FDCWD, linkname, buf, PATH_MAX) +# endif ; // namespace string is expected to be like pid:[4026531836] if (linklen > 0) { // Trim the trailing "]" - buf[linklen-1] = '\0'; - char* index = strtok(buf, "pid:["); + buf[linklen - 1] = '\0'; + char *index = strtok(buf, "pid:["); ret = atol(index); } #endif - return ret; + return ret; } /* @@ -647,8 +651,8 @@ struct prof_dump_arg_s { }; static void -prof_dump_check_possible_error(prof_dump_arg_t *arg, bool err_cond, - const char *format, ...) { +prof_dump_check_possible_error( + prof_dump_arg_t *arg, bool err_cond, const char *format, ...) 
{ assert(!arg->error); if (!err_cond) { return; @@ -660,7 +664,7 @@ prof_dump_check_possible_error(prof_dump_arg_t *arg, bool err_cond, } va_list ap; - char buf[PROF_PRINTF_BUFSIZE]; + char buf[PROF_PRINTF_BUFSIZE]; va_start(ap, format); malloc_vsnprintf(buf, sizeof(buf), format, ap); va_end(ap); @@ -692,8 +696,8 @@ prof_dump_flush(void *opaque, const char *s) { cassert(config_prof); prof_dump_arg_t *arg = (prof_dump_arg_t *)opaque; if (!arg->error) { - ssize_t err = prof_dump_write_file(arg->prof_dump_fd, s, - strlen(s)); + ssize_t err = prof_dump_write_file( + arg->prof_dump_fd, s, strlen(s)); prof_dump_check_possible_error(arg, err == -1, ": failed to write during heap profile flush\n"); } @@ -707,36 +711,37 @@ prof_dump_close(prof_dump_arg_t *arg) { } #ifdef __APPLE__ -#include +# include -#ifdef __LP64__ -typedef struct mach_header_64 mach_header_t; +# ifdef __LP64__ +typedef struct mach_header_64 mach_header_t; typedef struct segment_command_64 segment_command_t; -#define MH_MAGIC_VALUE MH_MAGIC_64 -#define MH_CIGAM_VALUE MH_CIGAM_64 -#define LC_SEGMENT_VALUE LC_SEGMENT_64 -#else -typedef struct mach_header mach_header_t; +# define MH_MAGIC_VALUE MH_MAGIC_64 +# define MH_CIGAM_VALUE MH_CIGAM_64 +# define LC_SEGMENT_VALUE LC_SEGMENT_64 +# else +typedef struct mach_header mach_header_t; typedef struct segment_command segment_command_t; -#define MH_MAGIC_VALUE MH_MAGIC -#define MH_CIGAM_VALUE MH_CIGAM -#define LC_SEGMENT_VALUE LC_SEGMENT -#endif +# define MH_MAGIC_VALUE MH_MAGIC +# define MH_CIGAM_VALUE MH_CIGAM +# define LC_SEGMENT_VALUE LC_SEGMENT +# endif static void prof_dump_dyld_image_vmaddr(buf_writer_t *buf_writer, uint32_t image_index) { const mach_header_t *header = (const mach_header_t *) _dyld_get_image_header(image_index); - if (header == NULL || (header->magic != MH_MAGIC_VALUE && - header->magic != MH_CIGAM_VALUE)) { + if (header == NULL + || (header->magic != MH_MAGIC_VALUE + && header->magic != MH_CIGAM_VALUE)) { // Invalid header return; } - 
intptr_t slide = _dyld_get_image_vmaddr_slide(image_index); - const char *name = _dyld_get_image_name(image_index); - struct load_command *load_cmd = (struct load_command *) - ((char *)header + sizeof(mach_header_t)); + intptr_t slide = _dyld_get_image_vmaddr_slide(image_index); + const char *name = _dyld_get_image_name(image_index); + struct load_command *load_cmd = (struct load_command *)((char *)header + + sizeof(mach_header_t)); for (uint32_t i = 0; load_cmd && (i < header->ncmds); i++) { if (load_cmd->cmd == LC_SEGMENT_VALUE) { const segment_command_t *segment_cmd = @@ -744,14 +749,17 @@ prof_dump_dyld_image_vmaddr(buf_writer_t *buf_writer, uint32_t image_index) { if (!strcmp(segment_cmd->segname, "__TEXT")) { char buffer[PATH_MAX + 1]; malloc_snprintf(buffer, sizeof(buffer), - "%016llx-%016llx: %s\n", segment_cmd->vmaddr + slide, - segment_cmd->vmaddr + slide + segment_cmd->vmsize, name); + "%016llx-%016llx: %s\n", + segment_cmd->vmaddr + slide, + segment_cmd->vmaddr + slide + + segment_cmd->vmsize, + name); buf_writer_cb(buf_writer, buffer); return; } } - load_cmd = - (struct load_command *)((char *)load_cmd + load_cmd->cmdsize); + load_cmd = (struct load_command *)((char *)load_cmd + + load_cmd->cmdsize); } } @@ -772,48 +780,48 @@ prof_dump_maps(buf_writer_t *buf_writer) { prof_dump_dyld_maps(buf_writer); } #else /* !__APPLE__ */ -#ifndef _WIN32 +# ifndef _WIN32 JEMALLOC_FORMAT_PRINTF(1, 2) static int prof_open_maps_internal(const char *format, ...) 
{ - int mfd; + int mfd; va_list ap; - char filename[PATH_MAX + 1]; + char filename[PATH_MAX + 1]; va_start(ap, format); malloc_vsnprintf(filename, sizeof(filename), format, ap); va_end(ap); -#if defined(O_CLOEXEC) +# if defined(O_CLOEXEC) mfd = open(filename, O_RDONLY | O_CLOEXEC); -#else +# else mfd = open(filename, O_RDONLY); if (mfd != -1) { fcntl(mfd, F_SETFD, fcntl(mfd, F_GETFD) | FD_CLOEXEC); } -#endif +# endif return mfd; } -#endif +# endif static int prof_dump_open_maps_impl(void) { int mfd; cassert(config_prof); -#if defined(__FreeBSD__) || defined(__DragonFly__) +# if defined(__FreeBSD__) || defined(__DragonFly__) mfd = prof_open_maps_internal("/proc/curproc/map"); -#elif defined(_WIN32) +# elif defined(_WIN32) mfd = -1; // Not implemented -#else +# else int pid = prof_getpid(); mfd = prof_open_maps_internal("/proc/%d/task/%d/maps", pid, pid); if (mfd == -1) { mfd = prof_open_maps_internal("/proc/%d/maps", pid); } -#endif +# endif return mfd; } prof_dump_open_maps_t *JET_MUTABLE prof_dump_open_maps = @@ -840,12 +848,12 @@ prof_dump_maps(buf_writer_t *buf_writer) { #endif /* __APPLE__ */ static bool -prof_dump(tsd_t *tsd, bool propagate_err, const char *filename, - bool leakcheck) { +prof_dump( + tsd_t *tsd, bool propagate_err, const char *filename, bool leakcheck) { cassert(config_prof); assert(tsd_reentrancy_level_get(tsd) == 0); - prof_tdata_t * tdata = prof_tdata_get(tsd, true); + prof_tdata_t *tdata = prof_tdata_get(tsd, true); if (tdata == NULL) { return true; } @@ -892,7 +900,7 @@ prof_strncpy(char *UNUSED dest, const char *UNUSED src, size_t UNUSED size) { } static const char * -prof_prefix_get(tsdn_t* tsdn) { +prof_prefix_get(tsdn_t *tsdn) { malloc_mutex_assert_owner(tsdn, &prof_dump_filename_mtx); return prof_prefix == NULL ? 
opt_prof_prefix : prof_prefix; @@ -919,25 +927,26 @@ prof_dump_filename(tsd_t *tsd, char *filename, char v, uint64_t vseq) { if (opt_prof_pid_namespace) { /* "....v.heap" */ malloc_snprintf(filename, DUMP_FILENAME_BUFSIZE, - "%s.%ld.%d.%"FMTu64".%c%"FMTu64".heap", prefix, - prof_get_pid_namespace(), prof_getpid(), prof_dump_seq, v, - vseq); + "%s.%ld.%d.%" FMTu64 ".%c%" FMTu64 ".heap", prefix, + prof_get_pid_namespace(), prof_getpid(), + prof_dump_seq, v, vseq); } else { /* "...v.heap" */ malloc_snprintf(filename, DUMP_FILENAME_BUFSIZE, - "%s.%d.%"FMTu64".%c%"FMTu64".heap", prefix, prof_getpid(), - prof_dump_seq, v, vseq); + "%s.%d.%" FMTu64 ".%c%" FMTu64 ".heap", prefix, + prof_getpid(), prof_dump_seq, v, vseq); } } else { if (opt_prof_pid_namespace) { /* ".....heap" */ malloc_snprintf(filename, DUMP_FILENAME_BUFSIZE, - "%s.%ld.%d.%"FMTu64".%c.heap", prefix, - prof_get_pid_namespace(), prof_getpid(), prof_dump_seq, v); + "%s.%ld.%d.%" FMTu64 ".%c.heap", prefix, + prof_get_pid_namespace(), prof_getpid(), + prof_dump_seq, v); } else { /* "....heap" */ malloc_snprintf(filename, DUMP_FILENAME_BUFSIZE, - "%s.%d.%"FMTu64".%c.heap", prefix, prof_getpid(), + "%s.%d.%" FMTu64 ".%c.heap", prefix, prof_getpid(), prof_dump_seq, v); } } @@ -949,11 +958,12 @@ prof_get_default_filename(tsdn_t *tsdn, char *filename, uint64_t ind) { malloc_mutex_lock(tsdn, &prof_dump_filename_mtx); if (opt_prof_pid_namespace) { malloc_snprintf(filename, PROF_DUMP_FILENAME_LEN, - "%s.%ld.%d.%"FMTu64".json", prof_prefix_get(tsdn), + "%s.%ld.%d.%" FMTu64 ".json", prof_prefix_get(tsdn), prof_get_pid_namespace(), prof_getpid(), ind); } else { malloc_snprintf(filename, PROF_DUMP_FILENAME_LEN, - "%s.%d.%"FMTu64".json", prof_prefix_get(tsdn), prof_getpid(), ind); + "%s.%d.%" FMTu64 ".json", prof_prefix_get(tsdn), + prof_getpid(), ind); } malloc_mutex_unlock(tsdn, &prof_dump_filename_mtx); } @@ -980,8 +990,8 @@ prof_prefix_set(tsdn_t *tsdn, const char *prefix) { if (prof_prefix == NULL) { 
malloc_mutex_unlock(tsdn, &prof_dump_filename_mtx); /* Everything is still guarded by ctl_mtx. */ - char *buffer = base_alloc(tsdn, prof_base, - PROF_DUMP_FILENAME_LEN, QUANTUM); + char *buffer = base_alloc( + tsdn, prof_base, PROF_DUMP_FILENAME_LEN, QUANTUM); if (buffer == NULL) { return true; } @@ -1018,7 +1028,8 @@ prof_mdump_impl(tsd_t *tsd, const char *filename) { /* No filename specified, so automatically generate one. */ malloc_mutex_lock(tsd_tsdn(tsd), &prof_dump_filename_mtx); if (prof_prefix_get(tsd_tsdn(tsd))[0] == '\0') { - malloc_mutex_unlock(tsd_tsdn(tsd), &prof_dump_filename_mtx); + malloc_mutex_unlock( + tsd_tsdn(tsd), &prof_dump_filename_mtx); return true; } prof_dump_filename(tsd, filename_buf, 'm', prof_dump_mseq); diff --git a/src/prof_threshold.c b/src/prof_threshold.c index 0b5cb53c..5b72a491 100644 --- a/src/prof_threshold.c +++ b/src/prof_threshold.c @@ -22,8 +22,8 @@ prof_threshold_hook_set(prof_threshold_hook_t hook) { prof_threshold_hook_t prof_threshold_hook_get(void) { - return (prof_threshold_hook_t)atomic_load_p(&prof_threshold_hook, - ATOMIC_ACQUIRE); + return (prof_threshold_hook_t)atomic_load_p( + &prof_threshold_hook, ATOMIC_ACQUIRE); } /* Invoke callback for threshold reached */ @@ -32,10 +32,10 @@ prof_threshold_update(tsd_t *tsd) { prof_threshold_hook_t prof_threshold_hook = prof_threshold_hook_get(); if (prof_threshold_hook == NULL) { return; - } + } uint64_t alloc = tsd_thread_allocated_get(tsd); uint64_t dalloc = tsd_thread_deallocated_get(tsd); - peak_t *peak = tsd_peakp_get(tsd); + peak_t *peak = tsd_peakp_get(tsd); pre_reentrancy(tsd, NULL); prof_threshold_hook(alloc, dalloc, peak->cur_max); post_reentrancy(tsd); @@ -62,8 +62,8 @@ prof_threshold_enabled(void) { } te_base_cb_t prof_threshold_te_handler = { - .enabled = &prof_threshold_enabled, - .new_event_wait = &prof_threshold_new_event_wait, - .postponed_event_wait = &prof_threshold_postponed_event_wait, - .event_handler = &prof_threshold_event_handler, + .enabled = 
&prof_threshold_enabled, + .new_event_wait = &prof_threshold_new_event_wait, + .postponed_event_wait = &prof_threshold_postponed_event_wait, + .event_handler = &prof_threshold_event_handler, }; diff --git a/src/psset.c b/src/psset.c index afe9f1c1..509df064 100644 --- a/src/psset.c +++ b/src/psset.c @@ -32,16 +32,16 @@ psset_stats_accum(psset_stats_t *dst, psset_stats_t *src) { psset_bin_stats_accum(&dst->merged, &src->merged); for (int huge = 0; huge < PSSET_NHUGE; huge++) { psset_bin_stats_accum(&dst->slabs[huge], &src->slabs[huge]); - psset_bin_stats_accum(&dst->full_slabs[huge], - &src->full_slabs[huge]); - psset_bin_stats_accum(&dst->empty_slabs[huge], - &src->empty_slabs[huge]); + psset_bin_stats_accum( + &dst->full_slabs[huge], &src->full_slabs[huge]); + psset_bin_stats_accum( + &dst->empty_slabs[huge], &src->empty_slabs[huge]); } for (pszind_t i = 0; i < PSSET_NPSIZES; i++) { - psset_bin_stats_accum(&dst->nonfull_slabs[i][0], - &src->nonfull_slabs[i][0]); - psset_bin_stats_accum(&dst->nonfull_slabs[i][1], - &src->nonfull_slabs[i][1]); + psset_bin_stats_accum( + &dst->nonfull_slabs[i][0], &src->nonfull_slabs[i][0]); + psset_bin_stats_accum( + &dst->nonfull_slabs[i][1], &src->nonfull_slabs[i][1]); } } @@ -83,10 +83,10 @@ psset_slab_stats_insert_remove(psset_stats_t *stats, if (config_debug) { psset_bin_stats_t check_stats[PSSET_NHUGE] = {{0}}; for (int huge = 0; huge < PSSET_NHUGE; huge++) { - psset_bin_stats_accum(&check_stats[huge], - &stats->full_slabs[huge]); - psset_bin_stats_accum(&check_stats[huge], - &stats->empty_slabs[huge]); + psset_bin_stats_accum( + &check_stats[huge], &stats->full_slabs[huge]); + psset_bin_stats_accum( + &check_stats[huge], &stats->empty_slabs[huge]); for (pszind_t pind = 0; pind < PSSET_NPSIZES; pind++) { psset_bin_stats_accum(&check_stats[huge], &stats->nonfull_slabs[pind][huge]); @@ -112,14 +112,14 @@ psset_slab_stats_insert_remove(psset_stats_t *stats, } static void -psset_slab_stats_insert(psset_stats_t *stats, 
psset_bin_stats_t *binstats, - hpdata_t *ps) { +psset_slab_stats_insert( + psset_stats_t *stats, psset_bin_stats_t *binstats, hpdata_t *ps) { psset_slab_stats_insert_remove(stats, binstats, ps, true); } static void -psset_slab_stats_remove(psset_stats_t *stats, psset_bin_stats_t *binstats, - hpdata_t *ps) { +psset_slab_stats_remove( + psset_stats_t *stats, psset_bin_stats_t *binstats, hpdata_t *ps) { psset_slab_stats_insert_remove(stats, binstats, ps, false); } @@ -127,9 +127,9 @@ static pszind_t psset_hpdata_heap_index(const hpdata_t *ps) { assert(!hpdata_full(ps)); assert(!hpdata_empty(ps)); - size_t longest_free_range = hpdata_longest_free_range_get(ps); - pszind_t pind = sz_psz2ind(sz_psz_quantize_floor( - longest_free_range << LG_PAGE)); + size_t longest_free_range = hpdata_longest_free_range_get(ps); + pszind_t pind = sz_psz2ind( + sz_psz_quantize_floor(longest_free_range << LG_PAGE)); assert(pind < PSSET_NPSIZES); return pind; } @@ -161,8 +161,8 @@ psset_stats_insert(psset_t *psset, hpdata_t *ps) { psset_slab_stats_insert(stats, psset->stats.full_slabs, ps); } else { pszind_t pind = psset_hpdata_heap_index(ps); - psset_slab_stats_insert(stats, psset->stats.nonfull_slabs[pind], - ps); + psset_slab_stats_insert( + stats, psset->stats.nonfull_slabs[pind], ps); } } @@ -175,8 +175,8 @@ psset_stats_remove(psset_t *psset, hpdata_t *ps) { psset_slab_stats_remove(stats, psset->stats.full_slabs, ps); } else { pszind_t pind = psset_hpdata_heap_index(ps); - psset_slab_stats_remove(stats, psset->stats.nonfull_slabs[pind], - ps); + psset_slab_stats_remove( + stats, psset->stats.nonfull_slabs[pind], ps); } } @@ -264,7 +264,7 @@ psset_maybe_remove_purge_list(psset_t *psset, hpdata_t *ps) { * purge LRU within a given dirtiness bucket. 
*/ if (hpdata_purge_allowed_get(ps)) { - size_t ind = psset_purge_list_ind(ps); + size_t ind = psset_purge_list_ind(ps); hpdata_purge_list_t *purge_list = &psset->to_purge[ind]; hpdata_purge_list_remove(purge_list, ps); if (hpdata_purge_list_empty(purge_list)) { @@ -276,14 +276,13 @@ psset_maybe_remove_purge_list(psset_t *psset, hpdata_t *ps) { static void psset_maybe_insert_purge_list(psset_t *psset, hpdata_t *ps) { if (hpdata_purge_allowed_get(ps)) { - size_t ind = psset_purge_list_ind(ps); + size_t ind = psset_purge_list_ind(ps); hpdata_purge_list_t *purge_list = &psset->to_purge[ind]; if (hpdata_purge_list_empty(purge_list)) { fb_set(psset->purge_bitmap, PSSET_NPURGE_LISTS, ind); } hpdata_purge_list_append(purge_list, ps); } - } void @@ -343,13 +342,13 @@ psset_enumerate_search(psset_t *psset, pszind_t pind, size_t size) { return NULL; } - hpdata_t *ps = NULL; + hpdata_t *ps = NULL; hpdata_age_heap_enumerate_helper_t helper; hpdata_age_heap_enumerate_prepare(&psset->pageslabs[pind], &helper, PSSET_ENUMERATE_MAX_NUM, sizeof(helper.bfs_queue) / sizeof(void *)); - while ((ps = hpdata_age_heap_enumerate_next(&psset->pageslabs[pind], - &helper))) { + while ((ps = hpdata_age_heap_enumerate_next( + &psset->pageslabs[pind], &helper))) { if (hpdata_longest_free_range_get(ps) >= size) { return ps; } @@ -363,7 +362,7 @@ psset_pick_alloc(psset_t *psset, size_t size) { assert((size & PAGE_MASK) == 0); assert(size <= HUGEPAGE); - pszind_t min_pind = sz_psz2ind(sz_psz_quantize_ceil(size)); + pszind_t min_pind = sz_psz2ind(sz_psz_quantize_ceil(size)); hpdata_t *ps = NULL; /* See comments in eset_first_fit for why we enumerate search below. 
*/ @@ -375,8 +374,8 @@ psset_pick_alloc(psset_t *psset, size_t size) { } } - pszind_t pind = (pszind_t)fb_ffs(psset->pageslab_bitmap, PSSET_NPSIZES, - (size_t)min_pind); + pszind_t pind = (pszind_t)fb_ffs( + psset->pageslab_bitmap, PSSET_NPSIZES, (size_t)min_pind); if (pind == PSSET_NPSIZES) { return hpdata_empty_list_first(&psset->empty); } @@ -392,8 +391,8 @@ psset_pick_alloc(psset_t *psset, size_t size) { hpdata_t * psset_pick_purge(psset_t *psset) { - ssize_t ind_ssz = fb_fls(psset->purge_bitmap, PSSET_NPURGE_LISTS, - PSSET_NPURGE_LISTS - 1); + ssize_t ind_ssz = fb_fls( + psset->purge_bitmap, PSSET_NPURGE_LISTS, PSSET_NPURGE_LISTS - 1); if (ind_ssz < 0) { return NULL; } diff --git a/src/rtree.c b/src/rtree.c index b6ac04b7..ac27f829 100644 --- a/src/rtree.c +++ b/src/rtree.c @@ -20,7 +20,7 @@ rtree_new(rtree_t *rtree, base_t *base, bool zeroed) { rtree->base = base; if (malloc_mutex_init(&rtree->init_lock, "rtree", WITNESS_RANK_RTREE, - malloc_mutex_rank_exclusive)) { + malloc_mutex_rank_exclusive)) { return true; } @@ -29,19 +29,19 @@ rtree_new(rtree_t *rtree, base_t *base, bool zeroed) { static rtree_node_elm_t * rtree_node_alloc(tsdn_t *tsdn, rtree_t *rtree, size_t nelms) { - return (rtree_node_elm_t *)base_alloc_rtree(tsdn, rtree->base, - nelms * sizeof(rtree_node_elm_t)); + return (rtree_node_elm_t *)base_alloc_rtree( + tsdn, rtree->base, nelms * sizeof(rtree_node_elm_t)); } static rtree_leaf_elm_t * rtree_leaf_alloc(tsdn_t *tsdn, rtree_t *rtree, size_t nelms) { - return (rtree_leaf_elm_t *)base_alloc_rtree(tsdn, rtree->base, - nelms * sizeof(rtree_leaf_elm_t)); + return (rtree_leaf_elm_t *)base_alloc_rtree( + tsdn, rtree->base, nelms * sizeof(rtree_leaf_elm_t)); } static rtree_node_elm_t * -rtree_node_init(tsdn_t *tsdn, rtree_t *rtree, unsigned level, - atomic_p_t *elmp) { +rtree_node_init( + tsdn_t *tsdn, rtree_t *rtree, unsigned level, atomic_p_t *elmp) { malloc_mutex_lock(tsdn, &rtree->init_lock); /* * If *elmp is non-null, then it was initialized with 
the init lock @@ -49,8 +49,8 @@ rtree_node_init(tsdn_t *tsdn, rtree_t *rtree, unsigned level, */ rtree_node_elm_t *node = atomic_load_p(elmp, ATOMIC_RELAXED); if (node == NULL) { - node = rtree_node_alloc(tsdn, rtree, ZU(1) << - rtree_levels[level].bits); + node = rtree_node_alloc( + tsdn, rtree, ZU(1) << rtree_levels[level].bits); if (node == NULL) { malloc_mutex_unlock(tsdn, &rtree->init_lock); return NULL; @@ -75,8 +75,8 @@ rtree_leaf_init(tsdn_t *tsdn, rtree_t *rtree, atomic_p_t *elmp) { */ rtree_leaf_elm_t *leaf = atomic_load_p(elmp, ATOMIC_RELAXED); if (leaf == NULL) { - leaf = rtree_leaf_alloc(tsdn, rtree, ZU(1) << - rtree_levels[RTREE_HEIGHT-1].bits); + leaf = rtree_leaf_alloc( + tsdn, rtree, ZU(1) << rtree_levels[RTREE_HEIGHT - 1].bits); if (leaf == NULL) { malloc_mutex_unlock(tsdn, &rtree->init_lock); return NULL; @@ -107,11 +107,11 @@ rtree_child_node_tryread(rtree_node_elm_t *elm, bool dependent) { rtree_node_elm_t *node; if (dependent) { - node = (rtree_node_elm_t *)atomic_load_p(&elm->child, - ATOMIC_RELAXED); + node = (rtree_node_elm_t *)atomic_load_p( + &elm->child, ATOMIC_RELAXED); } else { - node = (rtree_node_elm_t *)atomic_load_p(&elm->child, - ATOMIC_ACQUIRE); + node = (rtree_node_elm_t *)atomic_load_p( + &elm->child, ATOMIC_ACQUIRE); } assert(!dependent || node != NULL); @@ -136,11 +136,11 @@ rtree_child_leaf_tryread(rtree_node_elm_t *elm, bool dependent) { rtree_leaf_elm_t *leaf; if (dependent) { - leaf = (rtree_leaf_elm_t *)atomic_load_p(&elm->child, - ATOMIC_RELAXED); + leaf = (rtree_leaf_elm_t *)atomic_load_p( + &elm->child, ATOMIC_RELAXED); } else { - leaf = (rtree_leaf_elm_t *)atomic_load_p(&elm->child, - ATOMIC_ACQUIRE); + leaf = (rtree_leaf_elm_t *)atomic_load_p( + &elm->child, ATOMIC_ACQUIRE); } assert(!dependent || leaf != NULL); @@ -181,53 +181,54 @@ rtree_leaf_elm_lookup_hard(tsdn_t *tsdn, rtree_t *rtree, rtree_ctx_t *rtree_ctx, } } -#define RTREE_GET_CHILD(level) { \ - assert(level < RTREE_HEIGHT-1); \ - if (level != 0 && 
!dependent && \ - unlikely(!rtree_node_valid(node))) { \ - return NULL; \ - } \ - uintptr_t subkey = rtree_subkey(key, level); \ - if (level + 2 < RTREE_HEIGHT) { \ - node = init_missing ? \ - rtree_child_node_read(tsdn, rtree, \ - &node[subkey], level, dependent) : \ - rtree_child_node_tryread(&node[subkey], \ - dependent); \ - } else { \ - leaf = init_missing ? \ - rtree_child_leaf_read(tsdn, rtree, \ - &node[subkey], level, dependent) : \ - rtree_child_leaf_tryread(&node[subkey], \ - dependent); \ - } \ +#define RTREE_GET_CHILD(level) \ + { \ + assert(level < RTREE_HEIGHT - 1); \ + if (level != 0 && !dependent \ + && unlikely(!rtree_node_valid(node))) { \ + return NULL; \ + } \ + uintptr_t subkey = rtree_subkey(key, level); \ + if (level + 2 < RTREE_HEIGHT) { \ + node = init_missing \ + ? rtree_child_node_read(tsdn, rtree, \ + &node[subkey], level, dependent) \ + : rtree_child_node_tryread( \ + &node[subkey], dependent); \ + } else { \ + leaf = init_missing \ + ? rtree_child_leaf_read(tsdn, rtree, \ + &node[subkey], level, dependent) \ + : rtree_child_leaf_tryread( \ + &node[subkey], dependent); \ + } \ } /* * Cache replacement upon hard lookup (i.e. L1 & L2 rtree cache miss): * (1) evict last entry in L2 cache; (2) move the collision slot from L1 * cache down to L2; and 3) fill L1. 
*/ -#define RTREE_GET_LEAF(level) { \ - assert(level == RTREE_HEIGHT-1); \ - if (!dependent && unlikely(!rtree_leaf_valid(leaf))) { \ - return NULL; \ - } \ - if (RTREE_CTX_NCACHE_L2 > 1) { \ - memmove(&rtree_ctx->l2_cache[1], \ - &rtree_ctx->l2_cache[0], \ - sizeof(rtree_ctx_cache_elm_t) * \ - (RTREE_CTX_NCACHE_L2 - 1)); \ - } \ - size_t slot = rtree_cache_direct_map(key); \ - rtree_ctx->l2_cache[0].leafkey = \ - rtree_ctx->cache[slot].leafkey; \ - rtree_ctx->l2_cache[0].leaf = \ - rtree_ctx->cache[slot].leaf; \ - uintptr_t leafkey = rtree_leafkey(key); \ - rtree_ctx->cache[slot].leafkey = leafkey; \ - rtree_ctx->cache[slot].leaf = leaf; \ - uintptr_t subkey = rtree_subkey(key, level); \ - return &leaf[subkey]; \ +#define RTREE_GET_LEAF(level) \ + { \ + assert(level == RTREE_HEIGHT - 1); \ + if (!dependent && unlikely(!rtree_leaf_valid(leaf))) { \ + return NULL; \ + } \ + if (RTREE_CTX_NCACHE_L2 > 1) { \ + memmove(&rtree_ctx->l2_cache[1], \ + &rtree_ctx->l2_cache[0], \ + sizeof(rtree_ctx_cache_elm_t) \ + * (RTREE_CTX_NCACHE_L2 - 1)); \ + } \ + size_t slot = rtree_cache_direct_map(key); \ + rtree_ctx->l2_cache[0].leafkey = \ + rtree_ctx->cache[slot].leafkey; \ + rtree_ctx->l2_cache[0].leaf = rtree_ctx->cache[slot].leaf; \ + uintptr_t leafkey = rtree_leafkey(key); \ + rtree_ctx->cache[slot].leafkey = leafkey; \ + rtree_ctx->cache[slot].leaf = leaf; \ + uintptr_t subkey = rtree_subkey(key, level); \ + return &leaf[subkey]; \ } if (RTREE_HEIGHT > 1) { RTREE_GET_CHILD(0) @@ -236,11 +237,11 @@ rtree_leaf_elm_lookup_hard(tsdn_t *tsdn, rtree_t *rtree, rtree_ctx_t *rtree_ctx, RTREE_GET_CHILD(1) } if (RTREE_HEIGHT > 3) { - for (unsigned i = 2; i < RTREE_HEIGHT-1; i++) { + for (unsigned i = 2; i < RTREE_HEIGHT - 1; i++) { RTREE_GET_CHILD(i) } } - RTREE_GET_LEAF(RTREE_HEIGHT-1) + RTREE_GET_LEAF(RTREE_HEIGHT - 1) #undef RTREE_GET_CHILD #undef RTREE_GET_LEAF not_reached(); diff --git a/src/safety_check.c b/src/safety_check.c index d3f68fbc..d052718d 100644 --- 
a/src/safety_check.c +++ b/src/safety_check.c @@ -3,20 +3,24 @@ static safety_check_abort_hook_t safety_check_abort; -void safety_check_fail_sized_dealloc(bool current_dealloc, const void *ptr, +void +safety_check_fail_sized_dealloc(bool current_dealloc, const void *ptr, size_t true_size, size_t input_size) { - char *src = current_dealloc ? "the current pointer being freed" : - "in thread cache, possibly from previous deallocations"; + char *src = current_dealloc + ? "the current pointer being freed" + : "in thread cache, possibly from previous deallocations"; char *suggest_debug_build = config_debug ? "" : " --enable-debug or"; - safety_check_fail(": size mismatch detected (true size %zu " + safety_check_fail( + ": size mismatch detected (true size %zu " "vs input size %zu), likely caused by application sized " "deallocation bugs (source address: %p, %s). Suggest building with" "%s address sanitizer for debugging. Abort.\n", true_size, input_size, ptr, src, suggest_debug_build); } -void safety_check_set_abort(safety_check_abort_hook_t abort_fn) { +void +safety_check_set_abort(safety_check_abort_hook_t abort_fn) { safety_check_abort = abort_fn; } @@ -25,7 +29,8 @@ void safety_check_set_abort(safety_check_abort_hook_t abort_fn) { * because there are cases only logging crash stack traces. */ static void -safety_check_detected_heap_corruption___run_address_sanitizer_build_to_debug(const char *buf) { +safety_check_detected_heap_corruption___run_address_sanitizer_build_to_debug( + const char *buf) { if (safety_check_abort == NULL) { malloc_write(buf); abort(); @@ -34,7 +39,8 @@ safety_check_detected_heap_corruption___run_address_sanitizer_build_to_debug(con } } -void safety_check_fail(const char *format, ...) { +void +safety_check_fail(const char *format, ...) { char buf[MALLOC_PRINTF_BUFSIZE]; va_list ap; @@ -42,5 +48,6 @@ void safety_check_fail(const char *format, ...) 
{ malloc_vsnprintf(buf, MALLOC_PRINTF_BUFSIZE, format, ap); va_end(ap); - safety_check_detected_heap_corruption___run_address_sanitizer_build_to_debug(buf); + safety_check_detected_heap_corruption___run_address_sanitizer_build_to_debug( + buf); } diff --git a/src/san.c b/src/san.c index 28ea3d7c..5448c67f 100644 --- a/src/san.c +++ b/src/san.c @@ -20,8 +20,8 @@ ssize_t opt_lg_san_uaf_align = SAN_LG_UAF_ALIGN_DEFAULT; uintptr_t san_cache_bin_nonfast_mask = SAN_CACHE_BIN_NONFAST_MASK_DEFAULT; static inline void -san_find_guarded_addr(edata_t *edata, void **guard1, void **guard2, - void **addr, size_t size, bool left, bool right) { +san_find_guarded_addr(edata_t *edata, void **guard1, void **guard2, void **addr, + size_t size, bool left, bool right) { assert(!edata_guarded_get(edata)); assert(size % PAGE == 0); *addr = edata_base_get(edata); @@ -74,8 +74,8 @@ san_guard_pages(tsdn_t *tsdn, ehooks_t *ehooks, edata_t *edata, emap_t *emap, : san_one_side_unguarded_sz(size_with_guards); void *guard1, *guard2, *addr; - san_find_guarded_addr(edata, &guard1, &guard2, &addr, usize, left, - right); + san_find_guarded_addr( + edata, &guard1, &guard2, &addr, usize, left, right); assert(edata_state_get(edata) == extent_state_active); ehooks_guard(tsdn, ehooks, guard1, guard2); @@ -109,8 +109,8 @@ san_unguard_pages_impl(tsdn_t *tsdn, ehooks_t *ehooks, edata_t *edata, : san_one_side_guarded_sz(size); void *guard1, *guard2, *addr; - san_find_unguarded_addr(edata, &guard1, &guard2, &addr, size, left, - right); + san_find_unguarded_addr( + edata, &guard1, &guard2, &addr, size, left, right); ehooks_unguard(tsdn, ehooks, (void *)guard1, (void *)guard2); @@ -130,15 +130,15 @@ san_unguard_pages_impl(tsdn_t *tsdn, ehooks_t *ehooks, edata_t *edata, } void -san_unguard_pages(tsdn_t *tsdn, ehooks_t *ehooks, edata_t *edata, - emap_t *emap, bool left, bool right) { +san_unguard_pages(tsdn_t *tsdn, ehooks_t *ehooks, edata_t *edata, emap_t *emap, + bool left, bool right) { 
san_unguard_pages_impl(tsdn, ehooks, edata, emap, left, right, /* remap */ true); } void -san_unguard_pages_pre_destroy(tsdn_t *tsdn, ehooks_t *ehooks, edata_t *edata, - emap_t *emap) { +san_unguard_pages_pre_destroy( + tsdn_t *tsdn, ehooks_t *ehooks, edata_t *edata, emap_t *emap) { emap_assert_not_mapped(tsdn, emap, edata); /* * We don't want to touch the emap of about to be destroyed extents, as @@ -146,7 +146,7 @@ san_unguard_pages_pre_destroy(tsdn_t *tsdn, ehooks_t *ehooks, edata_t *edata, * we unguard the extents to the right, because retained extents only * own their right guard page per san_bump_alloc's logic. */ - san_unguard_pages_impl(tsdn, ehooks, edata, emap, /* left */ false, + san_unguard_pages_impl(tsdn, ehooks, edata, emap, /* left */ false, /* right */ true, /* remap */ false); } @@ -163,9 +163,9 @@ san_stashed_corrupted(void *ptr, size_t size) { void *first, *mid, *last; san_junk_ptr_locations(ptr, size, &first, &mid, &last); - if (*(uintptr_t *)first != uaf_detect_junk || - *(uintptr_t *)mid != uaf_detect_junk || - *(uintptr_t *)last != uaf_detect_junk) { + if (*(uintptr_t *)first != uaf_detect_junk + || *(uintptr_t *)mid != uaf_detect_junk + || *(uintptr_t *)last != uaf_detect_junk) { return true; } @@ -183,7 +183,8 @@ san_check_stashed_ptrs(void **ptrs, size_t nstashed, size_t usize) { assert(stashed != NULL); assert(cache_bin_nonfast_aligned(stashed)); if (unlikely(san_stashed_corrupted(stashed, usize))) { - safety_check_fail(": Write-after-free " + safety_check_fail( + ": Write-after-free " "detected on deallocated pointer %p (size %zu).\n", stashed, usize); } diff --git a/src/san_bump.c b/src/san_bump.c index 88897455..09ed18ca 100644 --- a/src/san_bump.c +++ b/src/san_bump.c @@ -7,30 +7,29 @@ #include "jemalloc/internal/ehooks.h" #include "jemalloc/internal/edata_cache.h" -static bool -san_bump_grow_locked(tsdn_t *tsdn, san_bump_alloc_t *sba, pac_t *pac, - ehooks_t *ehooks, size_t size); +static bool san_bump_grow_locked(tsdn_t *tsdn, 
san_bump_alloc_t *sba, + pac_t *pac, ehooks_t *ehooks, size_t size); edata_t * -san_bump_alloc(tsdn_t *tsdn, san_bump_alloc_t* sba, pac_t *pac, +san_bump_alloc(tsdn_t *tsdn, san_bump_alloc_t *sba, pac_t *pac, ehooks_t *ehooks, size_t size, bool zero) { assert(san_bump_enabled()); - edata_t* to_destroy; - size_t guarded_size = san_one_side_guarded_sz(size); + edata_t *to_destroy; + size_t guarded_size = san_one_side_guarded_sz(size); malloc_mutex_lock(tsdn, &sba->mtx); - if (sba->curr_reg == NULL || - edata_size_get(sba->curr_reg) < guarded_size) { + if (sba->curr_reg == NULL + || edata_size_get(sba->curr_reg) < guarded_size) { /* * If the current region can't accommodate the allocation, * try replacing it with a larger one and destroy current if the * replacement succeeds. */ to_destroy = sba->curr_reg; - bool err = san_bump_grow_locked(tsdn, sba, pac, ehooks, - guarded_size); + bool err = san_bump_grow_locked( + tsdn, sba, pac, ehooks, guarded_size); if (err) { goto label_err; } @@ -40,9 +39,9 @@ san_bump_alloc(tsdn_t *tsdn, san_bump_alloc_t* sba, pac_t *pac, assert(guarded_size <= edata_size_get(sba->curr_reg)); size_t trail_size = edata_size_get(sba->curr_reg) - guarded_size; - edata_t* edata; + edata_t *edata; if (trail_size != 0) { - edata_t* curr_reg_trail = extent_split_wrapper(tsdn, pac, + edata_t *curr_reg_trail = extent_split_wrapper(tsdn, pac, ehooks, sba->curr_reg, guarded_size, trail_size, /* holding_core_locks */ true); if (curr_reg_trail == NULL) { @@ -69,9 +68,8 @@ san_bump_alloc(tsdn_t *tsdn, san_bump_alloc_t* sba, pac_t *pac, /* right */ true, /* remap */ true); if (extent_commit_zero(tsdn, ehooks, edata, /* commit */ true, zero, - /* growing_retained */ false)) { - extent_record(tsdn, pac, ehooks, &pac->ecache_retained, - edata); + /* growing_retained */ false)) { + extent_record(tsdn, pac, ehooks, &pac->ecache_retained, edata); return NULL; } @@ -90,9 +88,10 @@ san_bump_grow_locked(tsdn_t *tsdn, san_bump_alloc_t *sba, pac_t *pac, ehooks_t 
*ehooks, size_t size) { malloc_mutex_assert_owner(tsdn, &sba->mtx); - bool committed = false, zeroed = false; - size_t alloc_size = size > SBA_RETAINED_ALLOC_SIZE ? size : - SBA_RETAINED_ALLOC_SIZE; + bool committed = false, zeroed = false; + size_t alloc_size = size > SBA_RETAINED_ALLOC_SIZE + ? size + : SBA_RETAINED_ALLOC_SIZE; assert((alloc_size & PAGE_MASK) == 0); sba->curr_reg = extent_alloc_wrapper(tsdn, pac, ehooks, NULL, alloc_size, PAGE, zeroed, &committed, diff --git a/src/sc.c b/src/sc.c index e4a94d89..014ab95d 100644 --- a/src/sc.c +++ b/src/sc.c @@ -27,7 +27,7 @@ slab_size(int lg_page, int lg_base, int lg_delta, int ndelta) { size_t try_slab_size = page; size_t try_nregs = try_slab_size / reg_size; size_t perfect_slab_size = 0; - bool perfect = false; + bool perfect = false; /* * This loop continues until we find the least common multiple of the * page size and size class size. Size classes are all of the form @@ -106,7 +106,7 @@ size_classes( /* Outputs that we update as we go. */ size_t lookup_maxclass = 0; size_t small_maxclass = 0; - int lg_large_minclass = 0; + int lg_large_minclass = 0; size_t large_maxclass = 0; /* Tiny size classes. */ @@ -209,7 +209,7 @@ size_classes( lg_delta++; } /* Additional outputs. */ - int nsizes = index; + int nsizes = index; unsigned lg_ceil_nsizes = lg_ceil(nsizes); /* Fill in the output data. 
*/ @@ -292,8 +292,8 @@ sc_data_update_slab_size(sc_data_t *data, size_t begin, size_t end, int pgs) { if (!sc->bin) { break; } - size_t reg_size = reg_size_compute(sc->lg_base, sc->lg_delta, - sc->ndelta); + size_t reg_size = reg_size_compute( + sc->lg_base, sc->lg_delta, sc->ndelta); if (begin <= reg_size && reg_size <= end) { sc_data_update_sc_slab_size(sc, reg_size, pgs); } diff --git a/src/sec.c b/src/sec.c index 67585a71..36cd2dcc 100644 --- a/src/sec.c +++ b/src/sec.c @@ -6,12 +6,12 @@ static edata_t *sec_alloc(tsdn_t *tsdn, pai_t *self, size_t size, size_t alignment, bool zero, bool guarded, bool frequent_reuse, bool *deferred_work_generated); -static bool sec_expand(tsdn_t *tsdn, pai_t *self, edata_t *edata, - size_t old_size, size_t new_size, bool zero, bool *deferred_work_generated); -static bool sec_shrink(tsdn_t *tsdn, pai_t *self, edata_t *edata, - size_t old_size, size_t new_size, bool *deferred_work_generated); -static void sec_dalloc(tsdn_t *tsdn, pai_t *self, edata_t *edata, - bool *deferred_work_generated); +static bool sec_expand(tsdn_t *tsdn, pai_t *self, edata_t *edata, + size_t old_size, size_t new_size, bool zero, bool *deferred_work_generated); +static bool sec_shrink(tsdn_t *tsdn, pai_t *self, edata_t *edata, + size_t old_size, size_t new_size, bool *deferred_work_generated); +static void sec_dalloc( + tsdn_t *tsdn, pai_t *self, edata_t *edata, bool *deferred_work_generated); static void sec_bin_init(sec_bin_t *bin) { @@ -29,16 +29,16 @@ sec_init(tsdn_t *tsdn, sec_t *sec, base_t *base, pai_t *fallback, * USIZE_GROW_SLOW_THRESHOLD because the usize above this increases * by PAGE and the number of usizes is too large. 
*/ - assert(!sz_large_size_classes_disabled() || - opts->max_alloc <= USIZE_GROW_SLOW_THRESHOLD); + assert(!sz_large_size_classes_disabled() + || opts->max_alloc <= USIZE_GROW_SLOW_THRESHOLD); - size_t max_alloc = PAGE_FLOOR(opts->max_alloc); + size_t max_alloc = PAGE_FLOOR(opts->max_alloc); pszind_t npsizes = sz_psz2ind(max_alloc) + 1; size_t sz_shards = opts->nshards * sizeof(sec_shard_t); size_t sz_bins = opts->nshards * (size_t)npsizes * sizeof(sec_bin_t); size_t sz_alloc = sz_shards + sz_bins; - void *dynalloc = base_alloc(tsdn, base, sz_alloc, CACHELINE); + void *dynalloc = base_alloc(tsdn, base, sz_alloc, CACHELINE); if (dynalloc == NULL) { return true; } @@ -74,7 +74,6 @@ sec_init(tsdn_t *tsdn, sec_t *sec, base_t *base, pai_t *fallback, assert((char *)bin_cur == ((char *)dynalloc + sz_alloc)); sec->fallback = fallback; - sec->opts = *opts; sec->npsizes = npsizes; @@ -102,7 +101,7 @@ sec_shard_pick(tsdn_t *tsdn, sec_t *sec) { if (tsdn_null(tsdn)) { return &sec->shards[0]; } - tsd_t *tsd = tsdn_tsd(tsdn); + tsd_t *tsd = tsdn_tsd(tsdn); uint8_t *idxp = tsd_sec_shardp_get(tsd); if (*idxp == (uint8_t)-1) { /* @@ -111,9 +110,10 @@ sec_shard_pick(tsdn_t *tsdn, sec_t *sec) { * number to store 32 bits, since we'll deliberately overflow * when we multiply by the number of shards. 
*/ - uint64_t rand32 = prng_lg_range_u64(tsd_prng_statep_get(tsd), 32); - uint32_t idx = - (uint32_t)((rand32 * (uint64_t)sec->opts.nshards) >> 32); + uint64_t rand32 = prng_lg_range_u64( + tsd_prng_statep_get(tsd), 32); + uint32_t idx = (uint32_t)((rand32 * (uint64_t)sec->opts.nshards) + >> 32); assert(idx < (uint32_t)sec->opts.nshards); *idxp = (uint8_t)idx; } @@ -157,13 +157,13 @@ sec_flush_some_and_unlock(tsdn_t *tsdn, sec_t *sec, sec_shard_t *shard) { malloc_mutex_unlock(tsdn, &shard->mtx); bool deferred_work_generated = false; - pai_dalloc_batch(tsdn, sec->fallback, &to_flush, - &deferred_work_generated); + pai_dalloc_batch( + tsdn, sec->fallback, &to_flush, &deferred_work_generated); } static edata_t * -sec_shard_alloc_locked(tsdn_t *tsdn, sec_t *sec, sec_shard_t *shard, - sec_bin_t *bin) { +sec_shard_alloc_locked( + tsdn_t *tsdn, sec_t *sec, sec_shard_t *shard, sec_bin_t *bin) { malloc_mutex_assert_owner(tsdn, &shard->mtx); if (!shard->enabled) { return NULL; @@ -186,7 +186,7 @@ sec_batch_fill_and_alloc(tsdn_t *tsdn, sec_t *sec, sec_shard_t *shard, edata_list_active_t result; edata_list_active_init(&result); - bool deferred_work_generated = false; + bool deferred_work_generated = false; size_t nalloc = pai_alloc_batch(tsdn, sec->fallback, size, 1 + sec->opts.batch_fill_extra, &result, frequent_reuse, &deferred_work_generated); @@ -243,8 +243,8 @@ sec_alloc(tsdn_t *tsdn, pai_t *self, size_t size, size_t alignment, bool zero, assert(pszind < sec->npsizes); sec_shard_t *shard = sec_shard_pick(tsdn, sec); - sec_bin_t *bin = &shard->bins[pszind]; - bool do_batch_fill = false; + sec_bin_t *bin = &shard->bins[pszind]; + bool do_batch_fill = false; malloc_mutex_lock(tsdn, &shard->mtx); edata_t *edata = sec_shard_alloc_locked(tsdn, sec, shard, bin); @@ -258,8 +258,8 @@ sec_alloc(tsdn_t *tsdn, pai_t *self, size_t size, size_t alignment, bool zero, malloc_mutex_unlock(tsdn, &shard->mtx); if (edata == NULL) { if (do_batch_fill) { - edata = 
sec_batch_fill_and_alloc(tsdn, sec, shard, bin, - size, frequent_reuse); + edata = sec_batch_fill_and_alloc( + tsdn, sec, shard, bin, size, frequent_reuse); } else { edata = pai_alloc(tsdn, sec->fallback, size, alignment, zero, /* guarded */ false, frequent_reuse, @@ -304,16 +304,16 @@ sec_flush_all_locked(tsdn_t *tsdn, sec_t *sec, sec_shard_t *shard) { * rare pathways. */ bool deferred_work_generated = false; - pai_dalloc_batch(tsdn, sec->fallback, &to_flush, - &deferred_work_generated); + pai_dalloc_batch( + tsdn, sec->fallback, &to_flush, &deferred_work_generated); } static void -sec_shard_dalloc_and_unlock(tsdn_t *tsdn, sec_t *sec, sec_shard_t *shard, - edata_t *edata) { +sec_shard_dalloc_and_unlock( + tsdn_t *tsdn, sec_t *sec, sec_shard_t *shard, edata_t *edata) { malloc_mutex_assert_owner(tsdn, &shard->mtx); assert(shard->bytes_cur <= sec->opts.max_bytes); - size_t size = edata_size_get(edata); + size_t size = edata_size_get(edata); pszind_t pszind = sz_psz2ind(size); assert(pszind < sec->npsizes); /* @@ -342,13 +342,12 @@ sec_shard_dalloc_and_unlock(tsdn_t *tsdn, sec_t *sec, sec_shard_t *shard, } static void -sec_dalloc(tsdn_t *tsdn, pai_t *self, edata_t *edata, - bool *deferred_work_generated) { +sec_dalloc( + tsdn_t *tsdn, pai_t *self, edata_t *edata, bool *deferred_work_generated) { sec_t *sec = (sec_t *)self; if (sec->opts.nshards == 0 || edata_size_get(edata) > sec->opts.max_alloc) { - pai_dalloc(tsdn, sec->fallback, edata, - deferred_work_generated); + pai_dalloc(tsdn, sec->fallback, edata, deferred_work_generated); return; } sec_shard_t *shard = sec_shard_pick(tsdn, sec); @@ -357,8 +356,7 @@ sec_dalloc(tsdn_t *tsdn, pai_t *self, edata_t *edata, sec_shard_dalloc_and_unlock(tsdn, sec, shard, edata); } else { malloc_mutex_unlock(tsdn, &shard->mtx); - pai_dalloc(tsdn, sec->fallback, edata, - deferred_work_generated); + pai_dalloc(tsdn, sec->fallback, edata, deferred_work_generated); } } @@ -398,12 +396,12 @@ sec_stats_merge(tsdn_t *tsdn, sec_t *sec, 
sec_stats_t *stats) { } void -sec_mutex_stats_read(tsdn_t *tsdn, sec_t *sec, - mutex_prof_data_t *mutex_prof_data) { +sec_mutex_stats_read( + tsdn_t *tsdn, sec_t *sec, mutex_prof_data_t *mutex_prof_data) { for (size_t i = 0; i < sec->opts.nshards; i++) { malloc_mutex_lock(tsdn, &sec->shards[i].mtx); - malloc_mutex_prof_accum(tsdn, mutex_prof_data, - &sec->shards[i].mtx); + malloc_mutex_prof_accum( + tsdn, mutex_prof_data, &sec->shards[i].mtx); malloc_mutex_unlock(tsdn, &sec->shards[i].mtx); } } diff --git a/src/stats.c b/src/stats.c index b2a00319..84af3911 100644 --- a/src/stats.c +++ b/src/stats.c @@ -11,45 +11,49 @@ static const char *const global_mutex_names[mutex_prof_num_global_mutexes] = { #define OP(mtx) #mtx, - MUTEX_PROF_GLOBAL_MUTEXES + MUTEX_PROF_GLOBAL_MUTEXES #undef OP }; static const char *const arena_mutex_names[mutex_prof_num_arena_mutexes] = { #define OP(mtx) #mtx, - MUTEX_PROF_ARENA_MUTEXES + MUTEX_PROF_ARENA_MUTEXES #undef OP }; -#define CTL_GET(n, v, t) do { \ - size_t sz = sizeof(t); \ - xmallctl(n, (void *)v, &sz, NULL, 0); \ -} while (0) +#define CTL_GET(n, v, t) \ + do { \ + size_t sz = sizeof(t); \ + xmallctl(n, (void *)v, &sz, NULL, 0); \ + } while (0) -#define CTL_LEAF_PREPARE(mib, miblen, name) do { \ - assert(miblen < CTL_MAX_DEPTH); \ - size_t miblen_new = CTL_MAX_DEPTH; \ - xmallctlmibnametomib(mib, miblen, name, &miblen_new); \ - assert(miblen_new > miblen); \ -} while (0) +#define CTL_LEAF_PREPARE(mib, miblen, name) \ + do { \ + assert(miblen < CTL_MAX_DEPTH); \ + size_t miblen_new = CTL_MAX_DEPTH; \ + xmallctlmibnametomib(mib, miblen, name, &miblen_new); \ + assert(miblen_new > miblen); \ + } while (0) -#define CTL_LEAF(mib, miblen, leaf, v, t) do { \ - assert(miblen < CTL_MAX_DEPTH); \ - size_t miblen_new = CTL_MAX_DEPTH; \ - size_t sz = sizeof(t); \ - xmallctlbymibname(mib, miblen, leaf, &miblen_new, (void *)v, \ - &sz, NULL, 0); \ - assert(miblen_new == miblen + 1); \ -} while (0) +#define CTL_LEAF(mib, miblen, leaf, v, t) \ + 
do { \ + assert(miblen < CTL_MAX_DEPTH); \ + size_t miblen_new = CTL_MAX_DEPTH; \ + size_t sz = sizeof(t); \ + xmallctlbymibname( \ + mib, miblen, leaf, &miblen_new, (void *)v, &sz, NULL, 0); \ + assert(miblen_new == miblen + 1); \ + } while (0) -#define CTL_MIB_GET(n, i, v, t, ind) do { \ - size_t mib[CTL_MAX_DEPTH]; \ - size_t miblen = sizeof(mib) / sizeof(size_t); \ - size_t sz = sizeof(t); \ - xmallctlnametomib(n, mib, &miblen); \ - mib[(ind)] = (i); \ - xmallctlbymib(mib, miblen, (void *)v, &sz, NULL, 0); \ -} while (0) +#define CTL_MIB_GET(n, i, v, t, ind) \ + do { \ + size_t mib[CTL_MAX_DEPTH]; \ + size_t miblen = sizeof(mib) / sizeof(size_t); \ + size_t sz = sizeof(t); \ + xmallctlnametomib(n, mib, &miblen); \ + mib[(ind)] = (i); \ + xmallctlbymib(mib, miblen, (void *)v, &sz, NULL, 0); \ + } while (0) #define CTL_M1_GET(n, i, v, t) CTL_MIB_GET(n, i, v, t, 1) #define CTL_M2_GET(n, i, v, t) CTL_MIB_GET(n, i, v, t, 2) @@ -58,10 +62,10 @@ static const char *const arena_mutex_names[mutex_prof_num_arena_mutexes] = { /* Data. */ bool opt_stats_print = false; -char opt_stats_print_opts[stats_print_tot_num_options+1] = ""; +char opt_stats_print_opts[stats_print_tot_num_options + 1] = ""; int64_t opt_stats_interval = STATS_INTERVAL_DEFAULT; -char opt_stats_interval_opts[stats_print_tot_num_options+1] = ""; +char opt_stats_interval_opts[stats_print_tot_num_options + 1] = ""; static counter_accum_t stats_interval_accumulated; /* Per thread batch accum size for stats_interval. 
*/ @@ -111,8 +115,8 @@ get_rate_str(uint64_t dividend, uint64_t divisor, char str[6]) { static void mutex_stats_init_cols(emitter_row_t *row, const char *table_name, emitter_col_t *name, - emitter_col_t col_uint64_t[mutex_prof_num_uint64_t_counters], - emitter_col_t col_uint32_t[mutex_prof_num_uint32_t_counters]) { + emitter_col_t col_uint64_t[mutex_prof_num_uint64_t_counters], + emitter_col_t col_uint32_t[mutex_prof_num_uint32_t_counters]) { mutex_prof_uint64_t_counter_ind_t k_uint64_t = 0; mutex_prof_uint32_t_counter_ind_t k_uint32_t = 0; @@ -128,13 +132,13 @@ mutex_stats_init_cols(emitter_row_t *row, const char *table_name, #define WIDTH_uint32_t 12 #define WIDTH_uint64_t 16 -#define OP(counter, counter_type, human, derived, base_counter) \ - col = &col_##counter_type[k_##counter_type]; \ - ++k_##counter_type; \ - emitter_col_init(col, row); \ - col->justify = emitter_justify_right; \ - col->width = derived ? 8 : WIDTH_##counter_type; \ - col->type = emitter_type_title; \ +#define OP(counter, counter_type, human, derived, base_counter) \ + col = &col_##counter_type[k_##counter_type]; \ + ++k_##counter_type; \ + emitter_col_init(col, row); \ + col->justify = emitter_justify_right; \ + col->width = derived ? 
8 : WIDTH_##counter_type; \ + col->type = emitter_type_title; \ col->str_val = human; MUTEX_PROF_COUNTERS #undef OP @@ -146,9 +150,9 @@ mutex_stats_init_cols(emitter_row_t *row, const char *table_name, static void mutex_stats_read_global(size_t mib[], size_t miblen, const char *name, emitter_col_t *col_name, - emitter_col_t col_uint64_t[mutex_prof_num_uint64_t_counters], - emitter_col_t col_uint32_t[mutex_prof_num_uint32_t_counters], - uint64_t uptime) { + emitter_col_t col_uint64_t[mutex_prof_num_uint64_t_counters], + emitter_col_t col_uint32_t[mutex_prof_num_uint32_t_counters], + uint64_t uptime) { CTL_LEAF_PREPARE(mib, miblen, name); size_t miblen_name = miblen + 1; @@ -157,18 +161,17 @@ mutex_stats_read_global(size_t mib[], size_t miblen, const char *name, emitter_col_t *dst; #define EMITTER_TYPE_uint32_t emitter_type_uint32 #define EMITTER_TYPE_uint64_t emitter_type_uint64 -#define OP(counter, counter_type, human, derived, base_counter) \ - dst = &col_##counter_type[mutex_counter_##counter]; \ - dst->type = EMITTER_TYPE_##counter_type; \ - if (!derived) { \ - CTL_LEAF(mib, miblen_name, #counter, \ - (counter_type *)&dst->bool_val, counter_type); \ - } else { \ - emitter_col_t *base = \ - &col_##counter_type[mutex_counter_##base_counter]; \ - dst->counter_type##_val = \ - (counter_type)rate_per_second( \ - base->counter_type##_val, uptime); \ +#define OP(counter, counter_type, human, derived, base_counter) \ + dst = &col_##counter_type[mutex_counter_##counter]; \ + dst->type = EMITTER_TYPE_##counter_type; \ + if (!derived) { \ + CTL_LEAF(mib, miblen_name, #counter, \ + (counter_type *)&dst->bool_val, counter_type); \ + } else { \ + emitter_col_t *base = \ + &col_##counter_type[mutex_counter_##base_counter]; \ + dst->counter_type##_val = (counter_type)rate_per_second( \ + base->counter_type##_val, uptime); \ } MUTEX_PROF_COUNTERS #undef OP @@ -179,9 +182,9 @@ mutex_stats_read_global(size_t mib[], size_t miblen, const char *name, static void 
mutex_stats_read_arena(size_t mib[], size_t miblen, const char *name, emitter_col_t *col_name, - emitter_col_t col_uint64_t[mutex_prof_num_uint64_t_counters], - emitter_col_t col_uint32_t[mutex_prof_num_uint32_t_counters], - uint64_t uptime) { + emitter_col_t col_uint64_t[mutex_prof_num_uint64_t_counters], + emitter_col_t col_uint32_t[mutex_prof_num_uint32_t_counters], + uint64_t uptime) { CTL_LEAF_PREPARE(mib, miblen, name); size_t miblen_name = miblen + 1; @@ -190,18 +193,17 @@ mutex_stats_read_arena(size_t mib[], size_t miblen, const char *name, emitter_col_t *dst; #define EMITTER_TYPE_uint32_t emitter_type_uint32 #define EMITTER_TYPE_uint64_t emitter_type_uint64 -#define OP(counter, counter_type, human, derived, base_counter) \ - dst = &col_##counter_type[mutex_counter_##counter]; \ - dst->type = EMITTER_TYPE_##counter_type; \ - if (!derived) { \ - CTL_LEAF(mib, miblen_name, #counter, \ - (counter_type *)&dst->bool_val, counter_type); \ - } else { \ - emitter_col_t *base = \ - &col_##counter_type[mutex_counter_##base_counter]; \ - dst->counter_type##_val = \ - (counter_type)rate_per_second( \ - base->counter_type##_val, uptime); \ +#define OP(counter, counter_type, human, derived, base_counter) \ + dst = &col_##counter_type[mutex_counter_##counter]; \ + dst->type = EMITTER_TYPE_##counter_type; \ + if (!derived) { \ + CTL_LEAF(mib, miblen_name, #counter, \ + (counter_type *)&dst->bool_val, counter_type); \ + } else { \ + emitter_col_t *base = \ + &col_##counter_type[mutex_counter_##base_counter]; \ + dst->counter_type##_val = (counter_type)rate_per_second( \ + base->counter_type##_val, uptime); \ } MUTEX_PROF_COUNTERS #undef OP @@ -213,7 +215,7 @@ static void mutex_stats_read_arena_bin(size_t mib[], size_t miblen, emitter_col_t col_uint64_t[mutex_prof_num_uint64_t_counters], emitter_col_t col_uint32_t[mutex_prof_num_uint32_t_counters], - uint64_t uptime) { + uint64_t uptime) { CTL_LEAF_PREPARE(mib, miblen, "mutex"); size_t miblen_mutex = miblen + 1; @@ -221,18 
+223,17 @@ mutex_stats_read_arena_bin(size_t mib[], size_t miblen, #define EMITTER_TYPE_uint32_t emitter_type_uint32 #define EMITTER_TYPE_uint64_t emitter_type_uint64 -#define OP(counter, counter_type, human, derived, base_counter) \ - dst = &col_##counter_type[mutex_counter_##counter]; \ - dst->type = EMITTER_TYPE_##counter_type; \ - if (!derived) { \ - CTL_LEAF(mib, miblen_mutex, #counter, \ - (counter_type *)&dst->bool_val, counter_type); \ - } else { \ - emitter_col_t *base = \ - &col_##counter_type[mutex_counter_##base_counter]; \ - dst->counter_type##_val = \ - (counter_type)rate_per_second( \ - base->counter_type##_val, uptime); \ +#define OP(counter, counter_type, human, derived, base_counter) \ + dst = &col_##counter_type[mutex_counter_##counter]; \ + dst->type = EMITTER_TYPE_##counter_type; \ + if (!derived) { \ + CTL_LEAF(mib, miblen_mutex, #counter, \ + (counter_type *)&dst->bool_val, counter_type); \ + } else { \ + emitter_col_t *base = \ + &col_##counter_type[mutex_counter_##base_counter]; \ + dst->counter_type##_val = (counter_type)rate_per_second( \ + base->counter_type##_val, uptime); \ } MUTEX_PROF_COUNTERS #undef OP @@ -256,12 +257,12 @@ mutex_stats_emit(emitter_t *emitter, emitter_row_t *row, #define EMITTER_TYPE_uint32_t emitter_type_uint32 #define EMITTER_TYPE_uint64_t emitter_type_uint64 -#define OP(counter, type, human, derived, base_counter) \ - if (!derived) { \ - col = &col_##type[k_##type]; \ - ++k_##type; \ +#define OP(counter, type, human, derived, base_counter) \ + if (!derived) { \ + col = &col_##type[k_##type]; \ + ++k_##type; \ emitter_json_kv(emitter, #counter, EMITTER_TYPE_##type, \ - (const void *)&col->bool_val); \ + (const void *)&col->bool_val); \ } MUTEX_PROF_COUNTERS; #undef OP @@ -269,44 +270,42 @@ mutex_stats_emit(emitter_t *emitter, emitter_row_t *row, #undef EMITTER_TYPE_uint64_t } -#define COL_DECLARE(column_name) \ - emitter_col_t col_##column_name; +#define COL_DECLARE(column_name) emitter_col_t col_##column_name; 
-#define COL_INIT(row_name, column_name, left_or_right, col_width, etype)\ - emitter_col_init(&col_##column_name, &row_name); \ - col_##column_name.justify = emitter_justify_##left_or_right; \ - col_##column_name.width = col_width; \ +#define COL_INIT(row_name, column_name, left_or_right, col_width, etype) \ + emitter_col_init(&col_##column_name, &row_name); \ + col_##column_name.justify = emitter_justify_##left_or_right; \ + col_##column_name.width = col_width; \ col_##column_name.type = emitter_type_##etype; -#define COL(row_name, column_name, left_or_right, col_width, etype) \ - COL_DECLARE(column_name); \ +#define COL(row_name, column_name, left_or_right, col_width, etype) \ + COL_DECLARE(column_name); \ COL_INIT(row_name, column_name, left_or_right, col_width, etype) -#define COL_HDR_DECLARE(column_name) \ - COL_DECLARE(column_name); \ +#define COL_HDR_DECLARE(column_name) \ + COL_DECLARE(column_name); \ emitter_col_t header_##column_name; -#define COL_HDR_INIT(row_name, column_name, human, left_or_right, \ - col_width, etype) \ - COL_INIT(row_name, column_name, left_or_right, col_width, etype)\ - emitter_col_init(&header_##column_name, &header_##row_name); \ - header_##column_name.justify = emitter_justify_##left_or_right; \ - header_##column_name.width = col_width; \ - header_##column_name.type = emitter_type_title; \ +#define COL_HDR_INIT( \ + row_name, column_name, human, left_or_right, col_width, etype) \ + COL_INIT(row_name, column_name, left_or_right, col_width, etype) \ + emitter_col_init(&header_##column_name, &header_##row_name); \ + header_##column_name.justify = emitter_justify_##left_or_right; \ + header_##column_name.width = col_width; \ + header_##column_name.type = emitter_type_title; \ header_##column_name.str_val = human ? 
human : #column_name; -#define COL_HDR(row_name, column_name, human, left_or_right, col_width, \ - etype) \ - COL_HDR_DECLARE(column_name) \ - COL_HDR_INIT(row_name, column_name, human, left_or_right, \ - col_width, etype) +#define COL_HDR(row_name, column_name, human, left_or_right, col_width, etype) \ + COL_HDR_DECLARE(column_name) \ + COL_HDR_INIT( \ + row_name, column_name, human, left_or_right, col_width, etype) JEMALLOC_COLD static void -stats_arena_bins_print(emitter_t *emitter, bool mutex, unsigned i, - uint64_t uptime) { - size_t page; - bool in_gap, in_gap_prev; +stats_arena_bins_print( + emitter_t *emitter, bool mutex, unsigned i, uint64_t uptime) { + size_t page; + bool in_gap, in_gap_prev; unsigned nbins, j; CTL_GET("arenas.page", &page, size_t); @@ -378,17 +377,17 @@ stats_arena_bins_print(emitter_t *emitter, bool mutex, unsigned i, emitter_col_t header_mutex32[mutex_prof_num_uint32_t_counters]; if (mutex) { - mutex_stats_init_cols(&row, NULL, NULL, col_mutex64, - col_mutex32); - mutex_stats_init_cols(&header_row, NULL, NULL, header_mutex64, - header_mutex32); + mutex_stats_init_cols( + &row, NULL, NULL, col_mutex64, col_mutex32); + mutex_stats_init_cols( + &header_row, NULL, NULL, header_mutex64, header_mutex32); } /* * We print a "bins:" header as part of the table row; we need to adjust * the header size column to compensate. 
*/ - header_size.width -=5; + header_size.width -= 5; emitter_table_printf(emitter, "bins:"); emitter_table_row(emitter, &header_row); emitter_json_array_kv_begin(emitter, "bins"); @@ -408,9 +407,9 @@ stats_arena_bins_print(emitter_t *emitter, bool mutex, unsigned i, for (j = 0, in_gap = false; j < nbins; j++) { uint64_t nslabs; - size_t reg_size, slab_size, curregs; - size_t curslabs; - size_t nonfull_slabs; + size_t reg_size, slab_size, curregs; + size_t curslabs; + size_t nonfull_slabs; uint32_t nregs, nshards; uint64_t nmalloc, ndalloc, nrequests, nfills, nflushes; uint64_t nreslabs; @@ -440,8 +439,8 @@ stats_arena_bins_print(emitter_t *emitter, bool mutex, unsigned i, } if (in_gap_prev && !in_gap) { - emitter_table_printf(emitter, - " ---\n"); + emitter_table_printf( + emitter, " ---\n"); } if (in_gap && !emitter_outputs_json(emitter)) { @@ -455,8 +454,8 @@ stats_arena_bins_print(emitter_t *emitter, bool mutex, unsigned i, CTL_LEAF(stats_arenas_mib, 5, "nmalloc", &nmalloc, uint64_t); CTL_LEAF(stats_arenas_mib, 5, "ndalloc", &ndalloc, uint64_t); CTL_LEAF(stats_arenas_mib, 5, "curregs", &curregs, size_t); - CTL_LEAF(stats_arenas_mib, 5, "nrequests", &nrequests, - uint64_t); + CTL_LEAF( + stats_arenas_mib, 5, "nrequests", &nrequests, uint64_t); CTL_LEAF(stats_arenas_mib, 5, "nfills", &nfills, uint64_t); CTL_LEAF(stats_arenas_mib, 5, "nflushes", &nflushes, uint64_t); CTL_LEAF(stats_arenas_mib, 5, "nreslabs", &nreslabs, uint64_t); @@ -464,12 +463,12 @@ stats_arena_bins_print(emitter_t *emitter, bool mutex, unsigned i, CTL_LEAF(stats_arenas_mib, 5, "nonfull_slabs", &nonfull_slabs, size_t); - CTL_LEAF(stats_arenas_mib, 5, "batch_pops", &batch_pops, - uint64_t); + CTL_LEAF( + stats_arenas_mib, 5, "batch_pops", &batch_pops, uint64_t); CTL_LEAF(stats_arenas_mib, 5, "batch_failed_pushes", &batch_failed_pushes, uint64_t); - CTL_LEAF(stats_arenas_mib, 5, "batch_pushes", - &batch_pushes, uint64_t); + CTL_LEAF(stats_arenas_mib, 5, "batch_pushes", &batch_pushes, + uint64_t); 
CTL_LEAF(stats_arenas_mib, 5, "batch_pushed_elems", &batch_pushed_elems, uint64_t); @@ -479,14 +478,14 @@ stats_arena_bins_print(emitter_t *emitter, bool mutex, unsigned i, } emitter_json_object_begin(emitter); - emitter_json_kv(emitter, "nmalloc", emitter_type_uint64, - &nmalloc); - emitter_json_kv(emitter, "ndalloc", emitter_type_uint64, - &ndalloc); - emitter_json_kv(emitter, "curregs", emitter_type_size, - &curregs); - emitter_json_kv(emitter, "nrequests", emitter_type_uint64, - &nrequests); + emitter_json_kv( + emitter, "nmalloc", emitter_type_uint64, &nmalloc); + emitter_json_kv( + emitter, "ndalloc", emitter_type_uint64, &ndalloc); + emitter_json_kv( + emitter, "curregs", emitter_type_size, &curregs); + emitter_json_kv( + emitter, "nrequests", emitter_type_uint64, &nrequests); if (prof_stats_on) { emitter_json_kv(emitter, "prof_live_requested", emitter_type_uint64, &prof_live.req_sum); @@ -497,36 +496,36 @@ stats_arena_bins_print(emitter_t *emitter, bool mutex, unsigned i, emitter_json_kv(emitter, "prof_accum_count", emitter_type_uint64, &prof_accum.count); } - emitter_json_kv(emitter, "nfills", emitter_type_uint64, - &nfills); - emitter_json_kv(emitter, "nflushes", emitter_type_uint64, - &nflushes); - emitter_json_kv(emitter, "nreslabs", emitter_type_uint64, - &nreslabs); - emitter_json_kv(emitter, "curslabs", emitter_type_size, - &curslabs); + emitter_json_kv( + emitter, "nfills", emitter_type_uint64, &nfills); + emitter_json_kv( + emitter, "nflushes", emitter_type_uint64, &nflushes); + emitter_json_kv( + emitter, "nreslabs", emitter_type_uint64, &nreslabs); + emitter_json_kv( + emitter, "curslabs", emitter_type_size, &curslabs); emitter_json_kv(emitter, "nonfull_slabs", emitter_type_size, &nonfull_slabs); - emitter_json_kv(emitter, "batch_pops", - emitter_type_uint64, &batch_pops); + emitter_json_kv( + emitter, "batch_pops", emitter_type_uint64, &batch_pops); emitter_json_kv(emitter, "batch_failed_pushes", emitter_type_uint64, &batch_failed_pushes); - 
emitter_json_kv(emitter, "batch_pushes", - emitter_type_uint64, &batch_pushes); + emitter_json_kv(emitter, "batch_pushes", emitter_type_uint64, + &batch_pushes); emitter_json_kv(emitter, "batch_pushed_elems", emitter_type_uint64, &batch_pushed_elems); if (mutex) { emitter_json_object_kv_begin(emitter, "mutex"); - mutex_stats_emit(emitter, NULL, col_mutex64, - col_mutex32); + mutex_stats_emit( + emitter, NULL, col_mutex64, col_mutex32); emitter_json_object_end(emitter); } emitter_json_object_end(emitter); size_t availregs = nregs * curslabs; - char util[6]; - if (get_rate_str((uint64_t)curregs, (uint64_t)availregs, util)) - { + char util[6]; + if (get_rate_str( + (uint64_t)curregs, (uint64_t)availregs, util)) { if (availregs == 0) { malloc_snprintf(util, sizeof(util), "1"); } else if (curregs > availregs) { @@ -550,7 +549,8 @@ stats_arena_bins_print(emitter_t *emitter, bool mutex, unsigned i, col_ndalloc.uint64_val = ndalloc; col_ndalloc_ps.uint64_val = rate_per_second(ndalloc, uptime); col_nrequests.uint64_val = nrequests; - col_nrequests_ps.uint64_val = rate_per_second(nrequests, uptime); + col_nrequests_ps.uint64_val = rate_per_second( + nrequests, uptime); if (prof_stats_on) { col_prof_live_requested.uint64_val = prof_live.req_sum; col_prof_live_count.uint64_val = prof_live.count; @@ -574,19 +574,17 @@ stats_arena_bins_print(emitter_t *emitter, bool mutex, unsigned i, col_nreslabs_ps.uint64_val = rate_per_second(nreslabs, uptime); col_pops.uint64_val = batch_pops; - col_pops_ps.uint64_val - = rate_per_second(batch_pops, uptime); + col_pops_ps.uint64_val = rate_per_second(batch_pops, uptime); col_failed_push.uint64_val = batch_failed_pushes; - col_failed_push_ps.uint64_val - = rate_per_second(batch_failed_pushes, uptime); + col_failed_push_ps.uint64_val = rate_per_second( + batch_failed_pushes, uptime); col_push.uint64_val = batch_pushes; - col_push_ps.uint64_val - = rate_per_second(batch_pushes, uptime); + col_push_ps.uint64_val = rate_per_second(batch_pushes, 
uptime); col_push_elem.uint64_val = batch_pushed_elems; - col_push_elem_ps.uint64_val - = rate_per_second(batch_pushed_elems, uptime); + col_push_elem_ps.uint64_val = rate_per_second( + batch_pushed_elems, uptime); /* * Note that mutex columns were initialized above, if mutex == @@ -606,7 +604,7 @@ JEMALLOC_COLD static void stats_arena_lextents_print(emitter_t *emitter, unsigned i, uint64_t uptime) { unsigned nbins, nlextents, j; - bool in_gap, in_gap_prev; + bool in_gap, in_gap_prev; CTL_GET("arenas.nbins", &nbins, unsigned); CTL_GET("arenas.nlextents", &nlextents, unsigned); @@ -660,8 +658,8 @@ stats_arena_lextents_print(emitter_t *emitter, unsigned i, uint64_t uptime) { } for (j = 0, in_gap = false; j < nlextents; j++) { - uint64_t nmalloc, ndalloc, nrequests; - size_t lextent_size, curlextents; + uint64_t nmalloc, ndalloc, nrequests; + size_t lextent_size, curlextents; prof_stats_t prof_live; prof_stats_t prof_accum; @@ -670,20 +668,20 @@ stats_arena_lextents_print(emitter_t *emitter, unsigned i, uint64_t uptime) { CTL_LEAF(stats_arenas_mib, 5, "nmalloc", &nmalloc, uint64_t); CTL_LEAF(stats_arenas_mib, 5, "ndalloc", &ndalloc, uint64_t); - CTL_LEAF(stats_arenas_mib, 5, "nrequests", &nrequests, - uint64_t); + CTL_LEAF( + stats_arenas_mib, 5, "nrequests", &nrequests, uint64_t); in_gap_prev = in_gap; in_gap = (nrequests == 0); if (in_gap_prev && !in_gap) { - emitter_table_printf(emitter, - " ---\n"); + emitter_table_printf( + emitter, " ---\n"); } CTL_LEAF(arenas_lextent_mib, 3, "size", &lextent_size, size_t); - CTL_LEAF(stats_arenas_mib, 5, "curlextents", &curlextents, - size_t); + CTL_LEAF( + stats_arenas_mib, 5, "curlextents", &curlextents, size_t); if (prof_stats_on) { prof_stats_mib[3] = j; @@ -704,8 +702,8 @@ stats_arena_lextents_print(emitter_t *emitter, unsigned i, uint64_t uptime) { emitter_json_kv(emitter, "prof_accum_count", emitter_type_uint64, &prof_accum.count); } - emitter_json_kv(emitter, "curlextents", emitter_type_size, - &curlextents); + 
emitter_json_kv( + emitter, "curlextents", emitter_type_size, &curlextents); emitter_json_object_end(emitter); col_size.size_val = lextent_size; @@ -716,7 +714,8 @@ stats_arena_lextents_print(emitter_t *emitter, unsigned i, uint64_t uptime) { col_ndalloc.uint64_val = ndalloc; col_ndalloc_ps.uint64_val = rate_per_second(ndalloc, uptime); col_nrequests.uint64_val = nrequests; - col_nrequests_ps.uint64_val = rate_per_second(nrequests, uptime); + col_nrequests_ps.uint64_val = rate_per_second( + nrequests, uptime); if (prof_stats_on) { col_prof_live_requested.uint64_val = prof_live.req_sum; col_prof_live_count.uint64_val = prof_live.count; @@ -739,8 +738,8 @@ stats_arena_lextents_print(emitter_t *emitter, unsigned i, uint64_t uptime) { JEMALLOC_COLD static void stats_arena_extents_print(emitter_t *emitter, unsigned i) { - unsigned j; - bool in_gap, in_gap_prev; + unsigned j; + bool in_gap, in_gap_prev; emitter_row_t header_row; emitter_row_init(&header_row); emitter_row_t row; @@ -777,12 +776,12 @@ stats_arena_extents_print(emitter_t *emitter, unsigned i) { CTL_LEAF(stats_arenas_mib, 5, "ndirty", &ndirty, size_t); CTL_LEAF(stats_arenas_mib, 5, "nmuzzy", &nmuzzy, size_t); CTL_LEAF(stats_arenas_mib, 5, "nretained", &nretained, size_t); - CTL_LEAF(stats_arenas_mib, 5, "dirty_bytes", &dirty_bytes, + CTL_LEAF( + stats_arenas_mib, 5, "dirty_bytes", &dirty_bytes, size_t); + CTL_LEAF( + stats_arenas_mib, 5, "muzzy_bytes", &muzzy_bytes, size_t); + CTL_LEAF(stats_arenas_mib, 5, "retained_bytes", &retained_bytes, size_t); - CTL_LEAF(stats_arenas_mib, 5, "muzzy_bytes", &muzzy_bytes, - size_t); - CTL_LEAF(stats_arenas_mib, 5, "retained_bytes", - &retained_bytes, size_t); total = ndirty + nmuzzy + nretained; total_bytes = dirty_bytes + muzzy_bytes + retained_bytes; @@ -791,20 +790,20 @@ stats_arena_extents_print(emitter_t *emitter, unsigned i) { in_gap = (total == 0); if (in_gap_prev && !in_gap) { - emitter_table_printf(emitter, - " ---\n"); + emitter_table_printf( + emitter, " 
---\n"); } emitter_json_object_begin(emitter); emitter_json_kv(emitter, "ndirty", emitter_type_size, &ndirty); emitter_json_kv(emitter, "nmuzzy", emitter_type_size, &nmuzzy); - emitter_json_kv(emitter, "nretained", emitter_type_size, - &nretained); + emitter_json_kv( + emitter, "nretained", emitter_type_size, &nretained); - emitter_json_kv(emitter, "dirty_bytes", emitter_type_size, - &dirty_bytes); - emitter_json_kv(emitter, "muzzy_bytes", emitter_type_size, - &muzzy_bytes); + emitter_json_kv( + emitter, "dirty_bytes", emitter_type_size, &dirty_bytes); + emitter_json_kv( + emitter, "muzzy_bytes", emitter_type_size, &muzzy_bytes); emitter_json_kv(emitter, "retained_bytes", emitter_type_size, &retained_bytes); emitter_json_object_end(emitter); @@ -839,8 +838,8 @@ stats_arena_hpa_shard_sec_print(emitter_t *emitter, unsigned i) { } static void -stats_arena_hpa_shard_counters_print(emitter_t *emitter, unsigned i, - uint64_t uptime) { +stats_arena_hpa_shard_counters_print( + emitter_t *emitter, unsigned i, uint64_t uptime) { size_t npageslabs; size_t nactive; size_t ndirty; @@ -860,39 +859,36 @@ stats_arena_hpa_shard_counters_print(emitter_t *emitter, unsigned i, uint64_t nhugify_failures; uint64_t ndehugifies; - CTL_M2_GET("stats.arenas.0.hpa_shard.npageslabs", - i, &npageslabs, size_t); - CTL_M2_GET("stats.arenas.0.hpa_shard.nactive", - i, &nactive, size_t); - CTL_M2_GET("stats.arenas.0.hpa_shard.ndirty", - i, &ndirty, size_t); + CTL_M2_GET( + "stats.arenas.0.hpa_shard.npageslabs", i, &npageslabs, size_t); + CTL_M2_GET("stats.arenas.0.hpa_shard.nactive", i, &nactive, size_t); + CTL_M2_GET("stats.arenas.0.hpa_shard.ndirty", i, &ndirty, size_t); - CTL_M2_GET("stats.arenas.0.hpa_shard.slabs.npageslabs_nonhuge", - i, &npageslabs_nonhuge, size_t); - CTL_M2_GET("stats.arenas.0.hpa_shard.slabs.nactive_nonhuge", - i, &nactive_nonhuge, size_t); - CTL_M2_GET("stats.arenas.0.hpa_shard.slabs.ndirty_nonhuge", - i, &ndirty_nonhuge, size_t); + 
CTL_M2_GET("stats.arenas.0.hpa_shard.slabs.npageslabs_nonhuge", i, + &npageslabs_nonhuge, size_t); + CTL_M2_GET("stats.arenas.0.hpa_shard.slabs.nactive_nonhuge", i, + &nactive_nonhuge, size_t); + CTL_M2_GET("stats.arenas.0.hpa_shard.slabs.ndirty_nonhuge", i, + &ndirty_nonhuge, size_t); nretained_nonhuge = npageslabs_nonhuge * HUGEPAGE_PAGES - nactive_nonhuge - ndirty_nonhuge; - CTL_M2_GET("stats.arenas.0.hpa_shard.slabs.npageslabs_huge", - i, &npageslabs_huge, size_t); - CTL_M2_GET("stats.arenas.0.hpa_shard.slabs.nactive_huge", - i, &nactive_huge, size_t); - CTL_M2_GET("stats.arenas.0.hpa_shard.slabs.ndirty_huge", - i, &ndirty_huge, size_t); + CTL_M2_GET("stats.arenas.0.hpa_shard.slabs.npageslabs_huge", i, + &npageslabs_huge, size_t); + CTL_M2_GET("stats.arenas.0.hpa_shard.slabs.nactive_huge", i, + &nactive_huge, size_t); + CTL_M2_GET("stats.arenas.0.hpa_shard.slabs.ndirty_huge", i, + &ndirty_huge, size_t); - CTL_M2_GET("stats.arenas.0.hpa_shard.npurge_passes", - i, &npurge_passes, uint64_t); - CTL_M2_GET("stats.arenas.0.hpa_shard.npurges", - i, &npurges, uint64_t); - CTL_M2_GET("stats.arenas.0.hpa_shard.nhugifies", - i, &nhugifies, uint64_t); - CTL_M2_GET("stats.arenas.0.hpa_shard.nhugify_failures", - i, &nhugify_failures, uint64_t); - CTL_M2_GET("stats.arenas.0.hpa_shard.ndehugifies", - i, &ndehugifies, uint64_t); + CTL_M2_GET("stats.arenas.0.hpa_shard.npurge_passes", i, &npurge_passes, + uint64_t); + CTL_M2_GET("stats.arenas.0.hpa_shard.npurges", i, &npurges, uint64_t); + CTL_M2_GET( + "stats.arenas.0.hpa_shard.nhugifies", i, &nhugifies, uint64_t); + CTL_M2_GET("stats.arenas.0.hpa_shard.nhugify_failures", i, + &nhugify_failures, uint64_t); + CTL_M2_GET( + "stats.arenas.0.hpa_shard.ndehugifies", i, &ndehugifies, uint64_t); emitter_table_printf(emitter, "HPA shard stats:\n" @@ -900,56 +896,55 @@ stats_arena_hpa_shard_counters_print(emitter_t *emitter, unsigned i, " Active pages: %zu (%zu huge, %zu nonhuge)\n" " Dirty pages: %zu (%zu huge, %zu nonhuge)\n" " 
Retained pages: %zu\n" - " Purge passes: %" FMTu64 " (%" FMTu64 " / sec)\n" - " Purges: %" FMTu64 " (%" FMTu64 " / sec)\n" - " Hugeifies: %" FMTu64 " (%" FMTu64 " / sec)\n" - " Hugify failures: %" FMTu64 " (%" FMTu64 " / sec)\n" - " Dehugifies: %" FMTu64 " (%" FMTu64 " / sec)\n" + " Purge passes: %" FMTu64 " (%" FMTu64 + " / sec)\n" + " Purges: %" FMTu64 " (%" FMTu64 + " / sec)\n" + " Hugeifies: %" FMTu64 " (%" FMTu64 + " / sec)\n" + " Hugify failures: %" FMTu64 " (%" FMTu64 + " / sec)\n" + " Dehugifies: %" FMTu64 " (%" FMTu64 + " / sec)\n" "\n", - npageslabs, npageslabs_huge, npageslabs_nonhuge, - nactive, nactive_huge, nactive_nonhuge, - ndirty, ndirty_huge, ndirty_nonhuge, - nretained_nonhuge, - npurge_passes, rate_per_second(npurge_passes, uptime), - npurges, rate_per_second(npurges, uptime), - nhugifies, rate_per_second(nhugifies, uptime), - nhugify_failures, rate_per_second(nhugify_failures, uptime), - ndehugifies, rate_per_second(ndehugifies, uptime)); + npageslabs, npageslabs_huge, npageslabs_nonhuge, nactive, + nactive_huge, nactive_nonhuge, ndirty, ndirty_huge, ndirty_nonhuge, + nretained_nonhuge, npurge_passes, + rate_per_second(npurge_passes, uptime), npurges, + rate_per_second(npurges, uptime), nhugifies, + rate_per_second(nhugifies, uptime), nhugify_failures, + rate_per_second(nhugify_failures, uptime), ndehugifies, + rate_per_second(ndehugifies, uptime)); - emitter_json_kv(emitter, "npageslabs", emitter_type_size, - &npageslabs); - emitter_json_kv(emitter, "nactive", emitter_type_size, - &nactive); - emitter_json_kv(emitter, "ndirty", emitter_type_size, - &ndirty); + emitter_json_kv(emitter, "npageslabs", emitter_type_size, &npageslabs); + emitter_json_kv(emitter, "nactive", emitter_type_size, &nactive); + emitter_json_kv(emitter, "ndirty", emitter_type_size, &ndirty); - emitter_json_kv(emitter, "npurge_passes", emitter_type_uint64, - &npurge_passes); - emitter_json_kv(emitter, "npurges", emitter_type_uint64, - &npurges); - emitter_json_kv(emitter, 
"nhugifies", emitter_type_uint64, - &nhugifies); + emitter_json_kv( + emitter, "npurge_passes", emitter_type_uint64, &npurge_passes); + emitter_json_kv(emitter, "npurges", emitter_type_uint64, &npurges); + emitter_json_kv(emitter, "nhugifies", emitter_type_uint64, &nhugifies); emitter_json_kv(emitter, "nhugify_failures", emitter_type_uint64, &nhugify_failures); - emitter_json_kv(emitter, "ndehugifies", emitter_type_uint64, - &ndehugifies); + emitter_json_kv( + emitter, "ndehugifies", emitter_type_uint64, &ndehugifies); emitter_json_object_kv_begin(emitter, "slabs"); emitter_json_kv(emitter, "npageslabs_nonhuge", emitter_type_size, &npageslabs_nonhuge); - emitter_json_kv(emitter, "nactive_nonhuge", emitter_type_size, - &nactive_nonhuge); - emitter_json_kv(emitter, "ndirty_nonhuge", emitter_type_size, - &ndirty_nonhuge); + emitter_json_kv( + emitter, "nactive_nonhuge", emitter_type_size, &nactive_nonhuge); + emitter_json_kv( + emitter, "ndirty_nonhuge", emitter_type_size, &ndirty_nonhuge); emitter_json_kv(emitter, "nretained_nonhuge", emitter_type_size, &nretained_nonhuge); - emitter_json_kv(emitter, "npageslabs_huge", emitter_type_size, - &npageslabs_huge); - emitter_json_kv(emitter, "nactive_huge", emitter_type_size, - &nactive_huge); - emitter_json_kv(emitter, "ndirty_huge", emitter_type_size, - &ndirty_huge); + emitter_json_kv( + emitter, "npageslabs_huge", emitter_type_size, &npageslabs_huge); + emitter_json_kv( + emitter, "nactive_huge", emitter_type_size, &nactive_huge); + emitter_json_kv( + emitter, "ndirty_huge", emitter_type_size, &ndirty_huge); emitter_json_object_end(emitter); /* End "slabs" */ } @@ -970,19 +965,19 @@ stats_arena_hpa_shard_slabs_print(emitter_t *emitter, unsigned i) { size_t nretained_nonhuge; /* Full slab stats. 
*/ - CTL_M2_GET("stats.arenas.0.hpa_shard.full_slabs.npageslabs_huge", - i, &npageslabs_huge, size_t); - CTL_M2_GET("stats.arenas.0.hpa_shard.full_slabs.nactive_huge", - i, &nactive_huge, size_t); - CTL_M2_GET("stats.arenas.0.hpa_shard.full_slabs.ndirty_huge", - i, &ndirty_huge, size_t); + CTL_M2_GET("stats.arenas.0.hpa_shard.full_slabs.npageslabs_huge", i, + &npageslabs_huge, size_t); + CTL_M2_GET("stats.arenas.0.hpa_shard.full_slabs.nactive_huge", i, + &nactive_huge, size_t); + CTL_M2_GET("stats.arenas.0.hpa_shard.full_slabs.ndirty_huge", i, + &ndirty_huge, size_t); - CTL_M2_GET("stats.arenas.0.hpa_shard.full_slabs.npageslabs_nonhuge", - i, &npageslabs_nonhuge, size_t); - CTL_M2_GET("stats.arenas.0.hpa_shard.full_slabs.nactive_nonhuge", - i, &nactive_nonhuge, size_t); - CTL_M2_GET("stats.arenas.0.hpa_shard.full_slabs.ndirty_nonhuge", - i, &ndirty_nonhuge, size_t); + CTL_M2_GET("stats.arenas.0.hpa_shard.full_slabs.npageslabs_nonhuge", i, + &npageslabs_nonhuge, size_t); + CTL_M2_GET("stats.arenas.0.hpa_shard.full_slabs.nactive_nonhuge", i, + &nactive_nonhuge, size_t); + CTL_M2_GET("stats.arenas.0.hpa_shard.full_slabs.ndirty_nonhuge", i, + &ndirty_nonhuge, size_t); nretained_nonhuge = npageslabs_nonhuge * HUGEPAGE_PAGES - nactive_nonhuge - ndirty_nonhuge; @@ -992,40 +987,38 @@ stats_arena_hpa_shard_slabs_print(emitter_t *emitter, unsigned i) { " nactive: %zu huge, %zu nonhuge \n" " ndirty: %zu huge, %zu nonhuge \n" " nretained: 0 huge, %zu nonhuge \n", - npageslabs_huge, npageslabs_nonhuge, - nactive_huge, nactive_nonhuge, - ndirty_huge, ndirty_nonhuge, - nretained_nonhuge); + npageslabs_huge, npageslabs_nonhuge, nactive_huge, nactive_nonhuge, + ndirty_huge, ndirty_nonhuge, nretained_nonhuge); emitter_json_object_kv_begin(emitter, "full_slabs"); - emitter_json_kv(emitter, "npageslabs_huge", emitter_type_size, - &npageslabs_huge); - emitter_json_kv(emitter, "nactive_huge", emitter_type_size, - &nactive_huge); - emitter_json_kv(emitter, "nactive_huge", 
emitter_type_size, - &nactive_huge); + emitter_json_kv( + emitter, "npageslabs_huge", emitter_type_size, &npageslabs_huge); + emitter_json_kv( + emitter, "nactive_huge", emitter_type_size, &nactive_huge); + emitter_json_kv( + emitter, "nactive_huge", emitter_type_size, &nactive_huge); emitter_json_kv(emitter, "npageslabs_nonhuge", emitter_type_size, &npageslabs_nonhuge); - emitter_json_kv(emitter, "nactive_nonhuge", emitter_type_size, - &nactive_nonhuge); - emitter_json_kv(emitter, "ndirty_nonhuge", emitter_type_size, - &ndirty_nonhuge); + emitter_json_kv( + emitter, "nactive_nonhuge", emitter_type_size, &nactive_nonhuge); + emitter_json_kv( + emitter, "ndirty_nonhuge", emitter_type_size, &ndirty_nonhuge); emitter_json_object_end(emitter); /* End "full_slabs" */ /* Next, empty slab stats. */ - CTL_M2_GET("stats.arenas.0.hpa_shard.empty_slabs.npageslabs_huge", - i, &npageslabs_huge, size_t); - CTL_M2_GET("stats.arenas.0.hpa_shard.empty_slabs.nactive_huge", - i, &nactive_huge, size_t); - CTL_M2_GET("stats.arenas.0.hpa_shard.empty_slabs.ndirty_huge", - i, &ndirty_huge, size_t); + CTL_M2_GET("stats.arenas.0.hpa_shard.empty_slabs.npageslabs_huge", i, + &npageslabs_huge, size_t); + CTL_M2_GET("stats.arenas.0.hpa_shard.empty_slabs.nactive_huge", i, + &nactive_huge, size_t); + CTL_M2_GET("stats.arenas.0.hpa_shard.empty_slabs.ndirty_huge", i, + &ndirty_huge, size_t); - CTL_M2_GET("stats.arenas.0.hpa_shard.empty_slabs.npageslabs_nonhuge", - i, &npageslabs_nonhuge, size_t); - CTL_M2_GET("stats.arenas.0.hpa_shard.empty_slabs.nactive_nonhuge", - i, &nactive_nonhuge, size_t); - CTL_M2_GET("stats.arenas.0.hpa_shard.empty_slabs.ndirty_nonhuge", - i, &ndirty_nonhuge, size_t); + CTL_M2_GET("stats.arenas.0.hpa_shard.empty_slabs.npageslabs_nonhuge", i, + &npageslabs_nonhuge, size_t); + CTL_M2_GET("stats.arenas.0.hpa_shard.empty_slabs.nactive_nonhuge", i, + &nactive_nonhuge, size_t); + CTL_M2_GET("stats.arenas.0.hpa_shard.empty_slabs.ndirty_nonhuge", i, + &ndirty_nonhuge, size_t); 
nretained_nonhuge = npageslabs_nonhuge * HUGEPAGE_PAGES - nactive_nonhuge - ndirty_nonhuge; @@ -1035,24 +1028,22 @@ stats_arena_hpa_shard_slabs_print(emitter_t *emitter, unsigned i) { " nactive: %zu huge, %zu nonhuge \n" " ndirty: %zu huge, %zu nonhuge \n" " nretained: 0 huge, %zu nonhuge \n", - npageslabs_huge, npageslabs_nonhuge, - nactive_huge, nactive_nonhuge, - ndirty_huge, ndirty_nonhuge, - nretained_nonhuge); + npageslabs_huge, npageslabs_nonhuge, nactive_huge, nactive_nonhuge, + ndirty_huge, ndirty_nonhuge, nretained_nonhuge); emitter_json_object_kv_begin(emitter, "empty_slabs"); - emitter_json_kv(emitter, "npageslabs_huge", emitter_type_size, - &npageslabs_huge); - emitter_json_kv(emitter, "nactive_huge", emitter_type_size, - &nactive_huge); - emitter_json_kv(emitter, "nactive_huge", emitter_type_size, - &nactive_huge); + emitter_json_kv( + emitter, "npageslabs_huge", emitter_type_size, &npageslabs_huge); + emitter_json_kv( + emitter, "nactive_huge", emitter_type_size, &nactive_huge); + emitter_json_kv( + emitter, "nactive_huge", emitter_type_size, &nactive_huge); emitter_json_kv(emitter, "npageslabs_nonhuge", emitter_type_size, &npageslabs_nonhuge); - emitter_json_kv(emitter, "nactive_nonhuge", emitter_type_size, - &nactive_nonhuge); - emitter_json_kv(emitter, "ndirty_nonhuge", emitter_type_size, - &ndirty_nonhuge); + emitter_json_kv( + emitter, "nactive_nonhuge", emitter_type_size, &nactive_nonhuge); + emitter_json_kv( + emitter, "ndirty_nonhuge", emitter_type_size, &ndirty_nonhuge); emitter_json_object_end(emitter); /* End "empty_slabs" */ /* Last, nonfull slab stats. 
*/ @@ -1080,25 +1071,25 @@ stats_arena_hpa_shard_slabs_print(emitter_t *emitter, unsigned i) { CTL_LEAF(stats_arenas_mib, 6, "npageslabs_huge", &npageslabs_huge, size_t); - CTL_LEAF(stats_arenas_mib, 6, "nactive_huge", - &nactive_huge, size_t); - CTL_LEAF(stats_arenas_mib, 6, "ndirty_huge", - &ndirty_huge, size_t); + CTL_LEAF( + stats_arenas_mib, 6, "nactive_huge", &nactive_huge, size_t); + CTL_LEAF( + stats_arenas_mib, 6, "ndirty_huge", &ndirty_huge, size_t); CTL_LEAF(stats_arenas_mib, 6, "npageslabs_nonhuge", &npageslabs_nonhuge, size_t); CTL_LEAF(stats_arenas_mib, 6, "nactive_nonhuge", &nactive_nonhuge, size_t); - CTL_LEAF(stats_arenas_mib, 6, "ndirty_nonhuge", - &ndirty_nonhuge, size_t); + CTL_LEAF(stats_arenas_mib, 6, "ndirty_nonhuge", &ndirty_nonhuge, + size_t); nretained_nonhuge = npageslabs_nonhuge * HUGEPAGE_PAGES - nactive_nonhuge - ndirty_nonhuge; bool in_gap_prev = in_gap; in_gap = (npageslabs_huge == 0 && npageslabs_nonhuge == 0); if (in_gap_prev && !in_gap) { - emitter_table_printf(emitter, - " ---\n"); + emitter_table_printf( + emitter, " ---\n"); } col_size.size_val = sz_pind2sz(j); @@ -1117,12 +1108,12 @@ stats_arena_hpa_shard_slabs_print(emitter_t *emitter, unsigned i) { emitter_json_object_begin(emitter); emitter_json_kv(emitter, "npageslabs_huge", emitter_type_size, &npageslabs_huge); - emitter_json_kv(emitter, "nactive_huge", emitter_type_size, - &nactive_huge); - emitter_json_kv(emitter, "ndirty_huge", emitter_type_size, - &ndirty_huge); - emitter_json_kv(emitter, "npageslabs_nonhuge", emitter_type_size, - &npageslabs_nonhuge); + emitter_json_kv( + emitter, "nactive_huge", emitter_type_size, &nactive_huge); + emitter_json_kv( + emitter, "ndirty_huge", emitter_type_size, &ndirty_huge); + emitter_json_kv(emitter, "npageslabs_nonhuge", + emitter_type_size, &npageslabs_nonhuge); emitter_json_kv(emitter, "nactive_nonhuge", emitter_type_size, &nactive_nonhuge); emitter_json_kv(emitter, "ndirty_nonhuge", emitter_type_size, @@ -1146,7 +1137,8 @@ 
stats_arena_hpa_shard_print(emitter_t *emitter, unsigned i, uint64_t uptime) { } static void -stats_arena_mutexes_print(emitter_t *emitter, unsigned arena_ind, uint64_t uptime) { +stats_arena_mutexes_print( + emitter_t *emitter, unsigned arena_ind, uint64_t uptime) { emitter_row_t row; emitter_col_t col_name; emitter_col_t col64[mutex_prof_num_uint64_t_counters]; @@ -1164,11 +1156,11 @@ stats_arena_mutexes_print(emitter_t *emitter, unsigned arena_ind, uint64_t uptim CTL_LEAF_PREPARE(stats_arenas_mib, 3, "mutexes"); for (mutex_prof_arena_ind_t i = 0; i < mutex_prof_num_arena_mutexes; - i++) { + i++) { const char *name = arena_mutex_names[i]; emitter_json_object_kv_begin(emitter, name); - mutex_stats_read_arena(stats_arenas_mib, 4, name, &col_name, - col64, col32, uptime); + mutex_stats_read_arena( + stats_arenas_mib, 4, name, &col_name, col64, col32, uptime); mutex_stats_emit(emitter, &row, col64, col32); emitter_json_object_end(emitter); /* Close the mutex dict. */ } @@ -1179,29 +1171,30 @@ JEMALLOC_COLD static void stats_arena_print(emitter_t *emitter, unsigned i, bool bins, bool large, bool mutex, bool extents, bool hpa) { - char name[ARENA_NAME_LEN]; - char *namep = name; - unsigned nthreads; + char name[ARENA_NAME_LEN]; + char *namep = name; + unsigned nthreads; const char *dss; - ssize_t dirty_decay_ms, muzzy_decay_ms; - size_t page, pactive, pdirty, pmuzzy, mapped, retained; - size_t base, internal, resident, metadata_edata, metadata_rtree, + ssize_t dirty_decay_ms, muzzy_decay_ms; + size_t page, pactive, pdirty, pmuzzy, mapped, retained; + size_t base, internal, resident, metadata_edata, metadata_rtree, metadata_thp, extent_avail; uint64_t dirty_npurge, dirty_nmadvise, dirty_purged; uint64_t muzzy_npurge, muzzy_nmadvise, muzzy_purged; - size_t small_allocated; + size_t small_allocated; uint64_t small_nmalloc, small_ndalloc, small_nrequests, small_nfills, small_nflushes; - size_t large_allocated; + size_t large_allocated; uint64_t large_nmalloc, 
large_ndalloc, large_nrequests, large_nfills, large_nflushes; - size_t tcache_bytes, tcache_stashed_bytes, abandoned_vm; + size_t tcache_bytes, tcache_stashed_bytes, abandoned_vm; uint64_t uptime; CTL_GET("arenas.page", &page, size_t); if (i != MALLCTL_ARENAS_ALL && i != MALLCTL_ARENAS_DESTROYED) { CTL_M1_GET("arena.0.name", i, (void *)&namep, const char *); - emitter_kv(emitter, "name", "name", emitter_type_string, &namep); + emitter_kv( + emitter, "name", "name", emitter_type_string, &namep); } CTL_M2_GET("stats.arenas.0.nthreads", i, &nthreads, unsigned); @@ -1209,55 +1202,55 @@ stats_arena_print(emitter_t *emitter, unsigned i, bool bins, bool large, emitter_type_unsigned, &nthreads); CTL_M2_GET("stats.arenas.0.uptime", i, &uptime, uint64_t); - emitter_kv(emitter, "uptime_ns", "uptime", emitter_type_uint64, - &uptime); + emitter_kv( + emitter, "uptime_ns", "uptime", emitter_type_uint64, &uptime); CTL_M2_GET("stats.arenas.0.dss", i, &dss, const char *); emitter_kv(emitter, "dss", "dss allocation precedence", emitter_type_string, &dss); - CTL_M2_GET("stats.arenas.0.dirty_decay_ms", i, &dirty_decay_ms, - ssize_t); - CTL_M2_GET("stats.arenas.0.muzzy_decay_ms", i, &muzzy_decay_ms, - ssize_t); + CTL_M2_GET( + "stats.arenas.0.dirty_decay_ms", i, &dirty_decay_ms, ssize_t); + CTL_M2_GET( + "stats.arenas.0.muzzy_decay_ms", i, &muzzy_decay_ms, ssize_t); CTL_M2_GET("stats.arenas.0.pactive", i, &pactive, size_t); CTL_M2_GET("stats.arenas.0.pdirty", i, &pdirty, size_t); CTL_M2_GET("stats.arenas.0.pmuzzy", i, &pmuzzy, size_t); CTL_M2_GET("stats.arenas.0.dirty_npurge", i, &dirty_npurge, uint64_t); - CTL_M2_GET("stats.arenas.0.dirty_nmadvise", i, &dirty_nmadvise, - uint64_t); + CTL_M2_GET( + "stats.arenas.0.dirty_nmadvise", i, &dirty_nmadvise, uint64_t); CTL_M2_GET("stats.arenas.0.dirty_purged", i, &dirty_purged, uint64_t); CTL_M2_GET("stats.arenas.0.muzzy_npurge", i, &muzzy_npurge, uint64_t); - CTL_M2_GET("stats.arenas.0.muzzy_nmadvise", i, &muzzy_nmadvise, - uint64_t); + 
CTL_M2_GET( + "stats.arenas.0.muzzy_nmadvise", i, &muzzy_nmadvise, uint64_t); CTL_M2_GET("stats.arenas.0.muzzy_purged", i, &muzzy_purged, uint64_t); emitter_row_t decay_row; emitter_row_init(&decay_row); /* JSON-style emission. */ - emitter_json_kv(emitter, "dirty_decay_ms", emitter_type_ssize, - &dirty_decay_ms); - emitter_json_kv(emitter, "muzzy_decay_ms", emitter_type_ssize, - &muzzy_decay_ms); + emitter_json_kv( + emitter, "dirty_decay_ms", emitter_type_ssize, &dirty_decay_ms); + emitter_json_kv( + emitter, "muzzy_decay_ms", emitter_type_ssize, &muzzy_decay_ms); emitter_json_kv(emitter, "pactive", emitter_type_size, &pactive); emitter_json_kv(emitter, "pdirty", emitter_type_size, &pdirty); emitter_json_kv(emitter, "pmuzzy", emitter_type_size, &pmuzzy); - emitter_json_kv(emitter, "dirty_npurge", emitter_type_uint64, - &dirty_npurge); - emitter_json_kv(emitter, "dirty_nmadvise", emitter_type_uint64, - &dirty_nmadvise); - emitter_json_kv(emitter, "dirty_purged", emitter_type_uint64, - &dirty_purged); + emitter_json_kv( + emitter, "dirty_npurge", emitter_type_uint64, &dirty_npurge); + emitter_json_kv( + emitter, "dirty_nmadvise", emitter_type_uint64, &dirty_nmadvise); + emitter_json_kv( + emitter, "dirty_purged", emitter_type_uint64, &dirty_purged); - emitter_json_kv(emitter, "muzzy_npurge", emitter_type_uint64, - &muzzy_npurge); - emitter_json_kv(emitter, "muzzy_nmadvise", emitter_type_uint64, - &muzzy_nmadvise); - emitter_json_kv(emitter, "muzzy_purged", emitter_type_uint64, - &muzzy_purged); + emitter_json_kv( + emitter, "muzzy_npurge", emitter_type_uint64, &muzzy_npurge); + emitter_json_kv( + emitter, "muzzy_nmadvise", emitter_type_uint64, &muzzy_nmadvise); + emitter_json_kv( + emitter, "muzzy_purged", emitter_type_uint64, &muzzy_purged); /* Table-style emission. 
*/ COL(decay_row, decay_type, right, 9, title); @@ -1374,12 +1367,12 @@ stats_arena_print(emitter_t *emitter, unsigned i, bool bins, bool large, col_count_nfills_ps.type = emitter_type_uint64; col_count_nflushes_ps.type = emitter_type_uint64; -#define GET_AND_EMIT_ALLOC_STAT(small_or_large, name, valtype) \ - CTL_M2_GET("stats.arenas.0." #small_or_large "." #name, i, \ - &small_or_large##_##name, valtype##_t); \ - emitter_json_kv(emitter, #name, emitter_type_##valtype, \ - &small_or_large##_##name); \ - col_count_##name.type = emitter_type_##valtype; \ +#define GET_AND_EMIT_ALLOC_STAT(small_or_large, name, valtype) \ + CTL_M2_GET("stats.arenas.0." #small_or_large "." #name, i, \ + &small_or_large##_##name, valtype##_t); \ + emitter_json_kv( \ + emitter, #name, emitter_type_##valtype, &small_or_large##_##name); \ + col_count_##name.type = emitter_type_##valtype; \ col_count_##name.valtype##_val = small_or_large##_##name; emitter_json_object_kv_begin(emitter, "small"); @@ -1387,20 +1380,20 @@ stats_arena_print(emitter_t *emitter, unsigned i, bool bins, bool large, GET_AND_EMIT_ALLOC_STAT(small, allocated, size) GET_AND_EMIT_ALLOC_STAT(small, nmalloc, uint64) - col_count_nmalloc_ps.uint64_val = - rate_per_second(col_count_nmalloc.uint64_val, uptime); + col_count_nmalloc_ps.uint64_val = rate_per_second( + col_count_nmalloc.uint64_val, uptime); GET_AND_EMIT_ALLOC_STAT(small, ndalloc, uint64) - col_count_ndalloc_ps.uint64_val = - rate_per_second(col_count_ndalloc.uint64_val, uptime); + col_count_ndalloc_ps.uint64_val = rate_per_second( + col_count_ndalloc.uint64_val, uptime); GET_AND_EMIT_ALLOC_STAT(small, nrequests, uint64) - col_count_nrequests_ps.uint64_val = - rate_per_second(col_count_nrequests.uint64_val, uptime); + col_count_nrequests_ps.uint64_val = rate_per_second( + col_count_nrequests.uint64_val, uptime); GET_AND_EMIT_ALLOC_STAT(small, nfills, uint64) - col_count_nfills_ps.uint64_val = - rate_per_second(col_count_nfills.uint64_val, uptime); + 
col_count_nfills_ps.uint64_val = rate_per_second( + col_count_nfills.uint64_val, uptime); GET_AND_EMIT_ALLOC_STAT(small, nflushes, uint64) - col_count_nflushes_ps.uint64_val = - rate_per_second(col_count_nflushes.uint64_val, uptime); + col_count_nflushes_ps.uint64_val = rate_per_second( + col_count_nflushes.uint64_val, uptime); emitter_table_row(emitter, &alloc_count_row); emitter_json_object_end(emitter); /* Close "small". */ @@ -1410,20 +1403,20 @@ stats_arena_print(emitter_t *emitter, unsigned i, bool bins, bool large, GET_AND_EMIT_ALLOC_STAT(large, allocated, size) GET_AND_EMIT_ALLOC_STAT(large, nmalloc, uint64) - col_count_nmalloc_ps.uint64_val = - rate_per_second(col_count_nmalloc.uint64_val, uptime); + col_count_nmalloc_ps.uint64_val = rate_per_second( + col_count_nmalloc.uint64_val, uptime); GET_AND_EMIT_ALLOC_STAT(large, ndalloc, uint64) - col_count_ndalloc_ps.uint64_val = - rate_per_second(col_count_ndalloc.uint64_val, uptime); + col_count_ndalloc_ps.uint64_val = rate_per_second( + col_count_ndalloc.uint64_val, uptime); GET_AND_EMIT_ALLOC_STAT(large, nrequests, uint64) - col_count_nrequests_ps.uint64_val = - rate_per_second(col_count_nrequests.uint64_val, uptime); + col_count_nrequests_ps.uint64_val = rate_per_second( + col_count_nrequests.uint64_val, uptime); GET_AND_EMIT_ALLOC_STAT(large, nfills, uint64) - col_count_nfills_ps.uint64_val = - rate_per_second(col_count_nfills.uint64_val, uptime); + col_count_nfills_ps.uint64_val = rate_per_second( + col_count_nfills.uint64_val, uptime); GET_AND_EMIT_ALLOC_STAT(large, nflushes, uint64) - col_count_nflushes_ps.uint64_val = - rate_per_second(col_count_nflushes.uint64_val, uptime); + col_count_nflushes_ps.uint64_val = rate_per_second( + col_count_nflushes.uint64_val, uptime); emitter_table_row(emitter, &alloc_count_row); emitter_json_object_end(emitter); /* Close "large". 
*/ @@ -1438,16 +1431,16 @@ stats_arena_print(emitter_t *emitter, unsigned i, bool bins, bool large, col_count_nrequests.uint64_val = small_nrequests + large_nrequests; col_count_nfills.uint64_val = small_nfills + large_nfills; col_count_nflushes.uint64_val = small_nflushes + large_nflushes; - col_count_nmalloc_ps.uint64_val = - rate_per_second(col_count_nmalloc.uint64_val, uptime); - col_count_ndalloc_ps.uint64_val = - rate_per_second(col_count_ndalloc.uint64_val, uptime); - col_count_nrequests_ps.uint64_val = - rate_per_second(col_count_nrequests.uint64_val, uptime); - col_count_nfills_ps.uint64_val = - rate_per_second(col_count_nfills.uint64_val, uptime); - col_count_nflushes_ps.uint64_val = - rate_per_second(col_count_nflushes.uint64_val, uptime); + col_count_nmalloc_ps.uint64_val = rate_per_second( + col_count_nmalloc.uint64_val, uptime); + col_count_ndalloc_ps.uint64_val = rate_per_second( + col_count_ndalloc.uint64_val, uptime); + col_count_nrequests_ps.uint64_val = rate_per_second( + col_count_nrequests.uint64_val, uptime); + col_count_nfills_ps.uint64_val = rate_per_second( + col_count_nfills.uint64_val, uptime); + col_count_nflushes_ps.uint64_val = rate_per_second( + col_count_nflushes.uint64_val, uptime); emitter_table_row(emitter, &alloc_count_row); emitter_row_t mem_count_row; @@ -1475,11 +1468,11 @@ stats_arena_print(emitter_t *emitter, unsigned i, bool bins, bool large, mem_count_val.size_val = pactive * page; emitter_table_row(emitter, &mem_count_row); -#define GET_AND_EMIT_MEM_STAT(stat) \ - CTL_M2_GET("stats.arenas.0."#stat, i, &stat, size_t); \ - emitter_json_kv(emitter, #stat, emitter_type_size, &stat); \ - mem_count_title.str_val = #stat":"; \ - mem_count_val.size_val = stat; \ +#define GET_AND_EMIT_MEM_STAT(stat) \ + CTL_M2_GET("stats.arenas.0." 
#stat, i, &stat, size_t); \ + emitter_json_kv(emitter, #stat, emitter_type_size, &stat); \ + mem_count_title.str_val = #stat ":"; \ + mem_count_val.size_val = stat; \ emitter_table_row(emitter, &mem_count_row); GET_AND_EMIT_MEM_STAT(mapped) @@ -1517,13 +1510,13 @@ JEMALLOC_COLD static void stats_general_print(emitter_t *emitter) { const char *cpv; - bool bv, bv2; - unsigned uv; - uint32_t u32v; - uint64_t u64v; - int64_t i64v; - ssize_t ssv, ssv2; - size_t sv, bsz, usz, u32sz, u64sz, i64sz, ssz, sssz, cpsz; + bool bv, bv2; + unsigned uv; + uint32_t u32v; + uint64_t u64v; + int64_t i64v; + ssize_t ssv, ssv2; + size_t sv, bsz, usz, u32sz, u64sz, i64sz, ssz, sssz, cpsz; bsz = sizeof(bool); usz = sizeof(unsigned); @@ -1539,11 +1532,11 @@ stats_general_print(emitter_t *emitter) { /* config. */ emitter_dict_begin(emitter, "config", "Build-time option settings"); -#define CONFIG_WRITE_BOOL(name) \ - do { \ - CTL_GET("config."#name, &bv, bool); \ - emitter_kv(emitter, #name, "config."#name, \ - emitter_type_bool, &bv); \ +#define CONFIG_WRITE_BOOL(name) \ + do { \ + CTL_GET("config." #name, &bv, bool); \ + emitter_kv( \ + emitter, #name, "config." #name, emitter_type_bool, &bv); \ } while (0) CONFIG_WRITE_BOOL(cache_oblivious); @@ -1565,45 +1558,33 @@ stats_general_print(emitter_t *emitter) { emitter_dict_end(emitter); /* Close "config" dict. */ /* opt. */ -#define OPT_WRITE(name, var, size, emitter_type) \ - if (je_mallctl("opt."name, (void *)&var, &size, NULL, 0) == \ - 0) { \ - emitter_kv(emitter, name, "opt."name, emitter_type, \ - &var); \ +#define OPT_WRITE(name, var, size, emitter_type) \ + if (je_mallctl("opt." name, (void *)&var, &size, NULL, 0) == 0) { \ + emitter_kv(emitter, name, "opt." 
name, emitter_type, &var); \ } -#define OPT_WRITE_MUTABLE(name, var1, var2, size, emitter_type, \ - altname) \ - if (je_mallctl("opt."name, (void *)&var1, &size, NULL, 0) == \ - 0 && je_mallctl(altname, (void *)&var2, &size, NULL, 0) \ - == 0) { \ - emitter_kv_note(emitter, name, "opt."name, \ - emitter_type, &var1, altname, emitter_type, \ - &var2); \ +#define OPT_WRITE_MUTABLE(name, var1, var2, size, emitter_type, altname) \ + if (je_mallctl("opt." name, (void *)&var1, &size, NULL, 0) == 0 \ + && je_mallctl(altname, (void *)&var2, &size, NULL, 0) == 0) { \ + emitter_kv_note(emitter, name, "opt." name, emitter_type, \ + &var1, altname, emitter_type, &var2); \ } #define OPT_WRITE_BOOL(name) OPT_WRITE(name, bv, bsz, emitter_type_bool) -#define OPT_WRITE_BOOL_MUTABLE(name, altname) \ +#define OPT_WRITE_BOOL_MUTABLE(name, altname) \ OPT_WRITE_MUTABLE(name, bv, bv2, bsz, emitter_type_bool, altname) -#define OPT_WRITE_UNSIGNED(name) \ - OPT_WRITE(name, uv, usz, emitter_type_unsigned) +#define OPT_WRITE_UNSIGNED(name) OPT_WRITE(name, uv, usz, emitter_type_unsigned) -#define OPT_WRITE_INT64(name) \ - OPT_WRITE(name, i64v, i64sz, emitter_type_int64) -#define OPT_WRITE_UINT64(name) \ - OPT_WRITE(name, u64v, u64sz, emitter_type_uint64) +#define OPT_WRITE_INT64(name) OPT_WRITE(name, i64v, i64sz, emitter_type_int64) +#define OPT_WRITE_UINT64(name) OPT_WRITE(name, u64v, u64sz, emitter_type_uint64) -#define OPT_WRITE_SIZE_T(name) \ - OPT_WRITE(name, sv, ssz, emitter_type_size) -#define OPT_WRITE_SSIZE_T(name) \ - OPT_WRITE(name, ssv, sssz, emitter_type_ssize) -#define OPT_WRITE_SSIZE_T_MUTABLE(name, altname) \ - OPT_WRITE_MUTABLE(name, ssv, ssv2, sssz, emitter_type_ssize, \ - altname) +#define OPT_WRITE_SIZE_T(name) OPT_WRITE(name, sv, ssz, emitter_type_size) +#define OPT_WRITE_SSIZE_T(name) OPT_WRITE(name, ssv, sssz, emitter_type_ssize) +#define OPT_WRITE_SSIZE_T_MUTABLE(name, altname) \ + OPT_WRITE_MUTABLE(name, ssv, ssv2, sssz, emitter_type_ssize, altname) -#define 
OPT_WRITE_CHAR_P(name) \ - OPT_WRITE(name, cpv, cpsz, emitter_type_string) +#define OPT_WRITE_CHAR_P(name) OPT_WRITE(name, cpv, cpsz, emitter_type_string) emitter_dict_begin(emitter, "opt", "Run-time option settings"); @@ -1623,21 +1604,24 @@ stats_general_print(emitter_t *emitter) { * Note: The outputs are strictly ordered by priorities (low -> high). * */ -#define MALLOC_CONF_WRITE(name, message) \ - if (je_mallctl("opt.malloc_conf."name, (void *)&cpv, &cpsz, NULL, 0) != \ - 0) { \ - cpv = ""; \ - } \ - emitter_kv(emitter, name, message, emitter_type_string, &cpv); +#define MALLOC_CONF_WRITE(name, message) \ + if (je_mallctl("opt.malloc_conf." name, (void *)&cpv, &cpsz, NULL, 0) \ + != 0) { \ + cpv = ""; \ + } \ + emitter_kv(emitter, name, message, emitter_type_string, &cpv); MALLOC_CONF_WRITE("global_var", "Global variable malloc_conf"); MALLOC_CONF_WRITE("symlink", "Symbolic link malloc.conf"); MALLOC_CONF_WRITE("env_var", "Environment variable MALLOC_CONF"); /* As this config is unofficial, skip the output if it's NULL */ - if (je_mallctl("opt.malloc_conf.global_var_2_conf_harder", - (void *)&cpv, &cpsz, NULL, 0) == 0) { - emitter_kv(emitter, "global_var_2_conf_harder", "Global " - "variable malloc_conf_2_conf_harder", emitter_type_string, &cpv); + if (je_mallctl("opt.malloc_conf.global_var_2_conf_harder", (void *)&cpv, + &cpsz, NULL, 0) + == 0) { + emitter_kv(emitter, "global_var_2_conf_harder", + "Global " + "variable malloc_conf_2_conf_harder", + emitter_type_string, &cpv); } #undef MALLOC_CONF_WRITE @@ -1712,8 +1696,8 @@ stats_general_print(emitter_t *emitter) { OPT_WRITE_UNSIGNED("prof_bt_max") OPT_WRITE_CHAR_P("prof_prefix") OPT_WRITE_BOOL_MUTABLE("prof_active", "prof.active") - OPT_WRITE_BOOL_MUTABLE("prof_thread_active_init", - "prof.thread_active_init") + OPT_WRITE_BOOL_MUTABLE( + "prof_thread_active_init", "prof.thread_active_init") OPT_WRITE_SSIZE_T_MUTABLE("lg_prof_sample", "prof.lg_sample") OPT_WRITE_BOOL("prof_accum") 
OPT_WRITE_SSIZE_T("lg_prof_interval") @@ -1751,12 +1735,12 @@ stats_general_print(emitter_t *emitter) { "prof.thread_active_init", emitter_type_bool, &bv); CTL_GET("prof.active", &bv, bool); - emitter_kv(emitter, "active", "prof.active", emitter_type_bool, - &bv); + emitter_kv( + emitter, "active", "prof.active", emitter_type_bool, &bv); CTL_GET("prof.gdump", &bv, bool); - emitter_kv(emitter, "gdump", "prof.gdump", emitter_type_bool, - &bv); + emitter_kv( + emitter, "gdump", "prof.gdump", emitter_type_bool, &bv); CTL_GET("prof.interval", &u64v, uint64_t); emitter_kv(emitter, "interval", "prof.interval", @@ -1796,8 +1780,8 @@ stats_general_print(emitter_t *emitter) { emitter_kv(emitter, "page", "Page size", emitter_type_size, &sv); CTL_GET("arenas.hugepage", &sv, size_t); - emitter_kv(emitter, "hugepage", "Hugepage size", emitter_type_size, - &sv); + emitter_kv( + emitter, "hugepage", "Hugepage size", emitter_type_size, &sv); if (je_mallctl("arenas.tcache_max", (void *)&sv, &ssz, NULL, 0) == 0) { emitter_kv(emitter, "tcache_max", @@ -1827,20 +1811,20 @@ stats_general_print(emitter_t *emitter) { emitter_json_object_begin(emitter); CTL_LEAF(arenas_bin_mib, 3, "size", &sv, size_t); - emitter_json_kv(emitter, "size", emitter_type_size, - &sv); + emitter_json_kv( + emitter, "size", emitter_type_size, &sv); CTL_LEAF(arenas_bin_mib, 3, "nregs", &u32v, uint32_t); - emitter_json_kv(emitter, "nregs", emitter_type_uint32, - &u32v); + emitter_json_kv( + emitter, "nregs", emitter_type_uint32, &u32v); CTL_LEAF(arenas_bin_mib, 3, "slab_size", &sv, size_t); - emitter_json_kv(emitter, "slab_size", emitter_type_size, - &sv); + emitter_json_kv( + emitter, "slab_size", emitter_type_size, &sv); CTL_LEAF(arenas_bin_mib, 3, "nshards", &u32v, uint32_t); - emitter_json_kv(emitter, "nshards", emitter_type_uint32, - &u32v); + emitter_json_kv( + emitter, "nshards", emitter_type_uint32, &u32v); emitter_json_object_end(emitter); } @@ -1861,8 +1845,8 @@ stats_general_print(emitter_t *emitter) { 
emitter_json_object_begin(emitter); CTL_LEAF(arenas_lextent_mib, 3, "size", &sv, size_t); - emitter_json_kv(emitter, "size", emitter_type_size, - &sv); + emitter_json_kv( + emitter, "size", emitter_type_size, &sv); emitter_json_object_end(emitter); } @@ -1882,8 +1866,8 @@ stats_print_helper(emitter_t *emitter, bool merged, bool destroyed, */ size_t allocated, active, metadata, metadata_edata, metadata_rtree, metadata_thp, resident, mapped, retained; - size_t num_background_threads; - size_t zero_reallocs; + size_t num_background_threads; + size_t zero_reallocs; uint64_t background_thread_num_runs, background_thread_run_interval; CTL_GET("stats.allocated", &allocated, size_t); @@ -1916,23 +1900,24 @@ stats_print_helper(emitter_t *emitter, bool merged, bool destroyed, emitter_json_kv(emitter, "allocated", emitter_type_size, &allocated); emitter_json_kv(emitter, "active", emitter_type_size, &active); emitter_json_kv(emitter, "metadata", emitter_type_size, &metadata); - emitter_json_kv(emitter, "metadata_edata", emitter_type_size, - &metadata_edata); - emitter_json_kv(emitter, "metadata_rtree", emitter_type_size, - &metadata_rtree); - emitter_json_kv(emitter, "metadata_thp", emitter_type_size, - &metadata_thp); + emitter_json_kv( + emitter, "metadata_edata", emitter_type_size, &metadata_edata); + emitter_json_kv( + emitter, "metadata_rtree", emitter_type_size, &metadata_rtree); + emitter_json_kv( + emitter, "metadata_thp", emitter_type_size, &metadata_thp); emitter_json_kv(emitter, "resident", emitter_type_size, &resident); emitter_json_kv(emitter, "mapped", emitter_type_size, &mapped); emitter_json_kv(emitter, "retained", emitter_type_size, &retained); - emitter_json_kv(emitter, "zero_reallocs", emitter_type_size, - &zero_reallocs); + emitter_json_kv( + emitter, "zero_reallocs", emitter_type_size, &zero_reallocs); - emitter_table_printf(emitter, "Allocated: %zu, active: %zu, " + emitter_table_printf(emitter, + "Allocated: %zu, active: %zu, " "metadata: %zu (n_thp %zu, 
edata %zu, rtree %zu), resident: %zu, " - "mapped: %zu, retained: %zu\n", allocated, active, metadata, - metadata_thp, metadata_edata, metadata_rtree, resident, mapped, - retained); + "mapped: %zu, retained: %zu\n", + allocated, active, metadata, metadata_thp, metadata_edata, + metadata_rtree, resident, mapped, retained); /* Strange behaviors */ emitter_table_printf(emitter, @@ -1940,16 +1925,17 @@ stats_print_helper(emitter_t *emitter, bool merged, bool destroyed, /* Background thread stats. */ emitter_json_object_kv_begin(emitter, "background_thread"); - emitter_json_kv(emitter, "num_threads", emitter_type_size, - &num_background_threads); + emitter_json_kv( + emitter, "num_threads", emitter_type_size, &num_background_threads); emitter_json_kv(emitter, "num_runs", emitter_type_uint64, &background_thread_num_runs); emitter_json_kv(emitter, "run_interval", emitter_type_uint64, &background_thread_run_interval); emitter_json_object_end(emitter); /* Close "background_thread". */ - emitter_table_printf(emitter, "Background threads: %zu, " - "num_runs: %"FMTu64", run_interval: %"FMTu64" ns\n", + emitter_table_printf(emitter, + "Background threads: %zu, " + "num_runs: %" FMTu64 ", run_interval: %" FMTu64 " ns\n", num_background_threads, background_thread_num_runs, background_thread_run_interval); @@ -1958,7 +1944,7 @@ stats_print_helper(emitter_t *emitter, bool merged, bool destroyed, emitter_col_t name; emitter_col_t col64[mutex_prof_num_uint64_t_counters]; emitter_col_t col32[mutex_prof_num_uint32_t_counters]; - uint64_t uptime; + uint64_t uptime; emitter_row_init(&row); mutex_stats_init_cols(&row, "", &name, col64, col32); @@ -1973,7 +1959,8 @@ stats_print_helper(emitter_t *emitter, bool merged, bool destroyed, for (int i = 0; i < mutex_prof_num_global_mutexes; i++) { mutex_stats_read_global(stats_mutexes_mib, 2, global_mutex_names[i], &name, col64, col32, uptime); - emitter_json_object_kv_begin(emitter, global_mutex_names[i]); + emitter_json_object_kv_begin( + 
emitter, global_mutex_names[i]); mutex_stats_emit(emitter, &row, col64, col32); emitter_json_object_end(emitter); } @@ -1993,23 +1980,23 @@ stats_print_helper(emitter_t *emitter, bool merged, bool destroyed, size_t miblen = sizeof(mib) / sizeof(size_t); size_t sz; VARIABLE_ARRAY_UNSAFE(bool, initialized, narenas); - bool destroyed_initialized; + bool destroyed_initialized; unsigned i, ninitialized; xmallctlnametomib("arena.0.initialized", mib, &miblen); for (i = ninitialized = 0; i < narenas; i++) { mib[1] = i; sz = sizeof(bool); - xmallctlbymib(mib, miblen, &initialized[i], &sz, - NULL, 0); + xmallctlbymib( + mib, miblen, &initialized[i], &sz, NULL, 0); if (initialized[i]) { ninitialized++; } } mib[1] = MALLCTL_ARENAS_DESTROYED; sz = sizeof(bool); - xmallctlbymib(mib, miblen, &destroyed_initialized, &sz, - NULL, 0); + xmallctlbymib( + mib, miblen, &destroyed_initialized, &sz, NULL, 0); /* Merged stats. */ if (merged && (ninitialized > 1 || !unmerged)) { @@ -2024,12 +2011,13 @@ stats_print_helper(emitter_t *emitter, bool merged, bool destroyed, /* Destroyed stats. */ if (destroyed_initialized && destroyed) { /* Print destroyed arena stats. */ - emitter_table_printf(emitter, - "Destroyed arenas stats:\n"); + emitter_table_printf( + emitter, "Destroyed arenas stats:\n"); emitter_json_object_kv_begin(emitter, "destroyed"); stats_arena_print(emitter, MALLCTL_ARENAS_DESTROYED, bins, large, mutex, extents, hpa); - emitter_json_object_end(emitter); /* Close "destroyed". */ + emitter_json_object_end( + emitter); /* Close "destroyed". */ } /* Unmerged stats. 
*/ @@ -2039,8 +2027,8 @@ stats_print_helper(emitter_t *emitter, bool merged, bool destroyed, char arena_ind_str[20]; malloc_snprintf(arena_ind_str, sizeof(arena_ind_str), "%u", i); - emitter_json_object_kv_begin(emitter, - arena_ind_str); + emitter_json_object_kv_begin( + emitter, arena_ind_str); emitter_table_printf(emitter, "arenas[%s]:\n", arena_ind_str); stats_arena_print(emitter, i, bins, @@ -2056,9 +2044,9 @@ stats_print_helper(emitter_t *emitter, bool merged, bool destroyed, void stats_print(write_cb_t *write_cb, void *cbopaque, const char *opts) { - int err; + int err; uint64_t epoch; - size_t u64sz; + size_t u64sz; #define OPTION(o, v, d, s) bool v = d; STATS_PRINT_OPTIONS #undef OPTION @@ -2072,15 +2060,17 @@ stats_print(write_cb_t *write_cb, void *cbopaque, const char *opts) { * */ epoch = 1; u64sz = sizeof(uint64_t); - err = je_mallctl("epoch", (void *)&epoch, &u64sz, (void *)&epoch, - sizeof(uint64_t)); + err = je_mallctl( + "epoch", (void *)&epoch, &u64sz, (void *)&epoch, sizeof(uint64_t)); if (err != 0) { if (err == EAGAIN) { - malloc_write(": Memory allocation failure in " + malloc_write( + ": Memory allocation failure in " "mallctl(\"epoch\", ...)\n"); return; } - malloc_write(": Failure in mallctl(\"epoch\", " + malloc_write( + ": Failure in mallctl(\"epoch\", " "...)\n"); abort(); } @@ -2088,7 +2078,10 @@ stats_print(write_cb_t *write_cb, void *cbopaque, const char *opts) { if (opts != NULL) { for (unsigned i = 0; opts[i] != '\0'; i++) { switch (opts[i]) { -#define OPTION(o, v, d, s) case o: v = s; break; +#define OPTION(o, v, d, s) \ + case o: \ + v = s; \ + break; STATS_PRINT_OPTIONS #undef OPTION default:; @@ -2098,8 +2091,8 @@ stats_print(write_cb_t *write_cb, void *cbopaque, const char *opts) { emitter_t emitter; emitter_init(&emitter, - json ? emitter_output_json_compact : emitter_output_table, - write_cb, cbopaque); + json ? 
emitter_output_json_compact : emitter_output_table, write_cb, + cbopaque); emitter_begin(&emitter); emitter_table_printf(&emitter, "___ Begin jemalloc statistics ___\n"); emitter_json_object_kv_begin(&emitter, "jemalloc"); @@ -2108,8 +2101,8 @@ stats_print(write_cb_t *write_cb, void *cbopaque, const char *opts) { stats_general_print(&emitter); } if (config_stats) { - stats_print_helper(&emitter, merged, destroyed, unmerged, - bins, large, mutex, extents, hpa); + stats_print_helper(&emitter, merged, destroyed, unmerged, bins, + large, mutex, extents, hpa); } emitter_json_object_end(&emitter); /* Closes the "jemalloc" dict. */ @@ -2135,8 +2128,8 @@ stats_interval_event_handler(tsd_t *tsd) { uint64_t elapsed = last_event - last_sample_event; assert(elapsed > 0 && elapsed != TE_INVALID_ELAPSED); - if (counter_accum(tsd_tsdn(tsd), &stats_interval_accumulated, - elapsed)) { + if (counter_accum( + tsd_tsdn(tsd), &stats_interval_accumulated, elapsed)) { je_malloc_stats_print(NULL, NULL, opt_stats_interval_opts); } } @@ -2147,10 +2140,10 @@ stats_interval_enabled(void) { } te_base_cb_t stats_interval_te_handler = { - .enabled = &stats_interval_enabled, - .new_event_wait = &stats_interval_new_event_wait, - .postponed_event_wait = &stats_interval_postponed_event_wait, - .event_handler = &stats_interval_event_handler, + .enabled = &stats_interval_enabled, + .new_event_wait = &stats_interval_new_event_wait, + .postponed_event_wait = &stats_interval_postponed_event_wait, + .event_handler = &stats_interval_event_handler, }; bool @@ -2160,12 +2153,12 @@ stats_boot(void) { assert(opt_stats_interval == -1); stats_interval = 0; stats_interval_accum_batch = 0; - } else{ + } else { /* See comments in stats.h */ - stats_interval = (opt_stats_interval > 0) ? - opt_stats_interval : 1; - uint64_t batch = stats_interval >> - STATS_INTERVAL_ACCUM_LG_BATCH_SIZE; + stats_interval = (opt_stats_interval > 0) ? 
opt_stats_interval + : 1; + uint64_t batch = stats_interval + >> STATS_INTERVAL_ACCUM_LG_BATCH_SIZE; if (batch > STATS_INTERVAL_ACCUM_BATCH_MAX) { batch = STATS_INTERVAL_ACCUM_BATCH_MAX; } else if (batch == 0) { diff --git a/src/sz.c b/src/sz.c index 89def9d5..4a4c057d 100644 --- a/src/sz.c +++ b/src/sz.c @@ -3,12 +3,12 @@ #include "jemalloc/internal/sz.h" JEMALLOC_ALIGNED(CACHELINE) -size_t sz_pind2sz_tab[SC_NPSIZES+1]; +size_t sz_pind2sz_tab[SC_NPSIZES + 1]; size_t sz_large_pad; size_t sz_psz_quantize_floor(size_t size) { - size_t ret; + size_t ret; pszind_t pind; assert(size > 0); @@ -47,8 +47,8 @@ sz_psz_quantize_ceil(size_t size) { * search would potentially find sufficiently aligned available * memory somewhere lower. */ - ret = sz_pind2sz(sz_psz2ind(ret - sz_large_pad + 1)) + - sz_large_pad; + ret = sz_pind2sz(sz_psz2ind(ret - sz_large_pad + 1)) + + sz_large_pad; } return ret; } @@ -93,12 +93,12 @@ sz_boot_size2index_tab(const sc_data_t *sc_data) { size_t dst_max = (SC_LOOKUP_MAXCLASS >> SC_LG_TINY_MIN) + 1; size_t dst_ind = 0; for (unsigned sc_ind = 0; sc_ind < SC_NSIZES && dst_ind < dst_max; - sc_ind++) { + sc_ind++) { const sc_t *sc = &sc_data->sc[sc_ind]; - size_t sz = (ZU(1) << sc->lg_base) + size_t sz = (ZU(1) << sc->lg_base) + (ZU(sc->ndelta) << sc->lg_delta); size_t max_ind = ((sz + (ZU(1) << SC_LG_TINY_MIN) - 1) - >> SC_LG_TINY_MIN); + >> SC_LG_TINY_MIN); for (; dst_ind <= max_ind && dst_ind < dst_max; dst_ind++) { assert(sc_ind < 1 << (sizeof(uint8_t) * 8)); sz_size2index_tab[dst_ind] = (uint8_t)sc_ind; diff --git a/src/tcache.c b/src/tcache.c index 0154403d..44a96841 100644 --- a/src/tcache.c +++ b/src/tcache.c @@ -29,7 +29,7 @@ unsigned opt_tcache_nslots_large = 20; * This is bounded by some other constraints as well, like the fact that it * must be even, must be less than opt_tcache_nslots_small_max, etc.. 
*/ -ssize_t opt_lg_tcache_nslots_mul = 1; +ssize_t opt_lg_tcache_nslots_mul = 1; /* * Number of allocation bytes between tcache incremental GCs. Again, this @@ -63,13 +63,13 @@ unsigned opt_lg_tcache_flush_large_div = 1; * is only used to initialize tcache_nbins in the per-thread tcache. * Directly modifying it will not affect threads already launched. */ -unsigned global_do_not_change_tcache_nbins; +unsigned global_do_not_change_tcache_nbins; /* * Max size class to be cached (can be small or large). This value is only used * to initialize tcache_max in the per-thread tcache. Directly modifying it * will not affect threads already launched. */ -size_t global_do_not_change_tcache_maxclass; +size_t global_do_not_change_tcache_maxclass; /* * Default bin info for each bin. Will be initialized in malloc_conf_init @@ -83,16 +83,16 @@ static cache_bin_info_t opt_tcache_ncached_max[TCACHE_NBINS_MAX] = {{0}}; */ static bool opt_tcache_ncached_max_set[TCACHE_NBINS_MAX] = {0}; -tcaches_t *tcaches; +tcaches_t *tcaches; /* Index of first element within tcaches that has never been used. */ -static unsigned tcaches_past; +static unsigned tcaches_past; /* Head of singly linked list tracking available tcaches elements. */ -static tcaches_t *tcaches_avail; +static tcaches_t *tcaches_avail; /* Protects tcaches{,_past,_avail}. */ -static malloc_mutex_t tcaches_mtx; +static malloc_mutex_t tcaches_mtx; /******************************************************************************/ @@ -180,8 +180,8 @@ tcache_nfill_small_burst_reset(tcache_slow_t *tcache_slow, szind_t szind) { * count should be decreased, i.e. lg_div(base) should be increased. 
*/ static inline void -tcache_nfill_small_gc_update(tcache_slow_t *tcache_slow, szind_t szind, - cache_bin_sz_t limit) { +tcache_nfill_small_gc_update( + tcache_slow_t *tcache_slow, szind_t szind, cache_bin_sz_t limit) { cache_bin_fill_ctl_t *ctl = tcache_bin_fill_ctl_get(tcache_slow, szind); if (!limit && ctl->base > 1) { /* @@ -214,16 +214,17 @@ tcache_gc_item_delay_compute(szind_t szind) { } static inline void * -tcache_gc_small_heuristic_addr_get(tsd_t *tsd, tcache_slow_t *tcache_slow, - szind_t szind) { +tcache_gc_small_heuristic_addr_get( + tsd_t *tsd, tcache_slow_t *tcache_slow, szind_t szind) { assert(szind < SC_NBINS); tsdn_t *tsdn = tsd_tsdn(tsd); - bin_t *bin = arena_bin_choose(tsdn, tcache_slow->arena, szind, NULL); + bin_t *bin = arena_bin_choose(tsdn, tcache_slow->arena, szind, NULL); assert(bin != NULL); malloc_mutex_lock(tsdn, &bin->lock); - edata_t *slab = (bin->slabcur == NULL) ? - edata_heap_first(&bin->slabs_nonfull) : bin->slabcur; + edata_t *slab = (bin->slabcur == NULL) + ? edata_heap_first(&bin->slabs_nonfull) + : bin->slabcur; assert(slab != NULL || edata_heap_empty(&bin->slabs_nonfull)); void *ret = (slab != NULL) ? edata_addr_get(slab) : NULL; assert(ret != NULL || slab == NULL); @@ -250,21 +251,23 @@ tcache_gc_small_nremote_get(cache_bin_t *cache_bin, void *addr, * starting from 2M, so that the total number of disjoint virtual * memory ranges retained by each shard is limited. */ - uintptr_t neighbor_min = ((uintptr_t)addr > TCACHE_GC_NEIGHBOR_LIMIT) ? - ((uintptr_t)addr - TCACHE_GC_NEIGHBOR_LIMIT) : 0; - uintptr_t neighbor_max = ((uintptr_t)addr < (UINTPTR_MAX - - TCACHE_GC_NEIGHBOR_LIMIT)) ? ((uintptr_t)addr + - TCACHE_GC_NEIGHBOR_LIMIT) : UINTPTR_MAX; + uintptr_t neighbor_min = ((uintptr_t)addr > TCACHE_GC_NEIGHBOR_LIMIT) + ? ((uintptr_t)addr - TCACHE_GC_NEIGHBOR_LIMIT) + : 0; + uintptr_t neighbor_max = ((uintptr_t)addr + < (UINTPTR_MAX - TCACHE_GC_NEIGHBOR_LIMIT)) + ? 
((uintptr_t)addr + TCACHE_GC_NEIGHBOR_LIMIT) + : UINTPTR_MAX; /* Scan the entire bin to count the number of remote pointers. */ - void **head = cache_bin->stack_head; + void **head = cache_bin->stack_head; cache_bin_sz_t n_remote_slab = 0, n_remote_neighbor = 0; cache_bin_sz_t ncached = cache_bin_ncached_get_local(cache_bin); for (void **cur = head; cur < head + ncached; cur++) { - n_remote_slab += (cache_bin_sz_t)tcache_gc_is_addr_remote(*cur, - slab_min, slab_max); - n_remote_neighbor += (cache_bin_sz_t)tcache_gc_is_addr_remote(*cur, - neighbor_min, neighbor_max); + n_remote_slab += (cache_bin_sz_t)tcache_gc_is_addr_remote( + *cur, slab_min, slab_max); + n_remote_neighbor += (cache_bin_sz_t)tcache_gc_is_addr_remote( + *cur, neighbor_min, neighbor_max); } /* * Note: since slab size is dynamic and can be larger than 2M, i.e. @@ -295,8 +298,8 @@ tcache_gc_small_nremote_get(cache_bin_t *cache_bin, void *addr, /* Shuffle the ptrs in the bin to put the remote pointers at the bottom. */ static inline void tcache_gc_small_bin_shuffle(cache_bin_t *cache_bin, cache_bin_sz_t nremote, - uintptr_t addr_min, uintptr_t addr_max) { - void **swap = NULL; + uintptr_t addr_min, uintptr_t addr_max) { + void **swap = NULL; cache_bin_sz_t ncached = cache_bin_ncached_get_local(cache_bin); cache_bin_sz_t ntop = ncached - nremote, cnt = 0; assert(ntop > 0 && ntop < ncached); @@ -320,13 +323,15 @@ tcache_gc_small_bin_shuffle(cache_bin_t *cache_bin, cache_bin_sz_t nremote, */ if (swap != NULL) { assert(swap < cur); - assert(tcache_gc_is_addr_remote(*swap, addr_min, addr_max)); + assert(tcache_gc_is_addr_remote( + *swap, addr_min, addr_max)); void *tmp = *cur; *cur = *swap; *swap = tmp; swap++; assert(swap <= cur); - assert(tcache_gc_is_addr_remote(*swap, addr_min, addr_max)); + assert(tcache_gc_is_addr_remote( + *swap, addr_min, addr_max)); } continue; } else if (swap == NULL) { @@ -344,8 +349,8 @@ tcache_gc_small_bin_shuffle(cache_bin_t *cache_bin, cache_bin_sz_t nremote, break; } if 
(!tcache_gc_is_addr_remote(*cur, addr_min, addr_max)) { - assert(tcache_gc_is_addr_remote(*(head + cnt), addr_min, - addr_max)); + assert(tcache_gc_is_addr_remote( + *(head + cnt), addr_min, addr_max)); void *tmp = *cur; *cur = *(head + cnt); *(head + cnt) = tmp; @@ -356,15 +361,17 @@ tcache_gc_small_bin_shuffle(cache_bin_t *cache_bin, cache_bin_sz_t nremote, /* Sanity check to make sure the shuffle is done correctly. */ for (void **cur = head; cur < head + ncached; cur++) { assert(*cur != NULL); - assert(((cur < head + ntop) && !tcache_gc_is_addr_remote( - *cur, addr_min, addr_max)) || ((cur >= head + ntop) && - tcache_gc_is_addr_remote(*cur, addr_min, addr_max))); + assert( + ((cur < head + ntop) + && !tcache_gc_is_addr_remote(*cur, addr_min, addr_max)) + || ((cur >= head + ntop) + && tcache_gc_is_addr_remote(*cur, addr_min, addr_max))); } } static bool -tcache_gc_small(tsd_t *tsd, tcache_slow_t *tcache_slow, tcache_t *tcache, - szind_t szind) { +tcache_gc_small( + tsd_t *tsd, tcache_slow_t *tcache_slow, tcache_t *tcache, szind_t szind) { /* * Aim to flush 3/4 of items below low-water, with remote pointers being * prioritized for flushing. @@ -403,24 +410,26 @@ tcache_gc_small(tsd_t *tsd, tcache_slow_t *tcache_slow, tcache_t *tcache, if (nflush < tcache_slow->bin_flush_delay_items[szind]) { /* Workaround for a conversion warning. */ uint8_t nflush_uint8 = (uint8_t)nflush; - assert(sizeof(tcache_slow->bin_flush_delay_items[0]) == - sizeof(nflush_uint8)); - tcache_slow->bin_flush_delay_items[szind] -= nflush_uint8; + assert(sizeof(tcache_slow->bin_flush_delay_items[0]) + == sizeof(nflush_uint8)); + tcache_slow->bin_flush_delay_items[szind] -= + nflush_uint8; return false; } - tcache_slow->bin_flush_delay_items[szind] - = tcache_gc_item_delay_compute(szind); + tcache_slow->bin_flush_delay_items[szind] = + tcache_gc_item_delay_compute(szind); goto label_flush; } /* Directly goto the flush path when the entire bin needs to be flushed. 
*/ - if ( nflush == ncached) { + if (nflush == ncached) { goto label_flush; } /* Query arena binshard to get heuristic locality info. */ - void *addr = tcache_gc_small_heuristic_addr_get(tsd, tcache_slow, szind); + void *addr = tcache_gc_small_heuristic_addr_get( + tsd, tcache_slow, szind); if (addr == NULL) { goto label_flush; } @@ -429,9 +438,9 @@ tcache_gc_small(tsd_t *tsd, tcache_slow_t *tcache_slow, tcache_t *tcache, * Use the queried addr above to get the number of remote ptrs in the * bin, and the min/max of the local addr range. */ - uintptr_t addr_min, addr_max; - cache_bin_sz_t nremote = tcache_gc_small_nremote_get(cache_bin, addr, - &addr_min, &addr_max, szind, nflush); + uintptr_t addr_min, addr_max; + cache_bin_sz_t nremote = tcache_gc_small_nremote_get( + cache_bin, addr, &addr_min, &addr_max, szind, nflush); /* * Update the nflush to the larger value between the intended flush count @@ -448,7 +457,7 @@ tcache_gc_small(tsd_t *tsd, tcache_slow_t *tcache_slow, tcache_t *tcache, * also be flushed. */ assert(nflush < ncached || nremote == ncached); - if (nremote == 0 || nremote == ncached) { + if (nremote == 0 || nremote == ncached) { goto label_flush; } @@ -467,14 +476,14 @@ label_flush: return false; } assert(nflush <= ncached); - tcache_bin_flush_small(tsd, tcache, cache_bin, szind, - (unsigned)(ncached - nflush)); + tcache_bin_flush_small( + tsd, tcache, cache_bin, szind, (unsigned)(ncached - nflush)); return true; } static bool -tcache_gc_large(tsd_t *tsd, tcache_slow_t *tcache_slow, tcache_t *tcache, - szind_t szind) { +tcache_gc_large( + tsd_t *tsd, tcache_slow_t *tcache_slow, tcache_t *tcache, szind_t szind) { /* * Like the small GC, flush 3/4 of untouched items. However, simply flush * the bottom nflush items, without any locality check. 
@@ -486,16 +495,16 @@ tcache_gc_large(tsd_t *tsd, tcache_slow_t *tcache_slow, tcache_t *tcache, if (low_water == 0) { return false; } - unsigned nrem = (unsigned)(cache_bin_ncached_get_local(cache_bin) - - low_water + (low_water >> 2)); + unsigned nrem = (unsigned)(cache_bin_ncached_get_local(cache_bin) + - low_water + (low_water >> 2)); tcache_bin_flush_large(tsd, tcache, cache_bin, szind, nrem); return true; } /* Try to gc one bin by szind, return true if there is item flushed. */ static bool -tcache_try_gc_bin(tsd_t *tsd, tcache_slow_t *tcache_slow, tcache_t *tcache, - szind_t szind) { +tcache_try_gc_bin( + tsd_t *tsd, tcache_slow_t *tcache_slow, tcache_t *tcache, szind_t szind) { assert(tcache != NULL); cache_bin_t *cache_bin = &tcache->bins[szind]; if (tcache_bin_disabled(szind, cache_bin, tcache_slow)) { @@ -504,8 +513,8 @@ tcache_try_gc_bin(tsd_t *tsd, tcache_slow_t *tcache_slow, tcache_t *tcache, bool is_small = (szind < SC_NBINS); tcache_bin_flush_stashed(tsd, tcache, cache_bin, szind, is_small); - bool ret = is_small ? tcache_gc_small(tsd, tcache_slow, tcache, szind) : - tcache_gc_large(tsd, tcache_slow, tcache, szind); + bool ret = is_small ? tcache_gc_small(tsd, tcache_slow, tcache, szind) + : tcache_gc_large(tsd, tcache_slow, tcache, szind); cache_bin_low_water_set(cache_bin); return ret; } @@ -536,8 +545,8 @@ tcache_gc_event(tsd_t *tsd) { nstime_update(&now); assert(nstime_compare(&now, &tcache_slow->last_gc_time) >= 0); - if (nstime_ns(&now) - nstime_ns(&tcache_slow->last_gc_time) < - TCACHE_GC_INTERVAL_NS) { + if (nstime_ns(&now) - nstime_ns(&tcache_slow->last_gc_time) + < TCACHE_GC_INTERVAL_NS) { // time interval is too short, skip this event. return; } @@ -546,13 +555,15 @@ tcache_gc_event(tsd_t *tsd) { unsigned gc_small_nbins = 0, gc_large_nbins = 0; unsigned tcache_nbins = tcache_nbins_get(tcache_slow); - unsigned small_nbins = tcache_nbins > SC_NBINS ? 
SC_NBINS : tcache_nbins; - szind_t szind_small = tcache_slow->next_gc_bin_small; - szind_t szind_large = tcache_slow->next_gc_bin_large; + unsigned small_nbins = tcache_nbins > SC_NBINS ? SC_NBINS + : tcache_nbins; + szind_t szind_small = tcache_slow->next_gc_bin_small; + szind_t szind_large = tcache_slow->next_gc_bin_large; /* Flush at most TCACHE_GC_SMALL_NBINS_MAX small bins at a time. */ - for (unsigned i = 0; i < small_nbins && gc_small_nbins < - TCACHE_GC_SMALL_NBINS_MAX; i++) { + for (unsigned i = 0; + i < small_nbins && gc_small_nbins < TCACHE_GC_SMALL_NBINS_MAX; + i++) { assert(szind_small < SC_NBINS); if (tcache_try_gc_bin(tsd, tcache_slow, tcache, szind_small)) { gc_small_nbins++; @@ -568,8 +579,9 @@ tcache_gc_event(tsd_t *tsd) { } /* Flush at most TCACHE_GC_LARGE_NBINS_MAX large bins at a time. */ - for (unsigned i = SC_NBINS; i < tcache_nbins && gc_large_nbins < - TCACHE_GC_LARGE_NBINS_MAX; i++) { + for (unsigned i = SC_NBINS; + i < tcache_nbins && gc_large_nbins < TCACHE_GC_LARGE_NBINS_MAX; + i++) { assert(szind_large >= SC_NBINS && szind_large < tcache_nbins); if (tcache_try_gc_bin(tsd, tcache_slow, tcache, szind_large)) { gc_large_nbins++; @@ -582,11 +594,10 @@ tcache_gc_event(tsd_t *tsd) { } void * -tcache_alloc_small_hard(tsdn_t *tsdn, arena_t *arena, - tcache_t *tcache, cache_bin_t *cache_bin, szind_t binind, - bool *tcache_success) { +tcache_alloc_small_hard(tsdn_t *tsdn, arena_t *arena, tcache_t *tcache, + cache_bin_t *cache_bin, szind_t binind, bool *tcache_success) { tcache_slow_t *tcache_slow = tcache->tcache_slow; - void *ret; + void *ret; assert(tcache_slow->arena != NULL); assert(!tcache_bin_disabled(binind, cache_bin, tcache_slow)); @@ -596,8 +607,9 @@ tcache_alloc_small_hard(tsdn_t *tsdn, arena_t *arena, nfill = 1; } arena_cache_bin_fill_small(tsdn, arena, cache_bin, binind, - /* nfill_min */ opt_experimental_tcache_gc ? - ((nfill >> 1) + 1) : nfill, /* nfill_max */ nfill); + /* nfill_min */ + opt_experimental_tcache_gc ? 
((nfill >> 1) + 1) : nfill, + /* nfill_max */ nfill); tcache_slow->bin_refilled[binind] = true; tcache_nfill_small_burst_prepare(tcache_slow, binind); ret = cache_bin_alloc(cache_bin, tcache_success); @@ -612,8 +624,8 @@ tcache_bin_flush_ptr_getter(void *arr_ctx, size_t ind) { } static void -tcache_bin_flush_metadata_visitor(void *szind_sum_ctx, - emap_full_alloc_ctx_t *alloc_ctx) { +tcache_bin_flush_metadata_visitor( + void *szind_sum_ctx, emap_full_alloc_ctx_t *alloc_ctx) { size_t *szind_sum = (size_t *)szind_sum_ctx; *szind_sum -= alloc_ctx->szind; util_prefetch_write_range(alloc_ctx->edata, sizeof(edata_t)); @@ -640,7 +652,6 @@ tcache_bin_flush_size_check_fail(cache_bin_ptr_array_t *arr, szind_t szind, static void tcache_bin_flush_edatas_lookup(tsd_t *tsd, cache_bin_ptr_array_t *arr, szind_t binind, size_t nflush, emap_batch_lookup_result_t *edatas) { - /* * This gets compiled away when config_opt_safety_checks is false. * Checks for sized deallocation bugs, failing early rather than @@ -649,16 +660,16 @@ tcache_bin_flush_edatas_lookup(tsd_t *tsd, cache_bin_ptr_array_t *arr, size_t szind_sum = binind * nflush; emap_edata_lookup_batch(tsd, &arena_emap_global, nflush, &tcache_bin_flush_ptr_getter, (void *)arr, - &tcache_bin_flush_metadata_visitor, (void *)&szind_sum, - edatas); + &tcache_bin_flush_metadata_visitor, (void *)&szind_sum, edatas); if (config_opt_safety_checks && unlikely(szind_sum != 0)) { tcache_bin_flush_size_check_fail(arr, binind, nflush, edatas); } } JEMALLOC_ALWAYS_INLINE void -tcache_bin_flush_impl_small(tsd_t *tsd, tcache_t *tcache, cache_bin_t *cache_bin, - szind_t binind, cache_bin_ptr_array_t *ptrs, unsigned nflush) { +tcache_bin_flush_impl_small(tsd_t *tsd, tcache_t *tcache, + cache_bin_t *cache_bin, szind_t binind, cache_bin_ptr_array_t *ptrs, + unsigned nflush) { tcache_slow_t *tcache_slow = tcache->tcache_slow; /* * A couple lookup calls take tsdn; declare it once for convenience @@ -669,7 +680,8 @@ tcache_bin_flush_impl_small(tsd_t 
*tsd, tcache_t *tcache, cache_bin_t *cache_bin assert(binind < SC_NBINS); arena_t *tcache_arena = tcache_slow->arena; assert(tcache_arena != NULL); - unsigned tcache_binshard = tsd_binshardsp_get(tsdn_tsd(tsdn))->binshard[binind]; + unsigned tcache_binshard = + tsd_binshardsp_get(tsdn_tsd(tsdn))->binshard[binind]; /* * Variable length array must have > 0 length; the last element is never @@ -727,8 +739,7 @@ tcache_bin_flush_impl_small(tsd_t *tsd, tcache_t *tcache, cache_bin_t *cache_bin arena_t *cur_arena = arena_get(tsdn, cur_arena_ind, false); unsigned cur_binshard = edata_binshard_get(cur_edata); - bin_t *cur_bin = arena_get_bin(cur_arena, binind, - cur_binshard); + bin_t *cur_bin = arena_get_bin(cur_arena, binind, cur_binshard); assert(cur_binshard < bin_infos[binind].n_shards); /* @@ -737,16 +748,18 @@ tcache_bin_flush_impl_small(tsd_t *tsd, tcache_t *tcache, cache_bin_t *cache_bin */ flush_start++; for (unsigned i = flush_start; i < nflush; i++) { - void *ptr = ptrs->ptr[i]; + void *ptr = ptrs->ptr[i]; edata_t *edata = item_edata[i].edata; assert(ptr != NULL && edata != NULL); - assert((uintptr_t)ptr >= (uintptr_t)edata_addr_get(edata)); - assert((uintptr_t)ptr < (uintptr_t)edata_past_get(edata)); + assert( + (uintptr_t)ptr >= (uintptr_t)edata_addr_get(edata)); + assert( + (uintptr_t)ptr < (uintptr_t)edata_past_get(edata)); if (edata_arena_ind_get(edata) == cur_arena_ind && edata_binshard_get(edata) == cur_binshard) { /* Swap the edatas. */ - emap_batch_lookup_result_t temp_edata - = item_edata[flush_start]; + emap_batch_lookup_result_t temp_edata = + item_edata[flush_start]; item_edata[flush_start] = item_edata[i]; item_edata[i] = temp_edata; /* Swap the pointers */ @@ -759,7 +772,7 @@ tcache_bin_flush_impl_small(tsd_t *tsd, tcache_t *tcache, cache_bin_t *cache_bin /* Make sure we implemented partitioning correctly. 
*/ if (config_debug) { for (unsigned i = prev_flush_start; i < flush_start; - i++) { + i++) { edata_t *edata = item_edata[i].edata; unsigned arena_ind = edata_arena_ind_get(edata); assert(arena_ind == cur_arena_ind); @@ -768,10 +781,10 @@ tcache_bin_flush_impl_small(tsd_t *tsd, tcache_t *tcache, cache_bin_t *cache_bin } for (unsigned i = flush_start; i < nflush; i++) { edata_t *edata = item_edata[i].edata; - assert(edata_arena_ind_get(edata) - != cur_arena_ind + assert( + edata_arena_ind_get(edata) != cur_arena_ind || edata_binshard_get(edata) - != cur_binshard); + != cur_binshard); } } @@ -817,7 +830,7 @@ tcache_bin_flush_impl_small(tsd_t *tsd, tcache_t *tcache, cache_bin_t *cache_bin bool home_binshard = (cur_arena == tcache_arena && cur_binshard == tcache_binshard); bool can_batch = (flush_start - prev_flush_start - <= opt_bin_info_remote_free_max_batch) + <= opt_bin_info_remote_free_max_batch) && !home_binshard && bin_is_batched; /* @@ -831,8 +844,8 @@ tcache_bin_flush_impl_small(tsd_t *tsd, tcache_t *tcache, cache_bin_t *cache_bin locked = !malloc_mutex_trylock(tsdn, &cur_bin->lock); } if (can_batch && !locked) { - bin_with_batch_t *batched_bin = - (bin_with_batch_t *)cur_bin; + bin_with_batch_t *batched_bin = (bin_with_batch_t *) + cur_bin; size_t push_idx = batcher_push_begin(tsdn, &batched_bin->remote_frees, flush_start - prev_flush_start); @@ -840,19 +853,19 @@ tcache_bin_flush_impl_small(tsd_t *tsd, tcache_t *tcache, cache_bin_t *cache_bin if (push_idx != BATCHER_NO_IDX) { batched = true; - unsigned nbatched - = flush_start - prev_flush_start; + unsigned nbatched = flush_start + - prev_flush_start; for (unsigned i = 0; i < nbatched; i++) { unsigned src_ind = prev_flush_start + i; - batched_bin->remote_free_data[ - push_idx + i].ptr - = ptrs->ptr[src_ind]; - batched_bin->remote_free_data[ - push_idx + i].slab - = item_edata[src_ind].edata; + batched_bin + ->remote_free_data[push_idx + i] + .ptr = ptrs->ptr[src_ind]; + batched_bin + 
->remote_free_data[push_idx + i] + .slab = item_edata[src_ind].edata; } - batcher_push_end(tsdn, - &batched_bin->remote_frees); + batcher_push_end( + tsdn, &batched_bin->remote_frees); } else { batch_failed = true; } @@ -887,16 +900,17 @@ tcache_bin_flush_impl_small(tsd_t *tsd, tcache_t *tcache, cache_bin_t *cache_bin cache_bin->tstats.nrequests = 0; } unsigned preallocated_slabs = nflush; - unsigned ndalloc_slabs = arena_bin_batch_get_ndalloc_slabs( - preallocated_slabs); + unsigned ndalloc_slabs = + arena_bin_batch_get_ndalloc_slabs( + preallocated_slabs); /* Next flush objects our own objects. */ /* Init only to avoid used-uninitialized warning. */ arena_dalloc_bin_locked_info_t dalloc_bin_info = {0}; arena_dalloc_bin_locked_begin(&dalloc_bin_info, binind); for (unsigned i = prev_flush_start; i < flush_start; - i++) { - void *ptr = ptrs->ptr[i]; + i++) { + void *ptr = ptrs->ptr[i]; edata_t *edata = item_edata[i].edata; arena_dalloc_bin_locked_step(tsdn, cur_arena, cur_bin, &dalloc_bin_info, binind, edata, @@ -910,16 +924,16 @@ tcache_bin_flush_impl_small(tsd_t *tsd, tcache_t *tcache, cache_bin_t *cache_bin if (bin_is_batched) { arena_bin_flush_batch_impl(tsdn, cur_arena, cur_bin, &dalloc_bin_info, binind, - dalloc_slabs, ndalloc_slabs, - &dalloc_count, &dalloc_slabs_extra); + dalloc_slabs, ndalloc_slabs, &dalloc_count, + &dalloc_slabs_extra); } - arena_dalloc_bin_locked_finish(tsdn, cur_arena, cur_bin, - &dalloc_bin_info); + arena_dalloc_bin_locked_finish( + tsdn, cur_arena, cur_bin, &dalloc_bin_info); malloc_mutex_unlock(tsdn, &cur_bin->lock); } - arena_decay_ticks(tsdn, cur_arena, - flush_start - prev_flush_start); + arena_decay_ticks( + tsdn, cur_arena, flush_start - prev_flush_start); } /* Handle all deferred slab dalloc. 
*/ @@ -934,24 +948,24 @@ tcache_bin_flush_impl_small(tsd_t *tsd, tcache_t *tcache, cache_bin_t *cache_bin } if (config_stats && !merged_stats) { - /* + /* * The flush loop didn't happen to flush to this * thread's arena, so the stats didn't get merged. * Manually do so now. */ - bin_t *bin = arena_bin_choose(tsdn, tcache_arena, - binind, NULL); - malloc_mutex_lock(tsdn, &bin->lock); - bin->stats.nflushes++; - bin->stats.nrequests += cache_bin->tstats.nrequests; - cache_bin->tstats.nrequests = 0; - malloc_mutex_unlock(tsdn, &bin->lock); + bin_t *bin = arena_bin_choose(tsdn, tcache_arena, binind, NULL); + malloc_mutex_lock(tsdn, &bin->lock); + bin->stats.nflushes++; + bin->stats.nrequests += cache_bin->tstats.nrequests; + cache_bin->tstats.nrequests = 0; + malloc_mutex_unlock(tsdn, &bin->lock); } } JEMALLOC_ALWAYS_INLINE void -tcache_bin_flush_impl_large(tsd_t *tsd, tcache_t *tcache, cache_bin_t *cache_bin, - szind_t binind, cache_bin_ptr_array_t *ptrs, unsigned nflush) { +tcache_bin_flush_impl_large(tsd_t *tsd, tcache_t *tcache, + cache_bin_t *cache_bin, szind_t binind, cache_bin_ptr_array_t *ptrs, + unsigned nflush) { tcache_slow_t *tcache_slow = tcache->tcache_slow; /* * A couple lookup calls take tsdn; declare it once for convenience @@ -1009,8 +1023,7 @@ tcache_bin_flush_impl_large(tsd_t *tsd, tcache_t *tcache, cache_bin_t *cache_bin assert(ptr != NULL && edata != NULL); if (edata_arena_ind_get(edata) == cur_arena_ind) { - large_dalloc_prep_locked(tsdn, - edata); + large_dalloc_prep_locked(tsdn, edata); } } if (!arena_is_auto(cur_arena)) { @@ -1035,8 +1048,8 @@ tcache_bin_flush_impl_large(tsd_t *tsd, tcache_t *tcache, cache_bin_t *cache_bin ndeferred++; continue; } - if (large_dalloc_safety_checks(edata, ptr, - sz_index2size(binind))) { + if (large_dalloc_safety_checks( + edata, ptr, sz_index2size(binind))) { /* See the comment in isfree. 
*/ continue; } @@ -1048,8 +1061,7 @@ tcache_bin_flush_impl_large(tsd_t *tsd, tcache_t *tcache, cache_bin_t *cache_bin if (config_stats && !merged_stats) { arena_stats_large_flush_nrequests_add(tsdn, - &tcache_arena->stats, binind, - cache_bin->tstats.nrequests); + &tcache_arena->stats, binind, cache_bin->tstats.nrequests); cache_bin->tstats.nrequests = 0; } } @@ -1058,7 +1070,7 @@ JEMALLOC_ALWAYS_INLINE void tcache_bin_flush_impl(tsd_t *tsd, tcache_t *tcache, cache_bin_t *cache_bin, szind_t binind, cache_bin_ptr_array_t *ptrs, unsigned nflush, bool small) { assert(ptrs != NULL && ptrs->ptr != NULL); - unsigned nflush_batch, nflushed = 0; + unsigned nflush_batch, nflushed = 0; cache_bin_ptr_array_t ptrs_batch; do { nflush_batch = nflush - nflushed; @@ -1078,11 +1090,11 @@ tcache_bin_flush_impl(tsd_t *tsd, tcache_t *tcache, cache_bin_t *cache_bin, * '...' is morally equivalent, the code itself needs slight tweaks. */ if (small) { - tcache_bin_flush_impl_small(tsd, tcache, cache_bin, binind, - &ptrs_batch, nflush_batch); + tcache_bin_flush_impl_small(tsd, tcache, cache_bin, + binind, &ptrs_batch, nflush_batch); } else { - tcache_bin_flush_impl_large(tsd, tcache, cache_bin, binind, - &ptrs_batch, nflush_batch); + tcache_bin_flush_impl_large(tsd, tcache, cache_bin, + binind, &ptrs_batch, nflush_batch); } nflushed += nflush_batch; } while (nflushed < nflush); @@ -1117,8 +1129,8 @@ tcache_bin_flush_bottom(tsd_t *tsd, tcache_t *tcache, cache_bin_t *cache_bin, CACHE_BIN_PTR_ARRAY_DECLARE(ptrs, nflush); cache_bin_init_ptr_array_for_flush(cache_bin, &ptrs, nflush); - tcache_bin_flush_impl(tsd, tcache, cache_bin, binind, &ptrs, nflush, - small); + tcache_bin_flush_impl( + tsd, tcache, cache_bin, binind, &ptrs, nflush, small); cache_bin_finish_flush(cache_bin, &ptrs, nflush); } @@ -1157,7 +1169,7 @@ tcache_bin_flush_stashed(tsd_t *tsd, tcache_t *tcache, cache_bin_t *cache_bin, * items remain unchanged -- the stashed items reside on the other end * of the stack. 
Checking the stack head and ncached to verify. */ - void *head_content = *cache_bin->stack_head; + void *head_content = *cache_bin->stack_head; cache_bin_sz_t orig_cached = cache_bin_ncached_get_local(cache_bin); cache_bin_sz_t nstashed = cache_bin_nstashed_get_local(cache_bin); @@ -1167,11 +1179,11 @@ tcache_bin_flush_stashed(tsd_t *tsd, tcache_t *tcache, cache_bin_t *cache_bin, } CACHE_BIN_PTR_ARRAY_DECLARE(ptrs, nstashed); - cache_bin_init_ptr_array_for_stashed(cache_bin, binind, &ptrs, - nstashed); + cache_bin_init_ptr_array_for_stashed( + cache_bin, binind, &ptrs, nstashed); san_check_stashed_ptrs(ptrs.ptr, nstashed, sz_index2size(binind)); - tcache_bin_flush_impl(tsd, tcache, cache_bin, binind, &ptrs, nstashed, - is_small); + tcache_bin_flush_impl( + tsd, tcache, cache_bin, binind, &ptrs, nstashed, is_small); cache_bin_finish_flush_stashed(cache_bin); assert(cache_bin_nstashed_get_local(cache_bin) == 0); @@ -1190,8 +1202,8 @@ tcache_get_default_ncached_max(void) { } bool -tcache_bin_ncached_max_read(tsd_t *tsd, size_t bin_size, - cache_bin_sz_t *ncached_max) { +tcache_bin_ncached_max_read( + tsd_t *tsd, size_t bin_size, cache_bin_sz_t *ncached_max) { if (bin_size > TCACHE_MAXCLASS_LIMIT) { return true; } @@ -1206,8 +1218,9 @@ tcache_bin_ncached_max_read(tsd_t *tsd, size_t bin_size, szind_t bin_ind = sz_size2index(bin_size); cache_bin_t *bin = &tcache->bins[bin_ind]; - *ncached_max = tcache_bin_disabled(bin_ind, bin, tcache->tcache_slow) ? - 0: cache_bin_ncached_max_get(bin); + *ncached_max = tcache_bin_disabled(bin_ind, bin, tcache->tcache_slow) + ? 
0 + : cache_bin_ncached_max_get(bin); return false; } @@ -1233,17 +1246,17 @@ tcache_arena_associate(tsdn_t *tsdn, tcache_slow_t *tcache_slow, } static void -tcache_arena_dissociate(tsdn_t *tsdn, tcache_slow_t *tcache_slow, - tcache_t *tcache) { +tcache_arena_dissociate( + tsdn_t *tsdn, tcache_slow_t *tcache_slow, tcache_t *tcache) { arena_t *arena = tcache_slow->arena; assert(arena != NULL); if (config_stats) { /* Unlink from list of extant tcaches. */ malloc_mutex_lock(tsdn, &arena->tcache_ql_mtx); if (config_debug) { - bool in_ql = false; + bool in_ql = false; tcache_slow_t *iter; - ql_foreach(iter, &arena->tcache_ql, link) { + ql_foreach (iter, &arena->tcache_ql, link) { if (iter == tcache_slow) { in_ql = true; break; @@ -1276,8 +1289,8 @@ tcache_default_settings_init(tcache_slow_t *tcache_slow) { } static void -tcache_init(tsd_t *tsd, tcache_slow_t *tcache_slow, tcache_t *tcache, - void *mem, const cache_bin_info_t *tcache_bin_info) { +tcache_init(tsd_t *tsd, tcache_slow_t *tcache_slow, tcache_t *tcache, void *mem, + const cache_bin_info_t *tcache_bin_info) { tcache->tcache_slow = tcache_slow; tcache_slow->tcache = tcache; @@ -1296,23 +1309,22 @@ tcache_init(tsd_t *tsd, tcache_slow_t *tcache_slow, tcache_t *tcache, * worrying about which ones are disabled. 
*/ unsigned tcache_nbins = tcache_nbins_get(tcache_slow); - size_t cur_offset = 0; - cache_bin_preincrement(tcache_bin_info, tcache_nbins, mem, - &cur_offset); + size_t cur_offset = 0; + cache_bin_preincrement(tcache_bin_info, tcache_nbins, mem, &cur_offset); for (unsigned i = 0; i < tcache_nbins; i++) { if (i < SC_NBINS) { tcache_bin_fill_ctl_init(tcache_slow, i); tcache_slow->bin_refilled[i] = false; - tcache_slow->bin_flush_delay_items[i] - = tcache_gc_item_delay_compute(i); + tcache_slow->bin_flush_delay_items[i] = + tcache_gc_item_delay_compute(i); } cache_bin_t *cache_bin = &tcache->bins[i]; if (tcache_bin_info[i].ncached_max > 0) { - cache_bin_init(cache_bin, &tcache_bin_info[i], mem, - &cur_offset); + cache_bin_init( + cache_bin, &tcache_bin_info[i], mem, &cur_offset); } else { - cache_bin_init_disabled(cache_bin, - tcache_bin_info[i].ncached_max); + cache_bin_init_disabled( + cache_bin, tcache_bin_info[i].ncached_max); } } /* @@ -1323,8 +1335,8 @@ tcache_init(tsd_t *tsd, tcache_slow_t *tcache_slow, tcache_t *tcache, */ for (unsigned i = tcache_nbins; i < TCACHE_NBINS_MAX; i++) { cache_bin_t *cache_bin = &tcache->bins[i]; - cache_bin_init_disabled(cache_bin, - tcache_bin_info[i].ncached_max); + cache_bin_init_disabled( + cache_bin, tcache_bin_info[i].ncached_max); assert(tcache_bin_disabled(i, cache_bin, tcache->tcache_slow)); } @@ -1332,8 +1344,8 @@ tcache_init(tsd_t *tsd, tcache_slow_t *tcache_slow, tcache_t *tcache, if (config_debug) { /* Sanity check that the whole stack is used. */ size_t size, alignment; - cache_bin_info_compute_alloc(tcache_bin_info, tcache_nbins, - &size, &alignment); + cache_bin_info_compute_alloc( + tcache_bin_info, tcache_nbins, &size, &alignment); assert(cur_offset == size); } } @@ -1402,26 +1414,26 @@ tcache_bin_info_compute(cache_bin_info_t tcache_bin_info[TCACHE_NBINS_MAX]) { * than tcache_nbins, no items will be cached. 
*/ for (szind_t i = 0; i < TCACHE_NBINS_MAX; i++) { - unsigned ncached_max = tcache_get_default_ncached_max_set(i) ? - (unsigned)tcache_get_default_ncached_max()[i].ncached_max: - tcache_ncached_max_compute(i); + unsigned ncached_max = tcache_get_default_ncached_max_set(i) + ? (unsigned)tcache_get_default_ncached_max()[i].ncached_max + : tcache_ncached_max_compute(i); assert(ncached_max <= CACHE_BIN_NCACHED_MAX); - cache_bin_info_init(&tcache_bin_info[i], - (cache_bin_sz_t)ncached_max); + cache_bin_info_init( + &tcache_bin_info[i], (cache_bin_sz_t)ncached_max); } } static bool -tsd_tcache_data_init_impl(tsd_t *tsd, arena_t *arena, - const cache_bin_info_t *tcache_bin_info) { +tsd_tcache_data_init_impl( + tsd_t *tsd, arena_t *arena, const cache_bin_info_t *tcache_bin_info) { tcache_slow_t *tcache_slow = tsd_tcache_slowp_get_unsafe(tsd); - tcache_t *tcache = tsd_tcachep_get_unsafe(tsd); + tcache_t *tcache = tsd_tcachep_get_unsafe(tsd); assert(cache_bin_still_zero_initialized(&tcache->bins[0])); unsigned tcache_nbins = tcache_nbins_get(tcache_slow); - size_t size, alignment; - cache_bin_info_compute_alloc(tcache_bin_info, tcache_nbins, - &size, &alignment); + size_t size, alignment; + cache_bin_info_compute_alloc( + tcache_bin_info, tcache_nbins, &size, &alignment); void *mem; if (cache_bin_stack_use_thp()) { @@ -1450,16 +1462,16 @@ tsd_tcache_data_init_impl(tsd_t *tsd, arena_t *arena, if (!malloc_initialized()) { /* If in initialization, assign to a0. */ arena = arena_get(tsd_tsdn(tsd), 0, false); - tcache_arena_associate(tsd_tsdn(tsd), tcache_slow, tcache, - arena); + tcache_arena_associate( + tsd_tsdn(tsd), tcache_slow, tcache, arena); } else { if (arena == NULL) { arena = arena_choose(tsd, NULL); } /* This may happen if thread.tcache.enabled is used. 
*/ if (tcache_slow->arena == NULL) { - tcache_arena_associate(tsd_tsdn(tsd), tcache_slow, - tcache, arena); + tcache_arena_associate( + tsd_tsdn(tsd), tcache_slow, tcache, arena); } } assert(arena == tcache_slow->arena); @@ -1484,30 +1496,29 @@ tcache_create_explicit(tsd_t *tsd) { * the cache bins have the requested alignment. */ unsigned tcache_nbins = global_do_not_change_tcache_nbins; - size_t tcache_size, alignment; + size_t tcache_size, alignment; cache_bin_info_compute_alloc(tcache_get_default_ncached_max(), tcache_nbins, &tcache_size, &alignment); - size_t size = tcache_size + sizeof(tcache_t) - + sizeof(tcache_slow_t); + size_t size = tcache_size + sizeof(tcache_t) + sizeof(tcache_slow_t); /* Naturally align the pointer stacks. */ size = PTR_CEILING(size); size = sz_sa2u(size, alignment); - void *mem = ipallocztm(tsd_tsdn(tsd), size, alignment, - true, NULL, true, arena_get(TSDN_NULL, 0, true)); + void *mem = ipallocztm(tsd_tsdn(tsd), size, alignment, true, NULL, true, + arena_get(TSDN_NULL, 0, true)); if (mem == NULL) { return NULL; } - tcache_t *tcache = (void *)((byte_t *)mem + tcache_size); - tcache_slow_t *tcache_slow = - (void *)((byte_t *)mem + tcache_size + sizeof(tcache_t)); + tcache_t *tcache = (void *)((byte_t *)mem + tcache_size); + tcache_slow_t *tcache_slow = (void *)((byte_t *)mem + tcache_size + + sizeof(tcache_t)); tcache_default_settings_init(tcache_slow); - tcache_init(tsd, tcache_slow, tcache, mem, - tcache_get_default_ncached_max()); + tcache_init( + tsd, tcache_slow, tcache, mem, tcache_get_default_ncached_max()); - tcache_arena_associate(tsd_tsdn(tsd), tcache_slow, tcache, - arena_ichoose(tsd, NULL)); + tcache_arena_associate( + tsd_tsdn(tsd), tcache_slow, tcache, arena_ichoose(tsd, NULL)); return tcache; } @@ -1525,8 +1536,8 @@ tsd_tcache_enabled_data_init(tsd_t *tsd) { if (opt_tcache) { /* Trigger tcache init. 
*/ - tsd_tcache_data_init(tsd, NULL, - tcache_get_default_ncached_max()); + tsd_tcache_data_init( + tsd, NULL, tcache_get_default_ncached_max()); } return false; @@ -1537,8 +1548,8 @@ tcache_enabled_set(tsd_t *tsd, bool enabled) { bool was_enabled = tsd_tcache_enabled_get(tsd); if (!was_enabled && enabled) { - tsd_tcache_data_init(tsd, NULL, - tcache_get_default_ncached_max()); + tsd_tcache_data_init( + tsd, NULL, tcache_get_default_ncached_max()); } else if (was_enabled && !enabled) { tcache_cleanup(tsd); } @@ -1551,12 +1562,12 @@ void thread_tcache_max_set(tsd_t *tsd, size_t tcache_max) { assert(tcache_max <= TCACHE_MAXCLASS_LIMIT); assert(tcache_max == sz_s2u(tcache_max)); - tcache_t *tcache = tsd_tcachep_get(tsd); - tcache_slow_t *tcache_slow = tcache->tcache_slow; + tcache_t *tcache = tsd_tcachep_get(tsd); + tcache_slow_t *tcache_slow = tcache->tcache_slow; cache_bin_info_t tcache_bin_info[TCACHE_NBINS_MAX] = {{0}}; assert(tcache != NULL && tcache_slow != NULL); - bool enabled = tcache_available(tsd); + bool enabled = tcache_available(tsd); arena_t *assigned_arena JEMALLOC_CLANG_ANALYZER_SILENCE_INIT(NULL); if (enabled) { assigned_arena = tcache_slow->arena; @@ -1587,16 +1598,16 @@ tcache_bin_info_settings_parse(const char *bin_settings_segment_cur, do { size_t size_start, size_end; size_t ncached_max; - bool err = multi_setting_parse_next(&bin_settings_segment_cur, - &len_left, &size_start, &size_end, &ncached_max); + bool err = multi_setting_parse_next(&bin_settings_segment_cur, + &len_left, &size_start, &size_end, &ncached_max); if (err) { return true; } if (size_end > TCACHE_MAXCLASS_LIMIT) { size_end = TCACHE_MAXCLASS_LIMIT; } - if (size_start > TCACHE_MAXCLASS_LIMIT || - size_start > size_end) { + if (size_start > TCACHE_MAXCLASS_LIMIT + || size_start > size_end) { continue; } /* May get called before sz_init (during malloc_conf_init). 
*/ @@ -1606,8 +1617,8 @@ tcache_bin_info_settings_parse(const char *bin_settings_segment_cur, ncached_max = (size_t)CACHE_BIN_NCACHED_MAX; } for (szind_t i = bin_start; i <= bin_end; i++) { - cache_bin_info_init(&tcache_bin_info[i], - (cache_bin_sz_t)ncached_max); + cache_bin_info_init( + &tcache_bin_info[i], (cache_bin_sz_t)ncached_max); if (bin_info_is_set != NULL) { bin_info_is_set[i] = true; } @@ -1618,13 +1629,12 @@ tcache_bin_info_settings_parse(const char *bin_settings_segment_cur, } bool -tcache_bin_info_default_init(const char *bin_settings_segment_cur, - size_t len_left) { +tcache_bin_info_default_init( + const char *bin_settings_segment_cur, size_t len_left) { return tcache_bin_info_settings_parse(bin_settings_segment_cur, len_left, opt_tcache_ncached_max, opt_tcache_ncached_max_set); } - bool tcache_bins_ncached_max_write(tsd_t *tsd, char *settings, size_t len) { assert(tcache_available(tsd)); @@ -1634,15 +1644,14 @@ tcache_bins_ncached_max_write(tsd_t *tsd, char *settings, size_t len) { cache_bin_info_t tcache_bin_info[TCACHE_NBINS_MAX]; tcache_bin_settings_backup(tcache, tcache_bin_info); - if(tcache_bin_info_settings_parse(settings, len, tcache_bin_info, - NULL)) { + if (tcache_bin_info_settings_parse( + settings, len, tcache_bin_info, NULL)) { return true; } arena_t *assigned_arena = tcache->tcache_slow->arena; tcache_cleanup(tsd); - tsd_tcache_data_init(tsd, assigned_arena, - tcache_bin_info); + tsd_tcache_data_init(tsd, assigned_arena, tcache_bin_info); return false; } @@ -1698,11 +1707,11 @@ tcache_destroy(tsd_t *tsd, tcache_t *tcache, bool tsd_tcache) { * tsd). Manually trigger decay to avoid pathological cases. Also * include arena 0 because the tcache array is allocated from it. 
*/ - arena_decay(tsd_tsdn(tsd), arena_get(tsd_tsdn(tsd), 0, false), - false, false); + arena_decay( + tsd_tsdn(tsd), arena_get(tsd_tsdn(tsd), 0, false), false, false); - if (arena_nthreads_get(arena, false) == 0 && - !background_thread_enabled()) { + if (arena_nthreads_get(arena, false) == 0 + && !background_thread_enabled()) { /* Force purging when no threads assigned to the arena anymore. */ arena_decay(tsd_tsdn(tsd), arena, /* is_background_thread */ false, /* all */ true); @@ -1760,7 +1769,7 @@ tcaches_create_prep(tsd_t *tsd, base_t *base) { if (tcaches == NULL) { tcaches = base_alloc(tsd_tsdn(tsd), base, - sizeof(tcache_t *) * (MALLOCX_TCACHE_MAX+1), CACHELINE); + sizeof(tcache_t *) * (MALLOCX_TCACHE_MAX + 1), CACHELINE); if (tcaches == NULL) { err = true; goto label_return; @@ -1851,7 +1860,7 @@ void tcaches_destroy(tsd_t *tsd, unsigned ind) { malloc_mutex_lock(tsd_tsdn(tsd), &tcaches_mtx); tcaches_t *elm = &tcaches[ind]; - tcache_t *tcache = tcaches_elm_remove(tsd, elm, false); + tcache_t *tcache = tcaches_elm_remove(tsd, elm, false); elm->next = tcaches_avail; tcaches_avail = elm; malloc_mutex_unlock(tsd_tsdn(tsd), &tcaches_mtx); @@ -1875,7 +1884,7 @@ tcache_boot(tsdn_t *tsdn, base_t *base) { tcache_bin_info_compute(opt_tcache_ncached_max); if (malloc_mutex_init(&tcaches_mtx, "tcaches", WITNESS_RANK_TCACHES, - malloc_mutex_rank_exclusive)) { + malloc_mutex_rank_exclusive)) { return true; } @@ -1897,7 +1906,8 @@ tcache_postfork_child(tsdn_t *tsdn) { malloc_mutex_postfork_child(tsdn, &tcaches_mtx); } -void tcache_assert_initialized(tcache_t *tcache) { +void +tcache_assert_initialized(tcache_t *tcache) { assert(!cache_bin_still_zero_initialized(&tcache->bins[0])); } @@ -1908,8 +1918,8 @@ tcache_gc_enabled(void) { /* Handles alloc and dalloc the same way */ te_base_cb_t tcache_gc_te_handler = { - .enabled = &tcache_gc_enabled, - .new_event_wait = &tcache_gc_new_event_wait, - .postponed_event_wait = &tcache_gc_postponed_event_wait, - .event_handler = 
&tcache_gc_event, + .enabled = &tcache_gc_enabled, + .new_event_wait = &tcache_gc_new_event_wait, + .postponed_event_wait = &tcache_gc_postponed_event_wait, + .event_handler = &tcache_gc_event, }; diff --git a/src/thread_event.c b/src/thread_event.c index 496c16be..c59027ed 100644 --- a/src/thread_event.c +++ b/src/thread_event.c @@ -16,7 +16,8 @@ te_ctx_has_active_events(te_ctx_t *ctx) { } } else { for (int i = 0; i < te_dalloc_count; ++i) { - if (te_enabled_yes == te_dalloc_handlers[i]->enabled()) { + if (te_enabled_yes + == te_dalloc_handlers[i]->enabled()) { return true; } } @@ -26,12 +27,11 @@ te_ctx_has_active_events(te_ctx_t *ctx) { static uint64_t te_next_event_compute(tsd_t *tsd, bool is_alloc) { - te_base_cb_t **handlers = is_alloc ? - te_alloc_handlers : te_dalloc_handlers; - uint64_t *waits = is_alloc ? - tsd_te_datap_get_unsafe(tsd)->alloc_wait : - tsd_te_datap_get_unsafe(tsd)->dalloc_wait; - int count = is_alloc ? te_alloc_count : te_dalloc_count; + te_base_cb_t **handlers = is_alloc ? te_alloc_handlers + : te_dalloc_handlers; + uint64_t *waits = is_alloc ? tsd_te_datap_get_unsafe(tsd)->alloc_wait + : tsd_te_datap_get_unsafe(tsd)->dalloc_wait; + int count = is_alloc ? te_alloc_count : te_dalloc_count; uint64_t wait = TE_MAX_START_WAIT; @@ -86,9 +86,9 @@ te_assert_invariants_impl(tsd_t *tsd, te_ctx_t *ctx) { * below is stronger than needed, but having an exactly accurate guard * is more complicated to implement. */ - assert((!te_ctx_has_active_events(ctx) && last_event == 0U) || - interval == min_wait || - (interval < min_wait && interval == TE_MAX_INTERVAL)); + assert((!te_ctx_has_active_events(ctx) && last_event == 0U) + || interval == min_wait + || (interval < min_wait && interval == TE_MAX_INTERVAL)); } void @@ -151,8 +151,9 @@ te_assert_invariants_debug(tsd_t *tsd) { static void te_ctx_next_event_fast_update(te_ctx_t *ctx) { uint64_t next_event = te_ctx_next_event_get(ctx); - uint64_t next_event_fast = (next_event <= TE_NEXT_EVENT_FAST_MAX) ? 
- next_event : 0U; + uint64_t next_event_fast = (next_event <= TE_NEXT_EVENT_FAST_MAX) + ? next_event + : 0U; te_ctx_next_event_fast_set(ctx, next_event_fast); } @@ -177,8 +178,7 @@ te_recompute_fast_threshold(tsd_t *tsd) { } static inline void -te_adjust_thresholds_impl(tsd_t *tsd, te_ctx_t *ctx, - uint64_t wait) { +te_adjust_thresholds_impl(tsd_t *tsd, te_ctx_t *ctx, uint64_t wait) { /* * The next threshold based on future events can only be adjusted after * progressing the last_event counter (which is set to current). @@ -186,23 +186,22 @@ te_adjust_thresholds_impl(tsd_t *tsd, te_ctx_t *ctx, assert(te_ctx_current_bytes_get(ctx) == te_ctx_last_event_get(ctx)); assert(wait <= TE_MAX_START_WAIT); - uint64_t next_event = te_ctx_last_event_get(ctx) + (wait <= - TE_MAX_INTERVAL ? wait : TE_MAX_INTERVAL); + uint64_t next_event = te_ctx_last_event_get(ctx) + + (wait <= TE_MAX_INTERVAL ? wait : TE_MAX_INTERVAL); te_ctx_next_event_set(tsd, ctx, next_event); } void -te_adjust_thresholds_helper(tsd_t *tsd, te_ctx_t *ctx, - uint64_t wait) { +te_adjust_thresholds_helper(tsd_t *tsd, te_ctx_t *ctx, uint64_t wait) { te_adjust_thresholds_impl(tsd, ctx, wait); } static void te_init_waits(tsd_t *tsd, uint64_t *wait, bool is_alloc) { - te_base_cb_t **handlers = is_alloc ? te_alloc_handlers : te_dalloc_handlers; - uint64_t *waits = is_alloc ? - tsd_te_datap_get_unsafe(tsd)->alloc_wait : - tsd_te_datap_get_unsafe(tsd)->dalloc_wait; - int count = is_alloc ? te_alloc_count : te_dalloc_count; + te_base_cb_t **handlers = is_alloc ? te_alloc_handlers + : te_dalloc_handlers; + uint64_t *waits = is_alloc ? tsd_te_datap_get_unsafe(tsd)->alloc_wait + : tsd_te_datap_get_unsafe(tsd)->dalloc_wait; + int count = is_alloc ? 
te_alloc_count : te_dalloc_count; for (int i = 0; i < count; i++) { if (te_enabled_yes == handlers[i]->enabled()) { uint64_t ev_wait = handlers[i]->new_event_wait(tsd); @@ -216,25 +215,23 @@ te_init_waits(tsd_t *tsd, uint64_t *wait, bool is_alloc) { } static inline bool -te_update_wait(tsd_t *tsd, uint64_t accumbytes, bool allow, - uint64_t *ev_wait, uint64_t *wait, te_base_cb_t *handler, - uint64_t new_wait) { +te_update_wait(tsd_t *tsd, uint64_t accumbytes, bool allow, uint64_t *ev_wait, + uint64_t *wait, te_base_cb_t *handler, uint64_t new_wait) { bool ret = false; if (*ev_wait > accumbytes) { - *ev_wait -= accumbytes; - } else if (!allow) { - *ev_wait = handler->postponed_event_wait(tsd); - } else { - ret = true; - *ev_wait = new_wait == 0 ? - handler->new_event_wait(tsd) : - new_wait; - } + *ev_wait -= accumbytes; + } else if (!allow) { + *ev_wait = handler->postponed_event_wait(tsd); + } else { + ret = true; + *ev_wait = new_wait == 0 ? handler->new_event_wait(tsd) + : new_wait; + } - assert(*ev_wait > 0); - if (*ev_wait < *wait) { - *wait = *ev_wait; - } + assert(*ev_wait > 0); + if (*ev_wait < *wait) { + *wait = *ev_wait; + } return ret; } @@ -242,32 +239,32 @@ extern uint64_t stats_interval_accum_batch; /* Return number of handlers enqueued into to_trigger array */ static inline size_t te_update_alloc_events(tsd_t *tsd, te_base_cb_t **to_trigger, - uint64_t accumbytes, bool allow, uint64_t *wait) { + uint64_t accumbytes, bool allow, uint64_t *wait) { /* * We do not loop and invoke the functions via interface because * of the perf cost. This path is relatively hot, so we sacrifice * elegance for perf. 
*/ - size_t nto_trigger = 0; + size_t nto_trigger = 0; uint64_t *waits = tsd_te_datap_get_unsafe(tsd)->alloc_wait; if (opt_tcache_gc_incr_bytes > 0) { - assert(te_enabled_yes == - te_alloc_handlers[te_alloc_tcache_gc]->enabled()); + assert(te_enabled_yes + == te_alloc_handlers[te_alloc_tcache_gc]->enabled()); if (te_update_wait(tsd, accumbytes, allow, - &waits[te_alloc_tcache_gc], wait, - te_alloc_handlers[te_alloc_tcache_gc], - opt_tcache_gc_incr_bytes)) { + &waits[te_alloc_tcache_gc], wait, + te_alloc_handlers[te_alloc_tcache_gc], + opt_tcache_gc_incr_bytes)) { to_trigger[nto_trigger++] = te_alloc_handlers[te_alloc_tcache_gc]; } } #ifdef JEMALLOC_PROF - if (opt_prof) { - assert(te_enabled_yes == - te_alloc_handlers[te_alloc_prof_sample]->enabled()); - if(te_update_wait(tsd, accumbytes, allow, - &waits[te_alloc_prof_sample], wait, - te_alloc_handlers[te_alloc_prof_sample], 0)) { + if (opt_prof) { + assert(te_enabled_yes + == te_alloc_handlers[te_alloc_prof_sample]->enabled()); + if (te_update_wait(tsd, accumbytes, allow, + &waits[te_alloc_prof_sample], wait, + te_alloc_handlers[te_alloc_prof_sample], 0)) { to_trigger[nto_trigger++] = te_alloc_handlers[te_alloc_prof_sample]; } @@ -275,12 +272,12 @@ te_update_alloc_events(tsd_t *tsd, te_base_cb_t **to_trigger, #endif if (opt_stats_interval >= 0) { if (te_update_wait(tsd, accumbytes, allow, - &waits[te_alloc_stats_interval], - wait, - te_alloc_handlers[te_alloc_stats_interval], - stats_interval_accum_batch)) { - assert(te_enabled_yes == - te_alloc_handlers[te_alloc_stats_interval]->enabled()); + &waits[te_alloc_stats_interval], wait, + te_alloc_handlers[te_alloc_stats_interval], + stats_interval_accum_batch)) { + assert(te_enabled_yes + == te_alloc_handlers[te_alloc_stats_interval] + ->enabled()); to_trigger[nto_trigger++] = te_alloc_handlers[te_alloc_stats_interval]; } @@ -288,30 +285,30 @@ te_update_alloc_events(tsd_t *tsd, te_base_cb_t **to_trigger, #ifdef JEMALLOC_STATS assert(te_enabled_yes == 
te_alloc_handlers[te_alloc_peak]->enabled()); - if(te_update_wait(tsd, accumbytes, allow, &waits[te_alloc_peak], wait, - te_alloc_handlers[te_alloc_peak], PEAK_EVENT_WAIT)) { + if (te_update_wait(tsd, accumbytes, allow, &waits[te_alloc_peak], wait, + te_alloc_handlers[te_alloc_peak], PEAK_EVENT_WAIT)) { to_trigger[nto_trigger++] = te_alloc_handlers[te_alloc_peak]; - } + } - assert(te_enabled_yes == - te_alloc_handlers[te_alloc_prof_threshold]->enabled()); - if(te_update_wait(tsd, accumbytes, allow, - &waits[te_alloc_prof_threshold], wait, - te_alloc_handlers[te_alloc_prof_threshold], - 1 << opt_experimental_lg_prof_threshold)) { + assert(te_enabled_yes + == te_alloc_handlers[te_alloc_prof_threshold]->enabled()); + if (te_update_wait(tsd, accumbytes, allow, + &waits[te_alloc_prof_threshold], wait, + te_alloc_handlers[te_alloc_prof_threshold], + 1 << opt_experimental_lg_prof_threshold)) { to_trigger[nto_trigger++] = te_alloc_handlers[te_alloc_prof_threshold]; - } + } #endif for (te_alloc_t ue = te_alloc_user0; ue <= te_alloc_user3; ue++) { - te_enabled_t status = - te_user_event_enabled(ue - te_alloc_user0, true); + te_enabled_t status = te_user_event_enabled( + ue - te_alloc_user0, true); if (status == te_enabled_not_installed) { break; } else if (status == te_enabled_yes) { if (te_update_wait(tsd, accumbytes, allow, &waits[ue], - wait, te_alloc_handlers[ue], 0)) { + wait, te_alloc_handlers[ue], 0)) { to_trigger[nto_trigger++] = te_alloc_handlers[ue]; } @@ -321,37 +318,36 @@ te_update_alloc_events(tsd_t *tsd, te_base_cb_t **to_trigger, } static inline size_t -te_update_dalloc_events(tsd_t *tsd, te_base_cb_t **to_trigger, uint64_t accumbytes, - bool allow, uint64_t *wait) { - size_t nto_trigger = 0; +te_update_dalloc_events(tsd_t *tsd, te_base_cb_t **to_trigger, + uint64_t accumbytes, bool allow, uint64_t *wait) { + size_t nto_trigger = 0; uint64_t *waits = tsd_te_datap_get_unsafe(tsd)->dalloc_wait; if (opt_tcache_gc_incr_bytes > 0) { - assert(te_enabled_yes == - 
te_dalloc_handlers[te_dalloc_tcache_gc]->enabled()); + assert(te_enabled_yes + == te_dalloc_handlers[te_dalloc_tcache_gc]->enabled()); if (te_update_wait(tsd, accumbytes, allow, - &waits[te_dalloc_tcache_gc], wait, - te_dalloc_handlers[te_dalloc_tcache_gc], - opt_tcache_gc_incr_bytes)) { + &waits[te_dalloc_tcache_gc], wait, + te_dalloc_handlers[te_dalloc_tcache_gc], + opt_tcache_gc_incr_bytes)) { to_trigger[nto_trigger++] = te_dalloc_handlers[te_dalloc_tcache_gc]; } - } + } #ifdef JEMALLOC_STATS assert(te_enabled_yes == te_dalloc_handlers[te_dalloc_peak]->enabled()); - if(te_update_wait(tsd, accumbytes, allow, &waits[te_dalloc_peak], wait, - te_dalloc_handlers[te_dalloc_peak], - PEAK_EVENT_WAIT)) { + if (te_update_wait(tsd, accumbytes, allow, &waits[te_dalloc_peak], wait, + te_dalloc_handlers[te_dalloc_peak], PEAK_EVENT_WAIT)) { to_trigger[nto_trigger++] = te_dalloc_handlers[te_dalloc_peak]; - } + } #endif for (te_dalloc_t ue = te_dalloc_user0; ue <= te_dalloc_user3; ue++) { - te_enabled_t status = - te_user_event_enabled(ue - te_dalloc_user0, false); + te_enabled_t status = te_user_event_enabled( + ue - te_dalloc_user0, false); if (status == te_enabled_not_installed) { break; } else if (status == te_enabled_yes) { if (te_update_wait(tsd, accumbytes, allow, &waits[ue], - wait, te_dalloc_handlers[ue], 0)) { + wait, te_dalloc_handlers[ue], 0)) { to_trigger[nto_trigger++] = te_dalloc_handlers[ue]; } @@ -369,26 +365,22 @@ te_event_trigger(tsd_t *tsd, te_ctx_t *ctx) { te_ctx_last_event_set(ctx, bytes_after); - bool allow_event_trigger = tsd_nominal(tsd) && - tsd_reentrancy_level_get(tsd) == 0; + bool allow_event_trigger = tsd_nominal(tsd) + && tsd_reentrancy_level_get(tsd) == 0; uint64_t wait = TE_MAX_START_WAIT; - assert((int)te_alloc_count >= (int) te_dalloc_count); + assert((int)te_alloc_count >= (int)te_dalloc_count); te_base_cb_t *to_trigger[te_alloc_count]; - size_t nto_trigger; + size_t nto_trigger; if (ctx->is_alloc) { - nto_trigger = te_update_alloc_events(tsd, 
to_trigger, - accumbytes, - allow_event_trigger, - &wait); + nto_trigger = te_update_alloc_events( + tsd, to_trigger, accumbytes, allow_event_trigger, &wait); } else { - nto_trigger = te_update_dalloc_events(tsd, to_trigger, - accumbytes, - allow_event_trigger, - &wait); + nto_trigger = te_update_dalloc_events( + tsd, to_trigger, accumbytes, allow_event_trigger, &wait); } - assert(wait <= TE_MAX_START_WAIT); + assert(wait <= TE_MAX_START_WAIT); te_adjust_thresholds_helper(tsd, ctx, wait); te_assert_invariants(tsd); diff --git a/src/thread_event_registry.c b/src/thread_event_registry.c index f5408178..05882616 100644 --- a/src/thread_event_registry.c +++ b/src/thread_event_registry.c @@ -145,34 +145,25 @@ TE_USER_HANDLER_BINDING_IDX(3); /* Table of all the thread events. */ te_base_cb_t *te_alloc_handlers[te_alloc_count] = { #ifdef JEMALLOC_PROF - &prof_sample_te_handler, + &prof_sample_te_handler, #endif - &stats_interval_te_handler, - &tcache_gc_te_handler, + &stats_interval_te_handler, &tcache_gc_te_handler, #ifdef JEMALLOC_STATS - &prof_threshold_te_handler, - &peak_te_handler, + &prof_threshold_te_handler, &peak_te_handler, #endif - &user_alloc_handler0, - &user_alloc_handler1, - &user_alloc_handler2, - &user_alloc_handler3 -}; + &user_alloc_handler0, &user_alloc_handler1, &user_alloc_handler2, + &user_alloc_handler3}; -te_base_cb_t *te_dalloc_handlers[te_dalloc_count] = { - &tcache_gc_te_handler, +te_base_cb_t *te_dalloc_handlers[te_dalloc_count] = {&tcache_gc_te_handler, #ifdef JEMALLOC_STATS - &peak_te_handler, + &peak_te_handler, #endif - &user_dalloc_handler0, - &user_dalloc_handler1, - &user_dalloc_handler2, - &user_dalloc_handler3 -}; + &user_dalloc_handler0, &user_dalloc_handler1, &user_dalloc_handler2, + &user_dalloc_handler3}; static inline bool te_update_tsd(tsd_t *tsd, uint64_t new_wait, size_t ue_idx, bool is_alloc) { - bool needs_recompute = false; + bool needs_recompute = false; te_ctx_t ctx; uint64_t next, current, cur_wait; diff --git 
a/src/ticker.c b/src/ticker.c index 790b5c20..1fd6ac96 100644 --- a/src/ticker.c +++ b/src/ticker.c @@ -20,13 +20,8 @@ * The values here are computed in src/ticker.py */ -const uint8_t ticker_geom_table[1 << TICKER_GEOM_NBITS] = { - 254, 211, 187, 169, 156, 144, 135, 127, - 120, 113, 107, 102, 97, 93, 89, 85, - 81, 77, 74, 71, 68, 65, 62, 60, - 57, 55, 53, 50, 48, 46, 44, 42, - 40, 39, 37, 35, 33, 32, 30, 29, - 27, 26, 24, 23, 21, 20, 19, 18, - 16, 15, 14, 13, 12, 10, 9, 8, - 7, 6, 5, 4, 3, 2, 1, 0 -}; +const uint8_t ticker_geom_table[1 << TICKER_GEOM_NBITS] = {254, 211, 187, 169, + 156, 144, 135, 127, 120, 113, 107, 102, 97, 93, 89, 85, 81, 77, 74, 71, 68, + 65, 62, 60, 57, 55, 53, 50, 48, 46, 44, 42, 40, 39, 37, 35, 33, 32, 30, 29, + 27, 26, 24, 23, 21, 20, 19, 18, 16, 15, 14, 13, 12, 10, 9, 8, 7, 6, 5, 4, 3, + 2, 1, 0}; diff --git a/src/tsd.c b/src/tsd.c index 0a2ccc59..20042c2d 100644 --- a/src/tsd.c +++ b/src/tsd.c @@ -20,19 +20,20 @@ bool tsd_booted = false; #elif (defined(JEMALLOC_TLS)) JEMALLOC_TSD_TYPE_ATTR(tsd_t) tsd_tls = TSD_INITIALIZER; pthread_key_t tsd_tsd; -bool tsd_booted = false; +bool tsd_booted = false; #elif (defined(_WIN32)) -#if defined(JEMALLOC_LEGACY_WINDOWS_SUPPORT) || !defined(_MSC_VER) -DWORD tsd_tsd; +# if defined(JEMALLOC_LEGACY_WINDOWS_SUPPORT) || !defined(_MSC_VER) +DWORD tsd_tsd; tsd_wrapper_t tsd_boot_wrapper = {TSD_INITIALIZER, false}; -#else -JEMALLOC_TSD_TYPE_ATTR(tsd_wrapper_t) tsd_wrapper_tls = { TSD_INITIALIZER, false }; -#endif +# else +JEMALLOC_TSD_TYPE_ATTR(tsd_wrapper_t) +tsd_wrapper_tls = {TSD_INITIALIZER, false}; +# endif bool tsd_booted = false; -#if JEMALLOC_WIN32_TLSGETVALUE2 -TGV2 tls_get_value2 = NULL; +# if JEMALLOC_WIN32_TLSGETVALUE2 +TGV2 tls_get_value2 = NULL; HMODULE tgv2_mod = NULL; -#endif +# endif #else /* @@ -45,17 +46,12 @@ struct tsd_init_head_s { malloc_mutex_t lock; }; -pthread_key_t tsd_tsd; -tsd_init_head_t tsd_init_head = { - ql_head_initializer(blocks), - MALLOC_MUTEX_INITIALIZER -}; +pthread_key_t 
tsd_tsd; +tsd_init_head_t tsd_init_head = { + ql_head_initializer(blocks), MALLOC_MUTEX_INITIALIZER}; -tsd_wrapper_t tsd_boot_wrapper = { - false, - TSD_INITIALIZER -}; -bool tsd_booted = false; +tsd_wrapper_t tsd_boot_wrapper = {false, TSD_INITIALIZER}; +bool tsd_booted = false; #endif JEMALLOC_DIAGNOSTIC_POP @@ -64,7 +60,7 @@ JEMALLOC_DIAGNOSTIC_POP /* A list of all the tsds in the nominal state. */ typedef ql_head(tsd_t) tsd_list_t; -static tsd_list_t tsd_nominal_tsds = ql_head_initializer(tsd_nominal_tsds); +static tsd_list_t tsd_nominal_tsds = ql_head_initializer(tsd_nominal_tsds); static malloc_mutex_t tsd_nominal_tsds_lock; /* How many slow-path-enabling features are turned on. */ @@ -73,13 +69,13 @@ static atomic_u32_t tsd_global_slow_count = ATOMIC_INIT(0); static bool tsd_in_nominal_list(tsd_t *tsd) { tsd_t *tsd_list; - bool found = false; + bool found = false; /* * We don't know that tsd is nominal; it might not be safe to get data * out of it here. */ malloc_mutex_lock(TSDN_NULL, &tsd_nominal_tsds_lock); - ql_foreach(tsd_list, &tsd_nominal_tsds, TSD_MANGLE(tsd_link)) { + ql_foreach (tsd_list, &tsd_nominal_tsds, TSD_MANGLE(tsd_link)) { if (tsd == tsd_list) { found = true; break; @@ -117,7 +113,7 @@ tsd_force_recompute(tsdn_t *tsdn) { atomic_fence(ATOMIC_RELEASE); malloc_mutex_lock(tsdn, &tsd_nominal_tsds_lock); tsd_t *remote_tsd; - ql_foreach(remote_tsd, &tsd_nominal_tsds, TSD_MANGLE(tsd_link)) { + ql_foreach (remote_tsd, &tsd_nominal_tsds, TSD_MANGLE(tsd_link)) { assert(tsd_atomic_load(&remote_tsd->state, ATOMIC_RELAXED) <= tsd_state_nominal_max); tsd_atomic_store(&remote_tsd->state, @@ -143,7 +139,8 @@ tsd_global_slow_inc(tsdn_t *tsdn) { tsd_force_recompute(tsdn); } -void tsd_global_slow_dec(tsdn_t *tsdn) { +void +tsd_global_slow_dec(tsdn_t *tsdn) { atomic_fetch_sub_u32(&tsd_global_slow_count, 1, ATOMIC_RELAXED); /* See the note in ..._inc(). 
*/ tsd_force_recompute(tsdn); @@ -180,8 +177,8 @@ tsd_slow_update(tsd_t *tsd) { uint8_t old_state; do { uint8_t new_state = tsd_state_compute(tsd); - old_state = tsd_atomic_exchange(&tsd->state, new_state, - ATOMIC_ACQUIRE); + old_state = tsd_atomic_exchange( + &tsd->state, new_state, ATOMIC_ACQUIRE); } while (old_state == tsd_state_nominal_recompute); te_recompute_fast_threshold(tsd); @@ -211,8 +208,8 @@ tsd_state_set(tsd_t *tsd, uint8_t new_state) { assert(tsd_in_nominal_list(tsd)); if (new_state > tsd_state_nominal_max) { tsd_remove_nominal(tsd); - tsd_atomic_store(&tsd->state, new_state, - ATOMIC_RELAXED); + tsd_atomic_store( + &tsd->state, new_state, ATOMIC_RELAXED); } else { /* * This is the tricky case. We're transitioning from @@ -235,8 +232,7 @@ tsd_prng_state_init(tsd_t *tsd) { * cost of test repeatability. For debug builds, instead use a * deterministic seed. */ - *tsd_prng_statep_get(tsd) = config_debug ? 0 : - (uint64_t)(uintptr_t)tsd; + *tsd_prng_statep_get(tsd) = config_debug ? 0 : (uint64_t)(uintptr_t)tsd; } static bool @@ -264,8 +260,8 @@ assert_tsd_data_cleanup_done(tsd_t *tsd) { static bool tsd_data_init_nocleanup(tsd_t *tsd) { - assert(tsd_state_get(tsd) == tsd_state_reincarnated || - tsd_state_get(tsd) == tsd_state_minimal_initialized); + assert(tsd_state_get(tsd) == tsd_state_reincarnated + || tsd_state_get(tsd) == tsd_state_minimal_initialized); /* * During reincarnation, there is no guarantee that the cleanup function * will be called (deallocation may happen after all tsd destructors). 
@@ -358,15 +354,15 @@ malloc_tsd_dalloc(void *wrapper) { } #if defined(JEMALLOC_MALLOC_THREAD_CLEANUP) || defined(_WIN32) -static unsigned ncleanups; +static unsigned ncleanups; static malloc_tsd_cleanup_t cleanups[MALLOC_TSD_CLEANUPS_MAX]; -#ifndef _WIN32 +# ifndef _WIN32 JEMALLOC_EXPORT -#endif +# endif void _malloc_thread_cleanup(void) { - bool pending[MALLOC_TSD_CLEANUPS_MAX], again; + bool pending[MALLOC_TSD_CLEANUPS_MAX], again; unsigned i; for (i = 0; i < ncleanups; i++) { @@ -386,9 +382,9 @@ _malloc_thread_cleanup(void) { } while (again); } -#ifndef _WIN32 +# ifndef _WIN32 JEMALLOC_EXPORT -#endif +# endif void _malloc_tsd_cleanup_register(bool (*f)(void)) { assert(ncleanups < MALLOC_TSD_CLEANUPS_MAX); @@ -446,7 +442,7 @@ tsd_cleanup(void *arg) { } #ifdef JEMALLOC_JET test_callback_t test_callback = *tsd_test_callbackp_get_unsafe(tsd); - int *data = tsd_test_datap_get_unsafe(tsd); + int *data = tsd_test_datap_get_unsafe(tsd); if (test_callback != NULL) { test_callback(data); } @@ -461,7 +457,7 @@ malloc_tsd_boot0(void) { ncleanups = 0; #endif if (malloc_mutex_init(&tsd_nominal_tsds_lock, "tsd_nominal_tsds_lock", - WITNESS_RANK_OMIT, malloc_mutex_rank_exclusive)) { + WITNESS_RANK_OMIT, malloc_mutex_rank_exclusive)) { return NULL; } if (tsd_boot0()) { @@ -483,11 +479,11 @@ malloc_tsd_boot1(void) { static BOOL WINAPI _tls_callback(HINSTANCE hinstDLL, DWORD fdwReason, LPVOID lpvReserved) { switch (fdwReason) { -#ifdef JEMALLOC_LAZY_LOCK +# ifdef JEMALLOC_LAZY_LOCK case DLL_THREAD_ATTACH: isthreaded = true; break; -#endif +# endif case DLL_THREAD_DETACH: _malloc_thread_cleanup(); break; @@ -502,35 +498,36 @@ _tls_callback(HINSTANCE hinstDLL, DWORD fdwReason, LPVOID lpvReserved) { * hooked "read". We won't read for the rest of the file, so we can get away * with unhooking. 
*/ -#ifdef read -# undef read +# ifdef read +# undef read +# endif + +# ifdef _MSC_VER +# ifdef _M_IX86 +# pragma comment(linker, "/INCLUDE:__tls_used") +# pragma comment(linker, "/INCLUDE:_tls_callback") +# else +# pragma comment(linker, "/INCLUDE:_tls_used") +# pragma comment( \ + linker, "/INCLUDE:" STRINGIFY(tls_callback)) +# endif +# pragma section(".CRT$XLY", long, read) +# endif +JEMALLOC_SECTION(".CRT$XLY") +JEMALLOC_ATTR(used) BOOL(WINAPI *const tls_callback)( + HINSTANCE hinstDLL, DWORD fdwReason, LPVOID lpvReserved) = _tls_callback; #endif -#ifdef _MSC_VER -# ifdef _M_IX86 -# pragma comment(linker, "/INCLUDE:__tls_used") -# pragma comment(linker, "/INCLUDE:_tls_callback") -# else -# pragma comment(linker, "/INCLUDE:_tls_used") -# pragma comment(linker, "/INCLUDE:" STRINGIFY(tls_callback) ) -# endif -# pragma section(".CRT$XLY",long,read) -#endif -JEMALLOC_SECTION(".CRT$XLY") JEMALLOC_ATTR(used) -BOOL (WINAPI *const tls_callback)(HINSTANCE hinstDLL, - DWORD fdwReason, LPVOID lpvReserved) = _tls_callback; -#endif - -#if (!defined(JEMALLOC_MALLOC_THREAD_CLEANUP) && !defined(JEMALLOC_TLS) && \ - !defined(_WIN32)) +#if (!defined(JEMALLOC_MALLOC_THREAD_CLEANUP) && !defined(JEMALLOC_TLS) \ + && !defined(_WIN32)) void * tsd_init_check_recursion(tsd_init_head_t *head, tsd_init_block_t *block) { - pthread_t self = pthread_self(); + pthread_t self = pthread_self(); tsd_init_block_t *iter; /* Check whether this thread has already inserted into the list. 
*/ malloc_mutex_lock(TSDN_NULL, &head->lock); - ql_foreach(iter, &head->blocks, link) { + ql_foreach (iter, &head->blocks, link) { if (iter->thread == self) { malloc_mutex_unlock(TSDN_NULL, &head->lock); return iter->data; diff --git a/src/util.c b/src/util.c index b73848fb..1bcf4fee 100644 --- a/src/util.c +++ b/src/util.c @@ -8,8 +8,8 @@ bool multi_setting_parse_next(const char **setting_segment_cur, size_t *len_left, size_t *key_start, size_t *key_end, size_t *value) { const char *cur = *setting_segment_cur; - char *end; - uintmax_t um; + char *end; + uintmax_t um; set_errno(0); @@ -46,4 +46,3 @@ multi_setting_parse_next(const char **setting_segment_cur, size_t *len_left, return false; } - diff --git a/src/witness.c b/src/witness.c index 4474af04..940b1eae 100644 --- a/src/witness.c +++ b/src/witness.c @@ -26,8 +26,8 @@ witness_print_witness(witness_t *w, unsigned n) { static void witness_print_witnesses(const witness_list_t *witnesses) { witness_t *w, *last = NULL; - unsigned n = 0; - ql_foreach(w, witnesses, link) { + unsigned n = 0; + ql_foreach (w, witnesses, link) { if (last != NULL && w->rank > last->rank) { assert(w->name != last->name); witness_print_witness(last, n); @@ -45,8 +45,8 @@ witness_print_witnesses(const witness_list_t *witnesses) { } static void -witness_lock_error_impl(const witness_list_t *witnesses, - const witness_t *witness) { +witness_lock_error_impl( + const witness_list_t *witnesses, const witness_t *witness) { malloc_printf(": Lock rank order reversal:"); witness_print_witnesses(witnesses); malloc_printf(" %s(%u)\n", witness->name, witness->rank); @@ -56,8 +56,8 @@ witness_lock_error_t *JET_MUTABLE witness_lock_error = witness_lock_error_impl; static void witness_owner_error_impl(const witness_t *witness) { - malloc_printf(": Should own %s(%u)\n", witness->name, - witness->rank); + malloc_printf( + ": Should own %s(%u)\n", witness->name, witness->rank); abort(); } witness_owner_error_t *JET_MUTABLE witness_owner_error = @@ -76,7 
+76,7 @@ static void witness_depth_error_impl(const witness_list_t *witnesses, witness_rank_t rank_inclusive, unsigned depth) { malloc_printf(": Should own %u lock%s of rank >= %u:", depth, - (depth != 1) ? "s" : "", rank_inclusive); + (depth != 1) ? "s" : "", rank_inclusive); witness_print_witnesses(witnesses); malloc_printf("\n"); abort(); diff --git a/src/zone.c b/src/zone.c index 23dfdd04..e09de4b8 100644 --- a/src/zone.c +++ b/src/zone.c @@ -4,7 +4,7 @@ #include "jemalloc/internal/assert.h" #ifndef JEMALLOC_ZONE -# error "This source file is for zones on Darwin (OS X)." +# error "This source file is for zones on Darwin (OS X)." #endif /* Definitions of the following structs in malloc/malloc.h might be too old @@ -22,10 +22,11 @@ typedef struct _malloc_zone_t { void *(*realloc)(struct _malloc_zone_t *, void *, size_t); void (*destroy)(struct _malloc_zone_t *); const char *zone_name; - unsigned (*batch_malloc)(struct _malloc_zone_t *, size_t, void **, unsigned); + unsigned (*batch_malloc)( + struct _malloc_zone_t *, size_t, void **, unsigned); void (*batch_free)(struct _malloc_zone_t *, void **, unsigned); struct malloc_introspection_t *introspect; - unsigned version; + unsigned version; void *(*memalign)(struct _malloc_zone_t *, size_t, size_t); void (*free_definite_size)(struct _malloc_zone_t *, void *, size_t); size_t (*pressure_relief)(struct _malloc_zone_t *, size_t); @@ -33,22 +34,24 @@ typedef struct _malloc_zone_t { typedef struct { vm_address_t address; - vm_size_t size; + vm_size_t size; } vm_range_t; typedef struct malloc_statistics_t { unsigned blocks_in_use; - size_t size_in_use; - size_t max_size_in_use; - size_t size_allocated; + size_t size_in_use; + size_t max_size_in_use; + size_t size_allocated; } malloc_statistics_t; typedef kern_return_t memory_reader_t(task_t, vm_address_t, vm_size_t, void **); -typedef void vm_range_recorder_t(task_t, void *, unsigned type, vm_range_t *, unsigned); +typedef void vm_range_recorder_t( + task_t, void *, 
unsigned type, vm_range_t *, unsigned); typedef struct malloc_introspection_t { - kern_return_t (*enumerator)(task_t, void *, unsigned, vm_address_t, memory_reader_t, vm_range_recorder_t); + kern_return_t (*enumerator)(task_t, void *, unsigned, vm_address_t, + memory_reader_t, vm_range_recorder_t); size_t (*good_size)(malloc_zone_t *, size_t); boolean_t (*check)(malloc_zone_t *); void (*print)(malloc_zone_t *, boolean_t); @@ -61,14 +64,16 @@ typedef struct malloc_introspection_t { boolean_t (*disable_discharge_checking)(malloc_zone_t *); void (*discharge)(malloc_zone_t *, void *); #ifdef __BLOCKS__ - void (*enumerate_discharged_pointers)(malloc_zone_t *, void (^)(void *, void *)); + void (*enumerate_discharged_pointers)( + malloc_zone_t *, void (^)(void *, void *)); #else void *enumerate_unavailable_without_blocks; #endif void (*reinit_lock)(malloc_zone_t *); } malloc_introspection_t; -extern kern_return_t malloc_get_all_zones(task_t, memory_reader_t, vm_address_t **, unsigned *); +extern kern_return_t malloc_get_all_zones( + task_t, memory_reader_t, vm_address_t **, unsigned *); extern malloc_zone_t *malloc_default_zone(void); @@ -81,48 +86,46 @@ extern void malloc_zone_unregister(malloc_zone_t *zone); * We need to check whether it is present at runtime, thus the weak_import. */ extern malloc_zone_t *malloc_default_purgeable_zone(void) -JEMALLOC_ATTR(weak_import); + JEMALLOC_ATTR(weak_import); /******************************************************************************/ /* Data. */ -static malloc_zone_t *default_zone, *purgeable_zone; -static malloc_zone_t jemalloc_zone; +static malloc_zone_t *default_zone, *purgeable_zone; +static malloc_zone_t jemalloc_zone; static struct malloc_introspection_t jemalloc_zone_introspect; -static pid_t zone_force_lock_pid = -1; +static pid_t zone_force_lock_pid = -1; /******************************************************************************/ /* Function prototypes for non-inline static functions. 
*/ -static size_t zone_size(malloc_zone_t *zone, const void *ptr); -static void *zone_malloc(malloc_zone_t *zone, size_t size); -static void *zone_calloc(malloc_zone_t *zone, size_t num, size_t size); -static void *zone_valloc(malloc_zone_t *zone, size_t size); -static void zone_free(malloc_zone_t *zone, void *ptr); -static void *zone_realloc(malloc_zone_t *zone, void *ptr, size_t size); -static void *zone_memalign(malloc_zone_t *zone, size_t alignment, - size_t size); -static void zone_free_definite_size(malloc_zone_t *zone, void *ptr, - size_t size); -static void zone_destroy(malloc_zone_t *zone); -static unsigned zone_batch_malloc(struct _malloc_zone_t *zone, size_t size, +static size_t zone_size(malloc_zone_t *zone, const void *ptr); +static void *zone_malloc(malloc_zone_t *zone, size_t size); +static void *zone_calloc(malloc_zone_t *zone, size_t num, size_t size); +static void *zone_valloc(malloc_zone_t *zone, size_t size); +static void zone_free(malloc_zone_t *zone, void *ptr); +static void *zone_realloc(malloc_zone_t *zone, void *ptr, size_t size); +static void *zone_memalign(malloc_zone_t *zone, size_t alignment, size_t size); +static void zone_free_definite_size( + malloc_zone_t *zone, void *ptr, size_t size); +static void zone_destroy(malloc_zone_t *zone); +static unsigned zone_batch_malloc(struct _malloc_zone_t *zone, size_t size, void **results, unsigned num_requested); -static void zone_batch_free(struct _malloc_zone_t *zone, - void **to_be_freed, unsigned num_to_be_freed); -static size_t zone_pressure_relief(struct _malloc_zone_t *zone, size_t goal); -static size_t zone_good_size(malloc_zone_t *zone, size_t size); -static kern_return_t zone_enumerator(task_t task, void *data, unsigned type_mask, - vm_address_t zone_address, memory_reader_t reader, +static void zone_batch_free( + struct _malloc_zone_t *zone, void **to_be_freed, unsigned num_to_be_freed); +static size_t zone_pressure_relief(struct _malloc_zone_t *zone, size_t goal); +static size_t 
zone_good_size(malloc_zone_t *zone, size_t size); +static kern_return_t zone_enumerator(task_t task, void *data, + unsigned type_mask, vm_address_t zone_address, memory_reader_t reader, vm_range_recorder_t recorder); -static boolean_t zone_check(malloc_zone_t *zone); -static void zone_print(malloc_zone_t *zone, boolean_t verbose); -static void zone_log(malloc_zone_t *zone, void *address); -static void zone_force_lock(malloc_zone_t *zone); -static void zone_force_unlock(malloc_zone_t *zone); -static void zone_statistics(malloc_zone_t *zone, - malloc_statistics_t *stats); -static boolean_t zone_locked(malloc_zone_t *zone); -static void zone_reinit_lock(malloc_zone_t *zone); +static boolean_t zone_check(malloc_zone_t *zone); +static void zone_print(malloc_zone_t *zone, boolean_t verbose); +static void zone_log(malloc_zone_t *zone, void *address); +static void zone_force_lock(malloc_zone_t *zone); +static void zone_force_unlock(malloc_zone_t *zone); +static void zone_statistics(malloc_zone_t *zone, malloc_statistics_t *stats); +static boolean_t zone_locked(malloc_zone_t *zone); +static void zone_reinit_lock(malloc_zone_t *zone); /******************************************************************************/ /* @@ -225,8 +228,8 @@ zone_batch_malloc(struct _malloc_zone_t *zone, size_t size, void **results, } static void -zone_batch_free(struct _malloc_zone_t *zone, void **to_be_freed, - unsigned num_to_be_freed) { +zone_batch_free( + struct _malloc_zone_t *zone, void **to_be_freed, unsigned num_to_be_freed) { unsigned i; for (i = 0; i < num_to_be_freed; i++) { @@ -261,12 +264,10 @@ zone_check(malloc_zone_t *zone) { } static void -zone_print(malloc_zone_t *zone, boolean_t verbose) { -} +zone_print(malloc_zone_t *zone, boolean_t verbose) {} static void -zone_log(malloc_zone_t *zone, void *address) { -} +zone_log(malloc_zone_t *zone, void *address) {} static void zone_force_lock(malloc_zone_t *zone) { @@ -369,7 +370,7 @@ zone_init(void) { static malloc_zone_t * 
zone_default_get(void) { malloc_zone_t **zones = NULL; - unsigned int num_zones = 0; + unsigned int num_zones = 0; /* * On OSX 10.12, malloc_default_zone returns a special zone that is not @@ -380,8 +381,9 @@ zone_default_get(void) { * zone is the default. So get the list of zones to get the first one, * instead of relying on malloc_default_zone. */ - if (KERN_SUCCESS != malloc_get_all_zones(0, NULL, - (vm_address_t**)&zones, &num_zones)) { + if (KERN_SUCCESS + != malloc_get_all_zones( + 0, NULL, (vm_address_t **)&zones, &num_zones)) { /* * Reset the value in case the failure happened after it was * set. @@ -441,8 +443,8 @@ zone_register(void) { * register jemalloc's. */ default_zone = zone_default_get(); - if (!default_zone->zone_name || strcmp(default_zone->zone_name, - "DefaultMallocZone") != 0) { + if (!default_zone->zone_name + || strcmp(default_zone->zone_name, "DefaultMallocZone") != 0) { return; } @@ -457,8 +459,9 @@ zone_register(void) { * to check for the existence of malloc_default_purgeable_zone() at * run time. */ - purgeable_zone = (malloc_default_purgeable_zone == NULL) ? NULL : - malloc_default_purgeable_zone(); + purgeable_zone = (malloc_default_purgeable_zone == NULL) + ? NULL + : malloc_default_purgeable_zone(); /* Register the custom zone. At this point it won't be the default. 
*/ zone_init(); diff --git a/test/analyze/prof_bias.c b/test/analyze/prof_bias.c index a96ca942..e4bf7942 100644 --- a/test/analyze/prof_bias.c +++ b/test/analyze/prof_bias.c @@ -46,15 +46,15 @@ do_allocs(size_t sz, size_t cnt, bool do_frees) { int main(void) { size_t lg_prof_sample_local = 19; - int err = mallctl("prof.reset", NULL, NULL, - (void *)&lg_prof_sample_local, sizeof(lg_prof_sample_local)); + int err = mallctl("prof.reset", NULL, NULL, + (void *)&lg_prof_sample_local, sizeof(lg_prof_sample_local)); assert(err == 0); prof_backtrace_hook_set(mock_backtrace); do_allocs(16, 32 * 1024 * 1024, /* do_frees */ true); - do_allocs(32 * 1024* 1024, 16, /* do_frees */ true); + do_allocs(32 * 1024 * 1024, 16, /* do_frees */ true); do_allocs(16, 32 * 1024 * 1024, /* do_frees */ false); - do_allocs(32 * 1024* 1024, 16, /* do_frees */ false); + do_allocs(32 * 1024 * 1024, 16, /* do_frees */ false); return 0; } diff --git a/test/analyze/rand.c b/test/analyze/rand.c index bb20b06e..4c7e18c7 100644 --- a/test/analyze/rand.c +++ b/test/analyze/rand.c @@ -72,13 +72,13 @@ print_buckets(const size_t buckets[], const size_t means[], if (buckets[i] + stddevs[i] <= means[i]) { malloc_write(" "); for (size_t t = means[i] - buckets[i]; t >= stddevs[i]; - t -= stddevs[i]) { + t -= stddevs[i]) { malloc_write("-"); } } else if (buckets[i] >= means[i] + stddevs[i]) { malloc_write(" "); for (size_t t = buckets[i] - means[i]; t >= stddevs[i]; - t -= stddevs[i]) { + t -= stddevs[i]) { malloc_write("+"); } } @@ -93,8 +93,8 @@ bucket_analysis(uint64_t (*gen)(void *), void *opaque, size_t buckets[], for (size_t i = 1; i <= 3; ++i) { malloc_printf("round %zu\n", i); fill(buckets, n_bucket, 0); - collect_buckets(gen, opaque, buckets, n_bucket, - lg_bucket_width, n_iter); + collect_buckets( + gen, opaque, buckets, n_bucket, lg_bucket_width, n_iter); print_buckets(buckets, means, stddevs, n_bucket); } } @@ -108,7 +108,7 @@ bucket_analysis(uint64_t (*gen)(void *), void *opaque, size_t buckets[], 
typedef struct uniform_gen_arg_s uniform_gen_arg_t; struct uniform_gen_arg_s { - uint64_t state; + uint64_t state; const unsigned lg_range; }; @@ -131,8 +131,10 @@ TEST_BEGIN(test_uniform) { * integers, and that the minimal bucket mean is at least * MIN_BUCKET_MEAN. */ - const size_t q = 1 << QUOTIENT_CEIL(LG_CEIL(QUOTIENT_CEIL( - MIN_BUCKET_MEAN, N_BUCKET * (N_BUCKET - 1))), 2); + const size_t q = 1 << QUOTIENT_CEIL( + LG_CEIL(QUOTIENT_CEIL( + MIN_BUCKET_MEAN, N_BUCKET * (N_BUCKET - 1))), + 2); const size_t stddev = (N_BUCKET - 1) * q; const size_t mean = N_BUCKET * stddev * q; const size_t n_iter = N_BUCKET * mean; @@ -142,14 +144,14 @@ TEST_BEGIN(test_uniform) { size_t stddevs[N_BUCKET]; fill(stddevs, N_BUCKET, stddev); - uniform_gen_arg_t arg = {(uint64_t)(uintptr_t)&lg_range_test, - lg_range_test}; + uniform_gen_arg_t arg = { + (uint64_t)(uintptr_t)&lg_range_test, lg_range_test}; size_t buckets[N_BUCKET]; assert_zu_ge(lg_range_test, LG_N_BUCKET, ""); const size_t lg_bucket_width = lg_range_test - LG_N_BUCKET; - bucket_analysis(uniform_gen, &arg, buckets, means, stddevs, - N_BUCKET, lg_bucket_width, n_iter); + bucket_analysis(uniform_gen, &arg, buckets, means, stddevs, N_BUCKET, + lg_bucket_width, n_iter); #undef LG_N_BUCKET #undef N_BUCKET @@ -168,8 +170,8 @@ TEST_END * comments in test_prof_sample for explanations for n_divide. */ static double -fill_geometric_proportions(double proportions[], const size_t n_bucket, - const size_t n_divide) { +fill_geometric_proportions( + double proportions[], const size_t n_bucket, const size_t n_divide) { assert(n_bucket > 0); assert(n_divide > 0); double x = 1.; @@ -220,12 +222,12 @@ TEST_BEGIN(test_prof_sample) { #ifdef JEMALLOC_PROF /* Number of divisions within [0, mean). */ -#define LG_N_DIVIDE 3 -#define N_DIVIDE (1 << LG_N_DIVIDE) +# define LG_N_DIVIDE 3 +# define N_DIVIDE (1 << LG_N_DIVIDE) /* Coverage of buckets in terms of multiples of mean. 
*/ -#define LG_N_MULTIPLY 2 -#define N_GEO_BUCKET (N_DIVIDE << LG_N_MULTIPLY) +# define LG_N_MULTIPLY 2 +# define N_GEO_BUCKET (N_DIVIDE << LG_N_MULTIPLY) test_skip_if(!opt_prof); @@ -233,14 +235,15 @@ TEST_BEGIN(test_prof_sample) { size_t lg_prof_sample_orig = lg_prof_sample; assert_d_eq(mallctl("prof.reset", NULL, NULL, &lg_prof_sample_test, - sizeof(size_t)), 0, ""); + sizeof(size_t)), + 0, ""); malloc_printf("lg_prof_sample = %zu\n", lg_prof_sample_test); - double proportions[N_GEO_BUCKET + 1]; - const double min_proportion = fill_geometric_proportions(proportions, - N_GEO_BUCKET + 1, N_DIVIDE); - const size_t n_iter = round_to_nearest(MIN_BUCKET_MEAN / - min_proportion); + double proportions[N_GEO_BUCKET + 1]; + const double min_proportion = fill_geometric_proportions( + proportions, N_GEO_BUCKET + 1, N_DIVIDE); + const size_t n_iter = round_to_nearest( + MIN_BUCKET_MEAN / min_proportion); size_t means[N_GEO_BUCKET + 1]; size_t stddevs[N_GEO_BUCKET + 1]; fill_references(means, stddevs, proportions, N_GEO_BUCKET + 1, n_iter); @@ -255,12 +258,13 @@ TEST_BEGIN(test_prof_sample) { N_GEO_BUCKET + 1, lg_bucket_width, n_iter); assert_d_eq(mallctl("prof.reset", NULL, NULL, &lg_prof_sample_orig, - sizeof(size_t)), 0, ""); + sizeof(size_t)), + 0, ""); -#undef LG_N_DIVIDE -#undef N_DIVIDE -#undef LG_N_MULTIPLY -#undef N_GEO_BUCKET +# undef LG_N_DIVIDE +# undef N_DIVIDE +# undef LG_N_MULTIPLY +# undef N_GEO_BUCKET #endif /* JEMALLOC_PROF */ } @@ -270,7 +274,5 @@ TEST_END int main(void) { - return test_no_reentrancy( - test_uniform, - test_prof_sample); + return test_no_reentrancy(test_uniform, test_prof_sample); } diff --git a/test/analyze/sizes.c b/test/analyze/sizes.c index cfb5ce51..cc6c3806 100644 --- a/test/analyze/sizes.c +++ b/test/analyze/sizes.c @@ -11,9 +11,9 @@ static void do_print(const char *name, size_t sz_bytes) { - const char *sizes[] = {"bytes", "KB", "MB", "GB", "TB", "PB", "EB", - "ZB"}; - size_t sizes_max = sizeof(sizes)/sizeof(sizes[0]); + const char 
*sizes[] = { + "bytes", "KB", "MB", "GB", "TB", "PB", "EB", "ZB"}; + size_t sizes_max = sizeof(sizes) / sizeof(sizes[0]); size_t ind = 0; double sz = sz_bytes; @@ -30,8 +30,7 @@ do_print(const char *name, size_t sz_bytes) { int main(void) { -#define P(type) \ - do_print(#type, sizeof(type)) +#define P(type) do_print(#type, sizeof(type)) P(arena_t); P(arena_stats_t); P(base_t); diff --git a/test/include/test/SFMT-alti.h b/test/include/test/SFMT-alti.h index a1885dbf..d6a85ad1 100644 --- a/test/include/test/SFMT-alti.h +++ b/test/include/test/SFMT-alti.h @@ -61,58 +61,59 @@ * @return output */ JEMALLOC_ALWAYS_INLINE -vector unsigned int vec_recursion(vector unsigned int a, - vector unsigned int b, - vector unsigned int c, - vector unsigned int d) { - - const vector unsigned int sl1 = ALTI_SL1; - const vector unsigned int sr1 = ALTI_SR1; +vector unsigned int +vec_recursion(vector unsigned int a, vector unsigned int b, + vector unsigned int c, vector unsigned int d) { + const vector unsigned int sl1 = ALTI_SL1; + const vector unsigned int sr1 = ALTI_SR1; #ifdef ONLY64 - const vector unsigned int mask = ALTI_MSK64; - const vector unsigned char perm_sl = ALTI_SL2_PERM64; - const vector unsigned char perm_sr = ALTI_SR2_PERM64; + const vector unsigned int mask = ALTI_MSK64; + const vector unsigned char perm_sl = ALTI_SL2_PERM64; + const vector unsigned char perm_sr = ALTI_SR2_PERM64; #else - const vector unsigned int mask = ALTI_MSK; - const vector unsigned char perm_sl = ALTI_SL2_PERM; - const vector unsigned char perm_sr = ALTI_SR2_PERM; + const vector unsigned int mask = ALTI_MSK; + const vector unsigned char perm_sl = ALTI_SL2_PERM; + const vector unsigned char perm_sr = ALTI_SR2_PERM; #endif - vector unsigned int v, w, x, y, z; - x = vec_perm(a, (vector unsigned int)perm_sl, perm_sl); - v = a; - y = vec_sr(b, sr1); - z = vec_perm(c, (vector unsigned int)perm_sr, perm_sr); - w = vec_sl(d, sl1); - z = vec_xor(z, w); - y = vec_and(y, mask); - v = vec_xor(v, x); - z = 
vec_xor(z, y); - z = vec_xor(z, v); - return z; + vector unsigned int v, w, x, y, z; + x = vec_perm(a, (vector unsigned int)perm_sl, perm_sl); + v = a; + y = vec_sr(b, sr1); + z = vec_perm(c, (vector unsigned int)perm_sr, perm_sr); + w = vec_sl(d, sl1); + z = vec_xor(z, w); + y = vec_and(y, mask); + v = vec_xor(v, x); + z = vec_xor(z, y); + z = vec_xor(z, v); + return z; } /** * This function fills the internal state array with pseudorandom * integers. */ -static inline void gen_rand_all(sfmt_t *ctx) { - int i; - vector unsigned int r, r1, r2; +static inline void +gen_rand_all(sfmt_t *ctx) { + int i; + vector unsigned int r, r1, r2; - r1 = ctx->sfmt[N - 2].s; - r2 = ctx->sfmt[N - 1].s; - for (i = 0; i < N - POS1; i++) { - r = vec_recursion(ctx->sfmt[i].s, ctx->sfmt[i + POS1].s, r1, r2); - ctx->sfmt[i].s = r; - r1 = r2; - r2 = r; - } - for (; i < N; i++) { - r = vec_recursion(ctx->sfmt[i].s, ctx->sfmt[i + POS1 - N].s, r1, r2); - ctx->sfmt[i].s = r; - r1 = r2; - r2 = r; - } + r1 = ctx->sfmt[N - 2].s; + r2 = ctx->sfmt[N - 1].s; + for (i = 0; i < N - POS1; i++) { + r = vec_recursion( + ctx->sfmt[i].s, ctx->sfmt[i + POS1].s, r1, r2); + ctx->sfmt[i].s = r; + r1 = r2; + r2 = r; + } + for (; i < N; i++) { + r = vec_recursion( + ctx->sfmt[i].s, ctx->sfmt[i + POS1 - N].s, r1, r2); + ctx->sfmt[i].s = r; + r1 = r2; + r2 = r; + } } /** @@ -122,50 +123,57 @@ static inline void gen_rand_all(sfmt_t *ctx) { * @param array an 128-bit array to be filled by pseudorandom numbers. * @param size number of 128-bit pesudorandom numbers to be generated. 
*/ -static inline void gen_rand_array(sfmt_t *ctx, w128_t *array, int size) { - int i, j; - vector unsigned int r, r1, r2; +static inline void +gen_rand_array(sfmt_t *ctx, w128_t *array, int size) { + int i, j; + vector unsigned int r, r1, r2; - r1 = ctx->sfmt[N - 2].s; - r2 = ctx->sfmt[N - 1].s; - for (i = 0; i < N - POS1; i++) { - r = vec_recursion(ctx->sfmt[i].s, ctx->sfmt[i + POS1].s, r1, r2); - array[i].s = r; - r1 = r2; - r2 = r; - } - for (; i < N; i++) { - r = vec_recursion(ctx->sfmt[i].s, array[i + POS1 - N].s, r1, r2); - array[i].s = r; - r1 = r2; - r2 = r; - } - /* main loop */ - for (; i < size - N; i++) { - r = vec_recursion(array[i - N].s, array[i + POS1 - N].s, r1, r2); - array[i].s = r; - r1 = r2; - r2 = r; - } - for (j = 0; j < 2 * N - size; j++) { - ctx->sfmt[j].s = array[j + size - N].s; - } - for (; i < size; i++) { - r = vec_recursion(array[i - N].s, array[i + POS1 - N].s, r1, r2); - array[i].s = r; - ctx->sfmt[j++].s = r; - r1 = r2; - r2 = r; - } + r1 = ctx->sfmt[N - 2].s; + r2 = ctx->sfmt[N - 1].s; + for (i = 0; i < N - POS1; i++) { + r = vec_recursion( + ctx->sfmt[i].s, ctx->sfmt[i + POS1].s, r1, r2); + array[i].s = r; + r1 = r2; + r2 = r; + } + for (; i < N; i++) { + r = vec_recursion( + ctx->sfmt[i].s, array[i + POS1 - N].s, r1, r2); + array[i].s = r; + r1 = r2; + r2 = r; + } + /* main loop */ + for (; i < size - N; i++) { + r = vec_recursion( + array[i - N].s, array[i + POS1 - N].s, r1, r2); + array[i].s = r; + r1 = r2; + r2 = r; + } + for (j = 0; j < 2 * N - size; j++) { + ctx->sfmt[j].s = array[j + size - N].s; + } + for (; i < size; i++) { + r = vec_recursion( + array[i - N].s, array[i + POS1 - N].s, r1, r2); + array[i].s = r; + ctx->sfmt[j++].s = r; + r1 = r2; + r2 = r; + } } #ifndef ONLY64 -#if defined(__APPLE__) -#define ALTI_SWAP (vector unsigned char) \ - (4, 5, 6, 7, 0, 1, 2, 3, 12, 13, 14, 15, 8, 9, 10, 11) -#else -#define ALTI_SWAP {4, 5, 6, 7, 0, 1, 2, 3, 12, 13, 14, 15, 8, 9, 10, 11} -#endif +# if defined(__APPLE__) +# define 
ALTI_SWAP \ + (vector unsigned char)(4, 5, 6, 7, 0, 1, 2, 3, 12, 13, \ + 14, 15, 8, 9, 10, 11) +# else +# define ALTI_SWAP \ + { 4, 5, 6, 7, 0, 1, 2, 3, 12, 13, 14, 15, 8, 9, 10, 11 } +# endif /** * This function swaps high and low 32-bit of 64-bit integers in user * specified array. @@ -173,13 +181,15 @@ static inline void gen_rand_array(sfmt_t *ctx, w128_t *array, int size) { * @param array an 128-bit array to be swaped. * @param size size of 128-bit array. */ -static inline void swap(w128_t *array, int size) { - int i; - const vector unsigned char perm = ALTI_SWAP; +static inline void +swap(w128_t *array, int size) { + int i; + const vector unsigned char perm = ALTI_SWAP; - for (i = 0; i < size; i++) { - array[i].s = vec_perm(array[i].s, (vector unsigned int)perm, perm); - } + for (i = 0; i < size; i++) { + array[i].s = vec_perm( + array[i].s, (vector unsigned int)perm, perm); + } } #endif diff --git a/test/include/test/SFMT-params.h b/test/include/test/SFMT-params.h index 6730adf8..4ff4316f 100644 --- a/test/include/test/SFMT-params.h +++ b/test/include/test/SFMT-params.h @@ -37,10 +37,10 @@ #define SFMT_PARAMS_H #if !defined(MEXP) -#ifdef __GNUC__ - #warning "MEXP is not defined. I assume MEXP is 19937." -#endif - #define MEXP 19937 +# ifdef __GNUC__ +# warning "MEXP is not defined. I assume MEXP is 19937." 
+# endif +# define MEXP 19937 #endif /*----------------- BASIC DEFINITIONS @@ -100,32 +100,32 @@ */ #if MEXP == 607 - #include "test/SFMT-params607.h" +# include "test/SFMT-params607.h" #elif MEXP == 1279 - #include "test/SFMT-params1279.h" +# include "test/SFMT-params1279.h" #elif MEXP == 2281 - #include "test/SFMT-params2281.h" +# include "test/SFMT-params2281.h" #elif MEXP == 4253 - #include "test/SFMT-params4253.h" +# include "test/SFMT-params4253.h" #elif MEXP == 11213 - #include "test/SFMT-params11213.h" +# include "test/SFMT-params11213.h" #elif MEXP == 19937 - #include "test/SFMT-params19937.h" +# include "test/SFMT-params19937.h" #elif MEXP == 44497 - #include "test/SFMT-params44497.h" +# include "test/SFMT-params44497.h" #elif MEXP == 86243 - #include "test/SFMT-params86243.h" +# include "test/SFMT-params86243.h" #elif MEXP == 132049 - #include "test/SFMT-params132049.h" +# include "test/SFMT-params132049.h" #elif MEXP == 216091 - #include "test/SFMT-params216091.h" +# include "test/SFMT-params216091.h" #else -#ifdef __GNUC__ - #error "MEXP is not valid." - #undef MEXP -#else - #undef MEXP -#endif +# ifdef __GNUC__ +# error "MEXP is not valid." 
+# undef MEXP +# else +# undef MEXP +# endif #endif diff --git a/test/include/test/SFMT-params11213.h b/test/include/test/SFMT-params11213.h index 2994bd21..d2ab5b7c 100644 --- a/test/include/test/SFMT-params11213.h +++ b/test/include/test/SFMT-params11213.h @@ -36,46 +36,56 @@ #ifndef SFMT_PARAMS11213_H #define SFMT_PARAMS11213_H -#define POS1 68 -#define SL1 14 -#define SL2 3 -#define SR1 7 -#define SR2 3 -#define MSK1 0xeffff7fbU -#define MSK2 0xffffffefU -#define MSK3 0xdfdfbfffU -#define MSK4 0x7fffdbfdU -#define PARITY1 0x00000001U -#define PARITY2 0x00000000U -#define PARITY3 0xe8148000U -#define PARITY4 0xd0c7afa3U - +#define POS1 68 +#define SL1 14 +#define SL2 3 +#define SR1 7 +#define SR2 3 +#define MSK1 0xeffff7fbU +#define MSK2 0xffffffefU +#define MSK3 0xdfdfbfffU +#define MSK4 0x7fffdbfdU +#define PARITY1 0x00000001U +#define PARITY2 0x00000000U +#define PARITY3 0xe8148000U +#define PARITY4 0xd0c7afa3U /* PARAMETERS FOR ALTIVEC */ -#if defined(__APPLE__) /* For OSX */ - #define ALTI_SL1 (vector unsigned int)(SL1, SL1, SL1, SL1) - #define ALTI_SR1 (vector unsigned int)(SR1, SR1, SR1, SR1) - #define ALTI_MSK (vector unsigned int)(MSK1, MSK2, MSK3, MSK4) - #define ALTI_MSK64 \ - (vector unsigned int)(MSK2, MSK1, MSK4, MSK3) - #define ALTI_SL2_PERM \ - (vector unsigned char)(3,21,21,21,7,0,1,2,11,4,5,6,15,8,9,10) - #define ALTI_SL2_PERM64 \ - (vector unsigned char)(3,4,5,6,7,29,29,29,11,12,13,14,15,0,1,2) - #define ALTI_SR2_PERM \ - (vector unsigned char)(5,6,7,0,9,10,11,4,13,14,15,8,19,19,19,12) - #define ALTI_SR2_PERM64 \ - (vector unsigned char)(13,14,15,0,1,2,3,4,19,19,19,8,9,10,11,12) -#else /* For OTHER OSs(Linux?) 
*/ - #define ALTI_SL1 {SL1, SL1, SL1, SL1} - #define ALTI_SR1 {SR1, SR1, SR1, SR1} - #define ALTI_MSK {MSK1, MSK2, MSK3, MSK4} - #define ALTI_MSK64 {MSK2, MSK1, MSK4, MSK3} - #define ALTI_SL2_PERM {3,21,21,21,7,0,1,2,11,4,5,6,15,8,9,10} - #define ALTI_SL2_PERM64 {3,4,5,6,7,29,29,29,11,12,13,14,15,0,1,2} - #define ALTI_SR2_PERM {5,6,7,0,9,10,11,4,13,14,15,8,19,19,19,12} - #define ALTI_SR2_PERM64 {13,14,15,0,1,2,3,4,19,19,19,8,9,10,11,12} -#endif /* For OSX */ -#define IDSTR "SFMT-11213:68-14-3-7-3:effff7fb-ffffffef-dfdfbfff-7fffdbfd" +#if defined(__APPLE__) /* For OSX */ +# define ALTI_SL1 (vector unsigned int)(SL1, SL1, SL1, SL1) +# define ALTI_SR1 (vector unsigned int)(SR1, SR1, SR1, SR1) +# define ALTI_MSK (vector unsigned int)(MSK1, MSK2, MSK3, MSK4) +# define ALTI_MSK64 (vector unsigned int)(MSK2, MSK1, MSK4, MSK3) +# define ALTI_SL2_PERM \ + (vector unsigned char)(3, 21, 21, 21, 7, 0, 1, 2, 11, 4, 5, 6, \ + 15, 8, 9, 10) +# define ALTI_SL2_PERM64 \ + (vector unsigned char)(3, 4, 5, 6, 7, 29, 29, 29, 11, 12, 13, \ + 14, 15, 0, 1, 2) +# define ALTI_SR2_PERM \ + (vector unsigned char)(5, 6, 7, 0, 9, 10, 11, 4, 13, 14, 15, \ + 8, 19, 19, 19, 12) +# define ALTI_SR2_PERM64 \ + (vector unsigned char)(13, 14, 15, 0, 1, 2, 3, 4, 19, 19, 19, \ + 8, 9, 10, 11, 12) +#else /* For OTHER OSs(Linux?) 
*/ +# define ALTI_SL1 \ + { SL1, SL1, SL1, SL1 } +# define ALTI_SR1 \ + { SR1, SR1, SR1, SR1 } +# define ALTI_MSK \ + { MSK1, MSK2, MSK3, MSK4 } +# define ALTI_MSK64 \ + { MSK2, MSK1, MSK4, MSK3 } +# define ALTI_SL2_PERM \ + { 3, 21, 21, 21, 7, 0, 1, 2, 11, 4, 5, 6, 15, 8, 9, 10 } +# define ALTI_SL2_PERM64 \ + { 3, 4, 5, 6, 7, 29, 29, 29, 11, 12, 13, 14, 15, 0, 1, 2 } +# define ALTI_SR2_PERM \ + { 5, 6, 7, 0, 9, 10, 11, 4, 13, 14, 15, 8, 19, 19, 19, 12 } +# define ALTI_SR2_PERM64 \ + { 13, 14, 15, 0, 1, 2, 3, 4, 19, 19, 19, 8, 9, 10, 11, 12 } +#endif /* For OSX */ +#define IDSTR "SFMT-11213:68-14-3-7-3:effff7fb-ffffffef-dfdfbfff-7fffdbfd" #endif /* SFMT_PARAMS11213_H */ diff --git a/test/include/test/SFMT-params1279.h b/test/include/test/SFMT-params1279.h index d7959f98..1be5c01d 100644 --- a/test/include/test/SFMT-params1279.h +++ b/test/include/test/SFMT-params1279.h @@ -36,46 +36,56 @@ #ifndef SFMT_PARAMS1279_H #define SFMT_PARAMS1279_H -#define POS1 7 -#define SL1 14 -#define SL2 3 -#define SR1 5 -#define SR2 1 -#define MSK1 0xf7fefffdU -#define MSK2 0x7fefcfffU -#define MSK3 0xaff3ef3fU -#define MSK4 0xb5ffff7fU -#define PARITY1 0x00000001U -#define PARITY2 0x00000000U -#define PARITY3 0x00000000U -#define PARITY4 0x20000000U - +#define POS1 7 +#define SL1 14 +#define SL2 3 +#define SR1 5 +#define SR2 1 +#define MSK1 0xf7fefffdU +#define MSK2 0x7fefcfffU +#define MSK3 0xaff3ef3fU +#define MSK4 0xb5ffff7fU +#define PARITY1 0x00000001U +#define PARITY2 0x00000000U +#define PARITY3 0x00000000U +#define PARITY4 0x20000000U /* PARAMETERS FOR ALTIVEC */ -#if defined(__APPLE__) /* For OSX */ - #define ALTI_SL1 (vector unsigned int)(SL1, SL1, SL1, SL1) - #define ALTI_SR1 (vector unsigned int)(SR1, SR1, SR1, SR1) - #define ALTI_MSK (vector unsigned int)(MSK1, MSK2, MSK3, MSK4) - #define ALTI_MSK64 \ - (vector unsigned int)(MSK2, MSK1, MSK4, MSK3) - #define ALTI_SL2_PERM \ - (vector unsigned char)(3,21,21,21,7,0,1,2,11,4,5,6,15,8,9,10) - #define ALTI_SL2_PERM64 \ - 
(vector unsigned char)(3,4,5,6,7,29,29,29,11,12,13,14,15,0,1,2) - #define ALTI_SR2_PERM \ - (vector unsigned char)(7,0,1,2,11,4,5,6,15,8,9,10,17,12,13,14) - #define ALTI_SR2_PERM64 \ - (vector unsigned char)(15,0,1,2,3,4,5,6,17,8,9,10,11,12,13,14) -#else /* For OTHER OSs(Linux?) */ - #define ALTI_SL1 {SL1, SL1, SL1, SL1} - #define ALTI_SR1 {SR1, SR1, SR1, SR1} - #define ALTI_MSK {MSK1, MSK2, MSK3, MSK4} - #define ALTI_MSK64 {MSK2, MSK1, MSK4, MSK3} - #define ALTI_SL2_PERM {3,21,21,21,7,0,1,2,11,4,5,6,15,8,9,10} - #define ALTI_SL2_PERM64 {3,4,5,6,7,29,29,29,11,12,13,14,15,0,1,2} - #define ALTI_SR2_PERM {7,0,1,2,11,4,5,6,15,8,9,10,17,12,13,14} - #define ALTI_SR2_PERM64 {15,0,1,2,3,4,5,6,17,8,9,10,11,12,13,14} -#endif /* For OSX */ -#define IDSTR "SFMT-1279:7-14-3-5-1:f7fefffd-7fefcfff-aff3ef3f-b5ffff7f" +#if defined(__APPLE__) /* For OSX */ +# define ALTI_SL1 (vector unsigned int)(SL1, SL1, SL1, SL1) +# define ALTI_SR1 (vector unsigned int)(SR1, SR1, SR1, SR1) +# define ALTI_MSK (vector unsigned int)(MSK1, MSK2, MSK3, MSK4) +# define ALTI_MSK64 (vector unsigned int)(MSK2, MSK1, MSK4, MSK3) +# define ALTI_SL2_PERM \ + (vector unsigned char)(3, 21, 21, 21, 7, 0, 1, 2, 11, 4, 5, 6, \ + 15, 8, 9, 10) +# define ALTI_SL2_PERM64 \ + (vector unsigned char)(3, 4, 5, 6, 7, 29, 29, 29, 11, 12, 13, \ + 14, 15, 0, 1, 2) +# define ALTI_SR2_PERM \ + (vector unsigned char)(7, 0, 1, 2, 11, 4, 5, 6, 15, 8, 9, 10, \ + 17, 12, 13, 14) +# define ALTI_SR2_PERM64 \ + (vector unsigned char)(15, 0, 1, 2, 3, 4, 5, 6, 17, 8, 9, 10, \ + 11, 12, 13, 14) +#else /* For OTHER OSs(Linux?) 
*/ +# define ALTI_SL1 \ + { SL1, SL1, SL1, SL1 } +# define ALTI_SR1 \ + { SR1, SR1, SR1, SR1 } +# define ALTI_MSK \ + { MSK1, MSK2, MSK3, MSK4 } +# define ALTI_MSK64 \ + { MSK2, MSK1, MSK4, MSK3 } +# define ALTI_SL2_PERM \ + { 3, 21, 21, 21, 7, 0, 1, 2, 11, 4, 5, 6, 15, 8, 9, 10 } +# define ALTI_SL2_PERM64 \ + { 3, 4, 5, 6, 7, 29, 29, 29, 11, 12, 13, 14, 15, 0, 1, 2 } +# define ALTI_SR2_PERM \ + { 7, 0, 1, 2, 11, 4, 5, 6, 15, 8, 9, 10, 17, 12, 13, 14 } +# define ALTI_SR2_PERM64 \ + { 15, 0, 1, 2, 3, 4, 5, 6, 17, 8, 9, 10, 11, 12, 13, 14 } +#endif /* For OSX */ +#define IDSTR "SFMT-1279:7-14-3-5-1:f7fefffd-7fefcfff-aff3ef3f-b5ffff7f" #endif /* SFMT_PARAMS1279_H */ diff --git a/test/include/test/SFMT-params132049.h b/test/include/test/SFMT-params132049.h index a1dcec39..1002614b 100644 --- a/test/include/test/SFMT-params132049.h +++ b/test/include/test/SFMT-params132049.h @@ -36,46 +36,56 @@ #ifndef SFMT_PARAMS132049_H #define SFMT_PARAMS132049_H -#define POS1 110 -#define SL1 19 -#define SL2 1 -#define SR1 21 -#define SR2 1 -#define MSK1 0xffffbb5fU -#define MSK2 0xfb6ebf95U -#define MSK3 0xfffefffaU -#define MSK4 0xcff77fffU -#define PARITY1 0x00000001U -#define PARITY2 0x00000000U -#define PARITY3 0xcb520000U -#define PARITY4 0xc7e91c7dU - +#define POS1 110 +#define SL1 19 +#define SL2 1 +#define SR1 21 +#define SR2 1 +#define MSK1 0xffffbb5fU +#define MSK2 0xfb6ebf95U +#define MSK3 0xfffefffaU +#define MSK4 0xcff77fffU +#define PARITY1 0x00000001U +#define PARITY2 0x00000000U +#define PARITY3 0xcb520000U +#define PARITY4 0xc7e91c7dU /* PARAMETERS FOR ALTIVEC */ -#if defined(__APPLE__) /* For OSX */ - #define ALTI_SL1 (vector unsigned int)(SL1, SL1, SL1, SL1) - #define ALTI_SR1 (vector unsigned int)(SR1, SR1, SR1, SR1) - #define ALTI_MSK (vector unsigned int)(MSK1, MSK2, MSK3, MSK4) - #define ALTI_MSK64 \ - (vector unsigned int)(MSK2, MSK1, MSK4, MSK3) - #define ALTI_SL2_PERM \ - (vector unsigned char)(1,2,3,23,5,6,7,0,9,10,11,4,13,14,15,8) - #define 
ALTI_SL2_PERM64 \ - (vector unsigned char)(1,2,3,4,5,6,7,31,9,10,11,12,13,14,15,0) - #define ALTI_SR2_PERM \ - (vector unsigned char)(7,0,1,2,11,4,5,6,15,8,9,10,17,12,13,14) - #define ALTI_SR2_PERM64 \ - (vector unsigned char)(15,0,1,2,3,4,5,6,17,8,9,10,11,12,13,14) -#else /* For OTHER OSs(Linux?) */ - #define ALTI_SL1 {SL1, SL1, SL1, SL1} - #define ALTI_SR1 {SR1, SR1, SR1, SR1} - #define ALTI_MSK {MSK1, MSK2, MSK3, MSK4} - #define ALTI_MSK64 {MSK2, MSK1, MSK4, MSK3} - #define ALTI_SL2_PERM {1,2,3,23,5,6,7,0,9,10,11,4,13,14,15,8} - #define ALTI_SL2_PERM64 {1,2,3,4,5,6,7,31,9,10,11,12,13,14,15,0} - #define ALTI_SR2_PERM {7,0,1,2,11,4,5,6,15,8,9,10,17,12,13,14} - #define ALTI_SR2_PERM64 {15,0,1,2,3,4,5,6,17,8,9,10,11,12,13,14} -#endif /* For OSX */ -#define IDSTR "SFMT-132049:110-19-1-21-1:ffffbb5f-fb6ebf95-fffefffa-cff77fff" +#if defined(__APPLE__) /* For OSX */ +# define ALTI_SL1 (vector unsigned int)(SL1, SL1, SL1, SL1) +# define ALTI_SR1 (vector unsigned int)(SR1, SR1, SR1, SR1) +# define ALTI_MSK (vector unsigned int)(MSK1, MSK2, MSK3, MSK4) +# define ALTI_MSK64 (vector unsigned int)(MSK2, MSK1, MSK4, MSK3) +# define ALTI_SL2_PERM \ + (vector unsigned char)(1, 2, 3, 23, 5, 6, 7, 0, 9, 10, 11, 4, \ + 13, 14, 15, 8) +# define ALTI_SL2_PERM64 \ + (vector unsigned char)(1, 2, 3, 4, 5, 6, 7, 31, 9, 10, 11, 12, \ + 13, 14, 15, 0) +# define ALTI_SR2_PERM \ + (vector unsigned char)(7, 0, 1, 2, 11, 4, 5, 6, 15, 8, 9, 10, \ + 17, 12, 13, 14) +# define ALTI_SR2_PERM64 \ + (vector unsigned char)(15, 0, 1, 2, 3, 4, 5, 6, 17, 8, 9, 10, \ + 11, 12, 13, 14) +#else /* For OTHER OSs(Linux?) 
*/ +# define ALTI_SL1 \ + { SL1, SL1, SL1, SL1 } +# define ALTI_SR1 \ + { SR1, SR1, SR1, SR1 } +# define ALTI_MSK \ + { MSK1, MSK2, MSK3, MSK4 } +# define ALTI_MSK64 \ + { MSK2, MSK1, MSK4, MSK3 } +# define ALTI_SL2_PERM \ + { 1, 2, 3, 23, 5, 6, 7, 0, 9, 10, 11, 4, 13, 14, 15, 8 } +# define ALTI_SL2_PERM64 \ + { 1, 2, 3, 4, 5, 6, 7, 31, 9, 10, 11, 12, 13, 14, 15, 0 } +# define ALTI_SR2_PERM \ + { 7, 0, 1, 2, 11, 4, 5, 6, 15, 8, 9, 10, 17, 12, 13, 14 } +# define ALTI_SR2_PERM64 \ + { 15, 0, 1, 2, 3, 4, 5, 6, 17, 8, 9, 10, 11, 12, 13, 14 } +#endif /* For OSX */ +#define IDSTR "SFMT-132049:110-19-1-21-1:ffffbb5f-fb6ebf95-fffefffa-cff77fff" #endif /* SFMT_PARAMS132049_H */ diff --git a/test/include/test/SFMT-params19937.h b/test/include/test/SFMT-params19937.h index fb92b4c9..71df2713 100644 --- a/test/include/test/SFMT-params19937.h +++ b/test/include/test/SFMT-params19937.h @@ -36,46 +36,56 @@ #ifndef SFMT_PARAMS19937_H #define SFMT_PARAMS19937_H -#define POS1 122 -#define SL1 18 -#define SL2 1 -#define SR1 11 -#define SR2 1 -#define MSK1 0xdfffffefU -#define MSK2 0xddfecb7fU -#define MSK3 0xbffaffffU -#define MSK4 0xbffffff6U -#define PARITY1 0x00000001U -#define PARITY2 0x00000000U -#define PARITY3 0x00000000U -#define PARITY4 0x13c9e684U - +#define POS1 122 +#define SL1 18 +#define SL2 1 +#define SR1 11 +#define SR2 1 +#define MSK1 0xdfffffefU +#define MSK2 0xddfecb7fU +#define MSK3 0xbffaffffU +#define MSK4 0xbffffff6U +#define PARITY1 0x00000001U +#define PARITY2 0x00000000U +#define PARITY3 0x00000000U +#define PARITY4 0x13c9e684U /* PARAMETERS FOR ALTIVEC */ -#if defined(__APPLE__) /* For OSX */ - #define ALTI_SL1 (vector unsigned int)(SL1, SL1, SL1, SL1) - #define ALTI_SR1 (vector unsigned int)(SR1, SR1, SR1, SR1) - #define ALTI_MSK (vector unsigned int)(MSK1, MSK2, MSK3, MSK4) - #define ALTI_MSK64 \ - (vector unsigned int)(MSK2, MSK1, MSK4, MSK3) - #define ALTI_SL2_PERM \ - (vector unsigned char)(1,2,3,23,5,6,7,0,9,10,11,4,13,14,15,8) - #define 
ALTI_SL2_PERM64 \ - (vector unsigned char)(1,2,3,4,5,6,7,31,9,10,11,12,13,14,15,0) - #define ALTI_SR2_PERM \ - (vector unsigned char)(7,0,1,2,11,4,5,6,15,8,9,10,17,12,13,14) - #define ALTI_SR2_PERM64 \ - (vector unsigned char)(15,0,1,2,3,4,5,6,17,8,9,10,11,12,13,14) -#else /* For OTHER OSs(Linux?) */ - #define ALTI_SL1 {SL1, SL1, SL1, SL1} - #define ALTI_SR1 {SR1, SR1, SR1, SR1} - #define ALTI_MSK {MSK1, MSK2, MSK3, MSK4} - #define ALTI_MSK64 {MSK2, MSK1, MSK4, MSK3} - #define ALTI_SL2_PERM {1,2,3,23,5,6,7,0,9,10,11,4,13,14,15,8} - #define ALTI_SL2_PERM64 {1,2,3,4,5,6,7,31,9,10,11,12,13,14,15,0} - #define ALTI_SR2_PERM {7,0,1,2,11,4,5,6,15,8,9,10,17,12,13,14} - #define ALTI_SR2_PERM64 {15,0,1,2,3,4,5,6,17,8,9,10,11,12,13,14} -#endif /* For OSX */ -#define IDSTR "SFMT-19937:122-18-1-11-1:dfffffef-ddfecb7f-bffaffff-bffffff6" +#if defined(__APPLE__) /* For OSX */ +# define ALTI_SL1 (vector unsigned int)(SL1, SL1, SL1, SL1) +# define ALTI_SR1 (vector unsigned int)(SR1, SR1, SR1, SR1) +# define ALTI_MSK (vector unsigned int)(MSK1, MSK2, MSK3, MSK4) +# define ALTI_MSK64 (vector unsigned int)(MSK2, MSK1, MSK4, MSK3) +# define ALTI_SL2_PERM \ + (vector unsigned char)(1, 2, 3, 23, 5, 6, 7, 0, 9, 10, 11, 4, \ + 13, 14, 15, 8) +# define ALTI_SL2_PERM64 \ + (vector unsigned char)(1, 2, 3, 4, 5, 6, 7, 31, 9, 10, 11, 12, \ + 13, 14, 15, 0) +# define ALTI_SR2_PERM \ + (vector unsigned char)(7, 0, 1, 2, 11, 4, 5, 6, 15, 8, 9, 10, \ + 17, 12, 13, 14) +# define ALTI_SR2_PERM64 \ + (vector unsigned char)(15, 0, 1, 2, 3, 4, 5, 6, 17, 8, 9, 10, \ + 11, 12, 13, 14) +#else /* For OTHER OSs(Linux?) 
*/ +# define ALTI_SL1 \ + { SL1, SL1, SL1, SL1 } +# define ALTI_SR1 \ + { SR1, SR1, SR1, SR1 } +# define ALTI_MSK \ + { MSK1, MSK2, MSK3, MSK4 } +# define ALTI_MSK64 \ + { MSK2, MSK1, MSK4, MSK3 } +# define ALTI_SL2_PERM \ + { 1, 2, 3, 23, 5, 6, 7, 0, 9, 10, 11, 4, 13, 14, 15, 8 } +# define ALTI_SL2_PERM64 \ + { 1, 2, 3, 4, 5, 6, 7, 31, 9, 10, 11, 12, 13, 14, 15, 0 } +# define ALTI_SR2_PERM \ + { 7, 0, 1, 2, 11, 4, 5, 6, 15, 8, 9, 10, 17, 12, 13, 14 } +# define ALTI_SR2_PERM64 \ + { 15, 0, 1, 2, 3, 4, 5, 6, 17, 8, 9, 10, 11, 12, 13, 14 } +#endif /* For OSX */ +#define IDSTR "SFMT-19937:122-18-1-11-1:dfffffef-ddfecb7f-bffaffff-bffffff6" #endif /* SFMT_PARAMS19937_H */ diff --git a/test/include/test/SFMT-params216091.h b/test/include/test/SFMT-params216091.h index 125ce282..d2d240e2 100644 --- a/test/include/test/SFMT-params216091.h +++ b/test/include/test/SFMT-params216091.h @@ -36,46 +36,56 @@ #ifndef SFMT_PARAMS216091_H #define SFMT_PARAMS216091_H -#define POS1 627 -#define SL1 11 -#define SL2 3 -#define SR1 10 -#define SR2 1 -#define MSK1 0xbff7bff7U -#define MSK2 0xbfffffffU -#define MSK3 0xbffffa7fU -#define MSK4 0xffddfbfbU -#define PARITY1 0xf8000001U -#define PARITY2 0x89e80709U -#define PARITY3 0x3bd2b64bU -#define PARITY4 0x0c64b1e4U - +#define POS1 627 +#define SL1 11 +#define SL2 3 +#define SR1 10 +#define SR2 1 +#define MSK1 0xbff7bff7U +#define MSK2 0xbfffffffU +#define MSK3 0xbffffa7fU +#define MSK4 0xffddfbfbU +#define PARITY1 0xf8000001U +#define PARITY2 0x89e80709U +#define PARITY3 0x3bd2b64bU +#define PARITY4 0x0c64b1e4U /* PARAMETERS FOR ALTIVEC */ -#if defined(__APPLE__) /* For OSX */ - #define ALTI_SL1 (vector unsigned int)(SL1, SL1, SL1, SL1) - #define ALTI_SR1 (vector unsigned int)(SR1, SR1, SR1, SR1) - #define ALTI_MSK (vector unsigned int)(MSK1, MSK2, MSK3, MSK4) - #define ALTI_MSK64 \ - (vector unsigned int)(MSK2, MSK1, MSK4, MSK3) - #define ALTI_SL2_PERM \ - (vector unsigned char)(3,21,21,21,7,0,1,2,11,4,5,6,15,8,9,10) - #define 
ALTI_SL2_PERM64 \ - (vector unsigned char)(3,4,5,6,7,29,29,29,11,12,13,14,15,0,1,2) - #define ALTI_SR2_PERM \ - (vector unsigned char)(7,0,1,2,11,4,5,6,15,8,9,10,17,12,13,14) - #define ALTI_SR2_PERM64 \ - (vector unsigned char)(15,0,1,2,3,4,5,6,17,8,9,10,11,12,13,14) -#else /* For OTHER OSs(Linux?) */ - #define ALTI_SL1 {SL1, SL1, SL1, SL1} - #define ALTI_SR1 {SR1, SR1, SR1, SR1} - #define ALTI_MSK {MSK1, MSK2, MSK3, MSK4} - #define ALTI_MSK64 {MSK2, MSK1, MSK4, MSK3} - #define ALTI_SL2_PERM {3,21,21,21,7,0,1,2,11,4,5,6,15,8,9,10} - #define ALTI_SL2_PERM64 {3,4,5,6,7,29,29,29,11,12,13,14,15,0,1,2} - #define ALTI_SR2_PERM {7,0,1,2,11,4,5,6,15,8,9,10,17,12,13,14} - #define ALTI_SR2_PERM64 {15,0,1,2,3,4,5,6,17,8,9,10,11,12,13,14} -#endif /* For OSX */ -#define IDSTR "SFMT-216091:627-11-3-10-1:bff7bff7-bfffffff-bffffa7f-ffddfbfb" +#if defined(__APPLE__) /* For OSX */ +# define ALTI_SL1 (vector unsigned int)(SL1, SL1, SL1, SL1) +# define ALTI_SR1 (vector unsigned int)(SR1, SR1, SR1, SR1) +# define ALTI_MSK (vector unsigned int)(MSK1, MSK2, MSK3, MSK4) +# define ALTI_MSK64 (vector unsigned int)(MSK2, MSK1, MSK4, MSK3) +# define ALTI_SL2_PERM \ + (vector unsigned char)(3, 21, 21, 21, 7, 0, 1, 2, 11, 4, 5, 6, \ + 15, 8, 9, 10) +# define ALTI_SL2_PERM64 \ + (vector unsigned char)(3, 4, 5, 6, 7, 29, 29, 29, 11, 12, 13, \ + 14, 15, 0, 1, 2) +# define ALTI_SR2_PERM \ + (vector unsigned char)(7, 0, 1, 2, 11, 4, 5, 6, 15, 8, 9, 10, \ + 17, 12, 13, 14) +# define ALTI_SR2_PERM64 \ + (vector unsigned char)(15, 0, 1, 2, 3, 4, 5, 6, 17, 8, 9, 10, \ + 11, 12, 13, 14) +#else /* For OTHER OSs(Linux?) 
*/ +# define ALTI_SL1 \ + { SL1, SL1, SL1, SL1 } +# define ALTI_SR1 \ + { SR1, SR1, SR1, SR1 } +# define ALTI_MSK \ + { MSK1, MSK2, MSK3, MSK4 } +# define ALTI_MSK64 \ + { MSK2, MSK1, MSK4, MSK3 } +# define ALTI_SL2_PERM \ + { 3, 21, 21, 21, 7, 0, 1, 2, 11, 4, 5, 6, 15, 8, 9, 10 } +# define ALTI_SL2_PERM64 \ + { 3, 4, 5, 6, 7, 29, 29, 29, 11, 12, 13, 14, 15, 0, 1, 2 } +# define ALTI_SR2_PERM \ + { 7, 0, 1, 2, 11, 4, 5, 6, 15, 8, 9, 10, 17, 12, 13, 14 } +# define ALTI_SR2_PERM64 \ + { 15, 0, 1, 2, 3, 4, 5, 6, 17, 8, 9, 10, 11, 12, 13, 14 } +#endif /* For OSX */ +#define IDSTR "SFMT-216091:627-11-3-10-1:bff7bff7-bfffffff-bffffa7f-ffddfbfb" #endif /* SFMT_PARAMS216091_H */ diff --git a/test/include/test/SFMT-params2281.h b/test/include/test/SFMT-params2281.h index 0ef85c40..97b8de68 100644 --- a/test/include/test/SFMT-params2281.h +++ b/test/include/test/SFMT-params2281.h @@ -36,46 +36,56 @@ #ifndef SFMT_PARAMS2281_H #define SFMT_PARAMS2281_H -#define POS1 12 -#define SL1 19 -#define SL2 1 -#define SR1 5 -#define SR2 1 -#define MSK1 0xbff7ffbfU -#define MSK2 0xfdfffffeU -#define MSK3 0xf7ffef7fU -#define MSK4 0xf2f7cbbfU -#define PARITY1 0x00000001U -#define PARITY2 0x00000000U -#define PARITY3 0x00000000U -#define PARITY4 0x41dfa600U - +#define POS1 12 +#define SL1 19 +#define SL2 1 +#define SR1 5 +#define SR2 1 +#define MSK1 0xbff7ffbfU +#define MSK2 0xfdfffffeU +#define MSK3 0xf7ffef7fU +#define MSK4 0xf2f7cbbfU +#define PARITY1 0x00000001U +#define PARITY2 0x00000000U +#define PARITY3 0x00000000U +#define PARITY4 0x41dfa600U /* PARAMETERS FOR ALTIVEC */ -#if defined(__APPLE__) /* For OSX */ - #define ALTI_SL1 (vector unsigned int)(SL1, SL1, SL1, SL1) - #define ALTI_SR1 (vector unsigned int)(SR1, SR1, SR1, SR1) - #define ALTI_MSK (vector unsigned int)(MSK1, MSK2, MSK3, MSK4) - #define ALTI_MSK64 \ - (vector unsigned int)(MSK2, MSK1, MSK4, MSK3) - #define ALTI_SL2_PERM \ - (vector unsigned char)(1,2,3,23,5,6,7,0,9,10,11,4,13,14,15,8) - #define ALTI_SL2_PERM64 \ - 
(vector unsigned char)(1,2,3,4,5,6,7,31,9,10,11,12,13,14,15,0) - #define ALTI_SR2_PERM \ - (vector unsigned char)(7,0,1,2,11,4,5,6,15,8,9,10,17,12,13,14) - #define ALTI_SR2_PERM64 \ - (vector unsigned char)(15,0,1,2,3,4,5,6,17,8,9,10,11,12,13,14) -#else /* For OTHER OSs(Linux?) */ - #define ALTI_SL1 {SL1, SL1, SL1, SL1} - #define ALTI_SR1 {SR1, SR1, SR1, SR1} - #define ALTI_MSK {MSK1, MSK2, MSK3, MSK4} - #define ALTI_MSK64 {MSK2, MSK1, MSK4, MSK3} - #define ALTI_SL2_PERM {1,2,3,23,5,6,7,0,9,10,11,4,13,14,15,8} - #define ALTI_SL2_PERM64 {1,2,3,4,5,6,7,31,9,10,11,12,13,14,15,0} - #define ALTI_SR2_PERM {7,0,1,2,11,4,5,6,15,8,9,10,17,12,13,14} - #define ALTI_SR2_PERM64 {15,0,1,2,3,4,5,6,17,8,9,10,11,12,13,14} -#endif /* For OSX */ -#define IDSTR "SFMT-2281:12-19-1-5-1:bff7ffbf-fdfffffe-f7ffef7f-f2f7cbbf" +#if defined(__APPLE__) /* For OSX */ +# define ALTI_SL1 (vector unsigned int)(SL1, SL1, SL1, SL1) +# define ALTI_SR1 (vector unsigned int)(SR1, SR1, SR1, SR1) +# define ALTI_MSK (vector unsigned int)(MSK1, MSK2, MSK3, MSK4) +# define ALTI_MSK64 (vector unsigned int)(MSK2, MSK1, MSK4, MSK3) +# define ALTI_SL2_PERM \ + (vector unsigned char)(1, 2, 3, 23, 5, 6, 7, 0, 9, 10, 11, 4, \ + 13, 14, 15, 8) +# define ALTI_SL2_PERM64 \ + (vector unsigned char)(1, 2, 3, 4, 5, 6, 7, 31, 9, 10, 11, 12, \ + 13, 14, 15, 0) +# define ALTI_SR2_PERM \ + (vector unsigned char)(7, 0, 1, 2, 11, 4, 5, 6, 15, 8, 9, 10, \ + 17, 12, 13, 14) +# define ALTI_SR2_PERM64 \ + (vector unsigned char)(15, 0, 1, 2, 3, 4, 5, 6, 17, 8, 9, 10, \ + 11, 12, 13, 14) +#else /* For OTHER OSs(Linux?) 
*/ +# define ALTI_SL1 \ + { SL1, SL1, SL1, SL1 } +# define ALTI_SR1 \ + { SR1, SR1, SR1, SR1 } +# define ALTI_MSK \ + { MSK1, MSK2, MSK3, MSK4 } +# define ALTI_MSK64 \ + { MSK2, MSK1, MSK4, MSK3 } +# define ALTI_SL2_PERM \ + { 1, 2, 3, 23, 5, 6, 7, 0, 9, 10, 11, 4, 13, 14, 15, 8 } +# define ALTI_SL2_PERM64 \ + { 1, 2, 3, 4, 5, 6, 7, 31, 9, 10, 11, 12, 13, 14, 15, 0 } +# define ALTI_SR2_PERM \ + { 7, 0, 1, 2, 11, 4, 5, 6, 15, 8, 9, 10, 17, 12, 13, 14 } +# define ALTI_SR2_PERM64 \ + { 15, 0, 1, 2, 3, 4, 5, 6, 17, 8, 9, 10, 11, 12, 13, 14 } +#endif /* For OSX */ +#define IDSTR "SFMT-2281:12-19-1-5-1:bff7ffbf-fdfffffe-f7ffef7f-f2f7cbbf" #endif /* SFMT_PARAMS2281_H */ diff --git a/test/include/test/SFMT-params4253.h b/test/include/test/SFMT-params4253.h index 9f07bc67..7e51edd8 100644 --- a/test/include/test/SFMT-params4253.h +++ b/test/include/test/SFMT-params4253.h @@ -36,46 +36,56 @@ #ifndef SFMT_PARAMS4253_H #define SFMT_PARAMS4253_H -#define POS1 17 -#define SL1 20 -#define SL2 1 -#define SR1 7 -#define SR2 1 -#define MSK1 0x9f7bffffU -#define MSK2 0x9fffff5fU -#define MSK3 0x3efffffbU -#define MSK4 0xfffff7bbU -#define PARITY1 0xa8000001U -#define PARITY2 0xaf5390a3U -#define PARITY3 0xb740b3f8U -#define PARITY4 0x6c11486dU - +#define POS1 17 +#define SL1 20 +#define SL2 1 +#define SR1 7 +#define SR2 1 +#define MSK1 0x9f7bffffU +#define MSK2 0x9fffff5fU +#define MSK3 0x3efffffbU +#define MSK4 0xfffff7bbU +#define PARITY1 0xa8000001U +#define PARITY2 0xaf5390a3U +#define PARITY3 0xb740b3f8U +#define PARITY4 0x6c11486dU /* PARAMETERS FOR ALTIVEC */ -#if defined(__APPLE__) /* For OSX */ - #define ALTI_SL1 (vector unsigned int)(SL1, SL1, SL1, SL1) - #define ALTI_SR1 (vector unsigned int)(SR1, SR1, SR1, SR1) - #define ALTI_MSK (vector unsigned int)(MSK1, MSK2, MSK3, MSK4) - #define ALTI_MSK64 \ - (vector unsigned int)(MSK2, MSK1, MSK4, MSK3) - #define ALTI_SL2_PERM \ - (vector unsigned char)(1,2,3,23,5,6,7,0,9,10,11,4,13,14,15,8) - #define ALTI_SL2_PERM64 \ - (vector 
unsigned char)(1,2,3,4,5,6,7,31,9,10,11,12,13,14,15,0) - #define ALTI_SR2_PERM \ - (vector unsigned char)(7,0,1,2,11,4,5,6,15,8,9,10,17,12,13,14) - #define ALTI_SR2_PERM64 \ - (vector unsigned char)(15,0,1,2,3,4,5,6,17,8,9,10,11,12,13,14) -#else /* For OTHER OSs(Linux?) */ - #define ALTI_SL1 {SL1, SL1, SL1, SL1} - #define ALTI_SR1 {SR1, SR1, SR1, SR1} - #define ALTI_MSK {MSK1, MSK2, MSK3, MSK4} - #define ALTI_MSK64 {MSK2, MSK1, MSK4, MSK3} - #define ALTI_SL2_PERM {1,2,3,23,5,6,7,0,9,10,11,4,13,14,15,8} - #define ALTI_SL2_PERM64 {1,2,3,4,5,6,7,31,9,10,11,12,13,14,15,0} - #define ALTI_SR2_PERM {7,0,1,2,11,4,5,6,15,8,9,10,17,12,13,14} - #define ALTI_SR2_PERM64 {15,0,1,2,3,4,5,6,17,8,9,10,11,12,13,14} -#endif /* For OSX */ -#define IDSTR "SFMT-4253:17-20-1-7-1:9f7bffff-9fffff5f-3efffffb-fffff7bb" +#if defined(__APPLE__) /* For OSX */ +# define ALTI_SL1 (vector unsigned int)(SL1, SL1, SL1, SL1) +# define ALTI_SR1 (vector unsigned int)(SR1, SR1, SR1, SR1) +# define ALTI_MSK (vector unsigned int)(MSK1, MSK2, MSK3, MSK4) +# define ALTI_MSK64 (vector unsigned int)(MSK2, MSK1, MSK4, MSK3) +# define ALTI_SL2_PERM \ + (vector unsigned char)(1, 2, 3, 23, 5, 6, 7, 0, 9, 10, 11, 4, \ + 13, 14, 15, 8) +# define ALTI_SL2_PERM64 \ + (vector unsigned char)(1, 2, 3, 4, 5, 6, 7, 31, 9, 10, 11, 12, \ + 13, 14, 15, 0) +# define ALTI_SR2_PERM \ + (vector unsigned char)(7, 0, 1, 2, 11, 4, 5, 6, 15, 8, 9, 10, \ + 17, 12, 13, 14) +# define ALTI_SR2_PERM64 \ + (vector unsigned char)(15, 0, 1, 2, 3, 4, 5, 6, 17, 8, 9, 10, \ + 11, 12, 13, 14) +#else /* For OTHER OSs(Linux?) 
*/ +# define ALTI_SL1 \ + { SL1, SL1, SL1, SL1 } +# define ALTI_SR1 \ + { SR1, SR1, SR1, SR1 } +# define ALTI_MSK \ + { MSK1, MSK2, MSK3, MSK4 } +# define ALTI_MSK64 \ + { MSK2, MSK1, MSK4, MSK3 } +# define ALTI_SL2_PERM \ + { 1, 2, 3, 23, 5, 6, 7, 0, 9, 10, 11, 4, 13, 14, 15, 8 } +# define ALTI_SL2_PERM64 \ + { 1, 2, 3, 4, 5, 6, 7, 31, 9, 10, 11, 12, 13, 14, 15, 0 } +# define ALTI_SR2_PERM \ + { 7, 0, 1, 2, 11, 4, 5, 6, 15, 8, 9, 10, 17, 12, 13, 14 } +# define ALTI_SR2_PERM64 \ + { 15, 0, 1, 2, 3, 4, 5, 6, 17, 8, 9, 10, 11, 12, 13, 14 } +#endif /* For OSX */ +#define IDSTR "SFMT-4253:17-20-1-7-1:9f7bffff-9fffff5f-3efffffb-fffff7bb" #endif /* SFMT_PARAMS4253_H */ diff --git a/test/include/test/SFMT-params44497.h b/test/include/test/SFMT-params44497.h index 85598fed..8f6fee7b 100644 --- a/test/include/test/SFMT-params44497.h +++ b/test/include/test/SFMT-params44497.h @@ -36,46 +36,56 @@ #ifndef SFMT_PARAMS44497_H #define SFMT_PARAMS44497_H -#define POS1 330 -#define SL1 5 -#define SL2 3 -#define SR1 9 -#define SR2 3 -#define MSK1 0xeffffffbU -#define MSK2 0xdfbebfffU -#define MSK3 0xbfbf7befU -#define MSK4 0x9ffd7bffU -#define PARITY1 0x00000001U -#define PARITY2 0x00000000U -#define PARITY3 0xa3ac4000U -#define PARITY4 0xecc1327aU - +#define POS1 330 +#define SL1 5 +#define SL2 3 +#define SR1 9 +#define SR2 3 +#define MSK1 0xeffffffbU +#define MSK2 0xdfbebfffU +#define MSK3 0xbfbf7befU +#define MSK4 0x9ffd7bffU +#define PARITY1 0x00000001U +#define PARITY2 0x00000000U +#define PARITY3 0xa3ac4000U +#define PARITY4 0xecc1327aU /* PARAMETERS FOR ALTIVEC */ -#if defined(__APPLE__) /* For OSX */ - #define ALTI_SL1 (vector unsigned int)(SL1, SL1, SL1, SL1) - #define ALTI_SR1 (vector unsigned int)(SR1, SR1, SR1, SR1) - #define ALTI_MSK (vector unsigned int)(MSK1, MSK2, MSK3, MSK4) - #define ALTI_MSK64 \ - (vector unsigned int)(MSK2, MSK1, MSK4, MSK3) - #define ALTI_SL2_PERM \ - (vector unsigned char)(3,21,21,21,7,0,1,2,11,4,5,6,15,8,9,10) - #define ALTI_SL2_PERM64 \ - 
(vector unsigned char)(3,4,5,6,7,29,29,29,11,12,13,14,15,0,1,2) - #define ALTI_SR2_PERM \ - (vector unsigned char)(5,6,7,0,9,10,11,4,13,14,15,8,19,19,19,12) - #define ALTI_SR2_PERM64 \ - (vector unsigned char)(13,14,15,0,1,2,3,4,19,19,19,8,9,10,11,12) -#else /* For OTHER OSs(Linux?) */ - #define ALTI_SL1 {SL1, SL1, SL1, SL1} - #define ALTI_SR1 {SR1, SR1, SR1, SR1} - #define ALTI_MSK {MSK1, MSK2, MSK3, MSK4} - #define ALTI_MSK64 {MSK2, MSK1, MSK4, MSK3} - #define ALTI_SL2_PERM {3,21,21,21,7,0,1,2,11,4,5,6,15,8,9,10} - #define ALTI_SL2_PERM64 {3,4,5,6,7,29,29,29,11,12,13,14,15,0,1,2} - #define ALTI_SR2_PERM {5,6,7,0,9,10,11,4,13,14,15,8,19,19,19,12} - #define ALTI_SR2_PERM64 {13,14,15,0,1,2,3,4,19,19,19,8,9,10,11,12} -#endif /* For OSX */ -#define IDSTR "SFMT-44497:330-5-3-9-3:effffffb-dfbebfff-bfbf7bef-9ffd7bff" +#if defined(__APPLE__) /* For OSX */ +# define ALTI_SL1 (vector unsigned int)(SL1, SL1, SL1, SL1) +# define ALTI_SR1 (vector unsigned int)(SR1, SR1, SR1, SR1) +# define ALTI_MSK (vector unsigned int)(MSK1, MSK2, MSK3, MSK4) +# define ALTI_MSK64 (vector unsigned int)(MSK2, MSK1, MSK4, MSK3) +# define ALTI_SL2_PERM \ + (vector unsigned char)(3, 21, 21, 21, 7, 0, 1, 2, 11, 4, 5, 6, \ + 15, 8, 9, 10) +# define ALTI_SL2_PERM64 \ + (vector unsigned char)(3, 4, 5, 6, 7, 29, 29, 29, 11, 12, 13, \ + 14, 15, 0, 1, 2) +# define ALTI_SR2_PERM \ + (vector unsigned char)(5, 6, 7, 0, 9, 10, 11, 4, 13, 14, 15, \ + 8, 19, 19, 19, 12) +# define ALTI_SR2_PERM64 \ + (vector unsigned char)(13, 14, 15, 0, 1, 2, 3, 4, 19, 19, 19, \ + 8, 9, 10, 11, 12) +#else /* For OTHER OSs(Linux?) 
*/ +# define ALTI_SL1 \ + { SL1, SL1, SL1, SL1 } +# define ALTI_SR1 \ + { SR1, SR1, SR1, SR1 } +# define ALTI_MSK \ + { MSK1, MSK2, MSK3, MSK4 } +# define ALTI_MSK64 \ + { MSK2, MSK1, MSK4, MSK3 } +# define ALTI_SL2_PERM \ + { 3, 21, 21, 21, 7, 0, 1, 2, 11, 4, 5, 6, 15, 8, 9, 10 } +# define ALTI_SL2_PERM64 \ + { 3, 4, 5, 6, 7, 29, 29, 29, 11, 12, 13, 14, 15, 0, 1, 2 } +# define ALTI_SR2_PERM \ + { 5, 6, 7, 0, 9, 10, 11, 4, 13, 14, 15, 8, 19, 19, 19, 12 } +# define ALTI_SR2_PERM64 \ + { 13, 14, 15, 0, 1, 2, 3, 4, 19, 19, 19, 8, 9, 10, 11, 12 } +#endif /* For OSX */ +#define IDSTR "SFMT-44497:330-5-3-9-3:effffffb-dfbebfff-bfbf7bef-9ffd7bff" #endif /* SFMT_PARAMS44497_H */ diff --git a/test/include/test/SFMT-params607.h b/test/include/test/SFMT-params607.h index bc76485f..29fb3913 100644 --- a/test/include/test/SFMT-params607.h +++ b/test/include/test/SFMT-params607.h @@ -36,46 +36,56 @@ #ifndef SFMT_PARAMS607_H #define SFMT_PARAMS607_H -#define POS1 2 -#define SL1 15 -#define SL2 3 -#define SR1 13 -#define SR2 3 -#define MSK1 0xfdff37ffU -#define MSK2 0xef7f3f7dU -#define MSK3 0xff777b7dU -#define MSK4 0x7ff7fb2fU -#define PARITY1 0x00000001U -#define PARITY2 0x00000000U -#define PARITY3 0x00000000U -#define PARITY4 0x5986f054U - +#define POS1 2 +#define SL1 15 +#define SL2 3 +#define SR1 13 +#define SR2 3 +#define MSK1 0xfdff37ffU +#define MSK2 0xef7f3f7dU +#define MSK3 0xff777b7dU +#define MSK4 0x7ff7fb2fU +#define PARITY1 0x00000001U +#define PARITY2 0x00000000U +#define PARITY3 0x00000000U +#define PARITY4 0x5986f054U /* PARAMETERS FOR ALTIVEC */ -#if defined(__APPLE__) /* For OSX */ - #define ALTI_SL1 (vector unsigned int)(SL1, SL1, SL1, SL1) - #define ALTI_SR1 (vector unsigned int)(SR1, SR1, SR1, SR1) - #define ALTI_MSK (vector unsigned int)(MSK1, MSK2, MSK3, MSK4) - #define ALTI_MSK64 \ - (vector unsigned int)(MSK2, MSK1, MSK4, MSK3) - #define ALTI_SL2_PERM \ - (vector unsigned char)(3,21,21,21,7,0,1,2,11,4,5,6,15,8,9,10) - #define ALTI_SL2_PERM64 \ - (vector 
unsigned char)(3,4,5,6,7,29,29,29,11,12,13,14,15,0,1,2) - #define ALTI_SR2_PERM \ - (vector unsigned char)(5,6,7,0,9,10,11,4,13,14,15,8,19,19,19,12) - #define ALTI_SR2_PERM64 \ - (vector unsigned char)(13,14,15,0,1,2,3,4,19,19,19,8,9,10,11,12) -#else /* For OTHER OSs(Linux?) */ - #define ALTI_SL1 {SL1, SL1, SL1, SL1} - #define ALTI_SR1 {SR1, SR1, SR1, SR1} - #define ALTI_MSK {MSK1, MSK2, MSK3, MSK4} - #define ALTI_MSK64 {MSK2, MSK1, MSK4, MSK3} - #define ALTI_SL2_PERM {3,21,21,21,7,0,1,2,11,4,5,6,15,8,9,10} - #define ALTI_SL2_PERM64 {3,4,5,6,7,29,29,29,11,12,13,14,15,0,1,2} - #define ALTI_SR2_PERM {5,6,7,0,9,10,11,4,13,14,15,8,19,19,19,12} - #define ALTI_SR2_PERM64 {13,14,15,0,1,2,3,4,19,19,19,8,9,10,11,12} -#endif /* For OSX */ -#define IDSTR "SFMT-607:2-15-3-13-3:fdff37ff-ef7f3f7d-ff777b7d-7ff7fb2f" +#if defined(__APPLE__) /* For OSX */ +# define ALTI_SL1 (vector unsigned int)(SL1, SL1, SL1, SL1) +# define ALTI_SR1 (vector unsigned int)(SR1, SR1, SR1, SR1) +# define ALTI_MSK (vector unsigned int)(MSK1, MSK2, MSK3, MSK4) +# define ALTI_MSK64 (vector unsigned int)(MSK2, MSK1, MSK4, MSK3) +# define ALTI_SL2_PERM \ + (vector unsigned char)(3, 21, 21, 21, 7, 0, 1, 2, 11, 4, 5, 6, \ + 15, 8, 9, 10) +# define ALTI_SL2_PERM64 \ + (vector unsigned char)(3, 4, 5, 6, 7, 29, 29, 29, 11, 12, 13, \ + 14, 15, 0, 1, 2) +# define ALTI_SR2_PERM \ + (vector unsigned char)(5, 6, 7, 0, 9, 10, 11, 4, 13, 14, 15, \ + 8, 19, 19, 19, 12) +# define ALTI_SR2_PERM64 \ + (vector unsigned char)(13, 14, 15, 0, 1, 2, 3, 4, 19, 19, 19, \ + 8, 9, 10, 11, 12) +#else /* For OTHER OSs(Linux?) 
*/ +# define ALTI_SL1 \ + { SL1, SL1, SL1, SL1 } +# define ALTI_SR1 \ + { SR1, SR1, SR1, SR1 } +# define ALTI_MSK \ + { MSK1, MSK2, MSK3, MSK4 } +# define ALTI_MSK64 \ + { MSK2, MSK1, MSK4, MSK3 } +# define ALTI_SL2_PERM \ + { 3, 21, 21, 21, 7, 0, 1, 2, 11, 4, 5, 6, 15, 8, 9, 10 } +# define ALTI_SL2_PERM64 \ + { 3, 4, 5, 6, 7, 29, 29, 29, 11, 12, 13, 14, 15, 0, 1, 2 } +# define ALTI_SR2_PERM \ + { 5, 6, 7, 0, 9, 10, 11, 4, 13, 14, 15, 8, 19, 19, 19, 12 } +# define ALTI_SR2_PERM64 \ + { 13, 14, 15, 0, 1, 2, 3, 4, 19, 19, 19, 8, 9, 10, 11, 12 } +#endif /* For OSX */ +#define IDSTR "SFMT-607:2-15-3-13-3:fdff37ff-ef7f3f7d-ff777b7d-7ff7fb2f" #endif /* SFMT_PARAMS607_H */ diff --git a/test/include/test/SFMT-params86243.h b/test/include/test/SFMT-params86243.h index 5e4d783c..5e3747e9 100644 --- a/test/include/test/SFMT-params86243.h +++ b/test/include/test/SFMT-params86243.h @@ -36,46 +36,56 @@ #ifndef SFMT_PARAMS86243_H #define SFMT_PARAMS86243_H -#define POS1 366 -#define SL1 6 -#define SL2 7 -#define SR1 19 -#define SR2 1 -#define MSK1 0xfdbffbffU -#define MSK2 0xbff7ff3fU -#define MSK3 0xfd77efffU -#define MSK4 0xbf9ff3ffU -#define PARITY1 0x00000001U -#define PARITY2 0x00000000U -#define PARITY3 0x00000000U -#define PARITY4 0xe9528d85U - +#define POS1 366 +#define SL1 6 +#define SL2 7 +#define SR1 19 +#define SR2 1 +#define MSK1 0xfdbffbffU +#define MSK2 0xbff7ff3fU +#define MSK3 0xfd77efffU +#define MSK4 0xbf9ff3ffU +#define PARITY1 0x00000001U +#define PARITY2 0x00000000U +#define PARITY3 0x00000000U +#define PARITY4 0xe9528d85U /* PARAMETERS FOR ALTIVEC */ -#if defined(__APPLE__) /* For OSX */ - #define ALTI_SL1 (vector unsigned int)(SL1, SL1, SL1, SL1) - #define ALTI_SR1 (vector unsigned int)(SR1, SR1, SR1, SR1) - #define ALTI_MSK (vector unsigned int)(MSK1, MSK2, MSK3, MSK4) - #define ALTI_MSK64 \ - (vector unsigned int)(MSK2, MSK1, MSK4, MSK3) - #define ALTI_SL2_PERM \ - (vector unsigned char)(25,25,25,25,3,25,25,25,7,0,1,2,11,4,5,6) - #define ALTI_SL2_PERM64 
\ - (vector unsigned char)(7,25,25,25,25,25,25,25,15,0,1,2,3,4,5,6) - #define ALTI_SR2_PERM \ - (vector unsigned char)(7,0,1,2,11,4,5,6,15,8,9,10,17,12,13,14) - #define ALTI_SR2_PERM64 \ - (vector unsigned char)(15,0,1,2,3,4,5,6,17,8,9,10,11,12,13,14) -#else /* For OTHER OSs(Linux?) */ - #define ALTI_SL1 {SL1, SL1, SL1, SL1} - #define ALTI_SR1 {SR1, SR1, SR1, SR1} - #define ALTI_MSK {MSK1, MSK2, MSK3, MSK4} - #define ALTI_MSK64 {MSK2, MSK1, MSK4, MSK3} - #define ALTI_SL2_PERM {25,25,25,25,3,25,25,25,7,0,1,2,11,4,5,6} - #define ALTI_SL2_PERM64 {7,25,25,25,25,25,25,25,15,0,1,2,3,4,5,6} - #define ALTI_SR2_PERM {7,0,1,2,11,4,5,6,15,8,9,10,17,12,13,14} - #define ALTI_SR2_PERM64 {15,0,1,2,3,4,5,6,17,8,9,10,11,12,13,14} -#endif /* For OSX */ -#define IDSTR "SFMT-86243:366-6-7-19-1:fdbffbff-bff7ff3f-fd77efff-bf9ff3ff" +#if defined(__APPLE__) /* For OSX */ +# define ALTI_SL1 (vector unsigned int)(SL1, SL1, SL1, SL1) +# define ALTI_SR1 (vector unsigned int)(SR1, SR1, SR1, SR1) +# define ALTI_MSK (vector unsigned int)(MSK1, MSK2, MSK3, MSK4) +# define ALTI_MSK64 (vector unsigned int)(MSK2, MSK1, MSK4, MSK3) +# define ALTI_SL2_PERM \ + (vector unsigned char)(25, 25, 25, 25, 3, 25, 25, 25, 7, 0, 1, \ + 2, 11, 4, 5, 6) +# define ALTI_SL2_PERM64 \ + (vector unsigned char)(7, 25, 25, 25, 25, 25, 25, 25, 15, 0, \ + 1, 2, 3, 4, 5, 6) +# define ALTI_SR2_PERM \ + (vector unsigned char)(7, 0, 1, 2, 11, 4, 5, 6, 15, 8, 9, 10, \ + 17, 12, 13, 14) +# define ALTI_SR2_PERM64 \ + (vector unsigned char)(15, 0, 1, 2, 3, 4, 5, 6, 17, 8, 9, 10, \ + 11, 12, 13, 14) +#else /* For OTHER OSs(Linux?) 
*/ +# define ALTI_SL1 \ + { SL1, SL1, SL1, SL1 } +# define ALTI_SR1 \ + { SR1, SR1, SR1, SR1 } +# define ALTI_MSK \ + { MSK1, MSK2, MSK3, MSK4 } +# define ALTI_MSK64 \ + { MSK2, MSK1, MSK4, MSK3 } +# define ALTI_SL2_PERM \ + { 25, 25, 25, 25, 3, 25, 25, 25, 7, 0, 1, 2, 11, 4, 5, 6 } +# define ALTI_SL2_PERM64 \ + { 7, 25, 25, 25, 25, 25, 25, 25, 15, 0, 1, 2, 3, 4, 5, 6 } +# define ALTI_SR2_PERM \ + { 7, 0, 1, 2, 11, 4, 5, 6, 15, 8, 9, 10, 17, 12, 13, 14 } +# define ALTI_SR2_PERM64 \ + { 15, 0, 1, 2, 3, 4, 5, 6, 17, 8, 9, 10, 11, 12, 13, 14 } +#endif /* For OSX */ +#define IDSTR "SFMT-86243:366-6-7-19-1:fdbffbff-bff7ff3f-fd77efff-bf9ff3ff" #endif /* SFMT_PARAMS86243_H */ diff --git a/test/include/test/SFMT-sse2.h b/test/include/test/SFMT-sse2.h index 169ad558..83b35b43 100644 --- a/test/include/test/SFMT-sse2.h +++ b/test/include/test/SFMT-sse2.h @@ -60,48 +60,49 @@ * @param mask 128-bit mask * @return output */ -JEMALLOC_ALWAYS_INLINE __m128i mm_recursion(__m128i *a, __m128i *b, - __m128i c, __m128i d, __m128i mask) { - __m128i v, x, y, z; +JEMALLOC_ALWAYS_INLINE __m128i +mm_recursion(__m128i *a, __m128i *b, __m128i c, __m128i d, __m128i mask) { + __m128i v, x, y, z; - x = _mm_load_si128(a); - y = _mm_srli_epi32(*b, SR1); - z = _mm_srli_si128(c, SR2); - v = _mm_slli_epi32(d, SL1); - z = _mm_xor_si128(z, x); - z = _mm_xor_si128(z, v); - x = _mm_slli_si128(x, SL2); - y = _mm_and_si128(y, mask); - z = _mm_xor_si128(z, x); - z = _mm_xor_si128(z, y); - return z; + x = _mm_load_si128(a); + y = _mm_srli_epi32(*b, SR1); + z = _mm_srli_si128(c, SR2); + v = _mm_slli_epi32(d, SL1); + z = _mm_xor_si128(z, x); + z = _mm_xor_si128(z, v); + x = _mm_slli_si128(x, SL2); + y = _mm_and_si128(y, mask); + z = _mm_xor_si128(z, x); + z = _mm_xor_si128(z, y); + return z; } /** * This function fills the internal state array with pseudorandom * integers. 
*/ -static inline void gen_rand_all(sfmt_t *ctx) { - int i; - __m128i r, r1, r2, mask; - mask = _mm_set_epi32(MSK4, MSK3, MSK2, MSK1); +static inline void +gen_rand_all(sfmt_t *ctx) { + int i; + __m128i r, r1, r2, mask; + mask = _mm_set_epi32(MSK4, MSK3, MSK2, MSK1); - r1 = _mm_load_si128(&ctx->sfmt[N - 2].si); - r2 = _mm_load_si128(&ctx->sfmt[N - 1].si); - for (i = 0; i < N - POS1; i++) { - r = mm_recursion(&ctx->sfmt[i].si, &ctx->sfmt[i + POS1].si, r1, r2, - mask); - _mm_store_si128(&ctx->sfmt[i].si, r); - r1 = r2; - r2 = r; - } - for (; i < N; i++) { - r = mm_recursion(&ctx->sfmt[i].si, &ctx->sfmt[i + POS1 - N].si, r1, r2, - mask); - _mm_store_si128(&ctx->sfmt[i].si, r); - r1 = r2; - r2 = r; - } + r1 = _mm_load_si128(&ctx->sfmt[N - 2].si); + r2 = _mm_load_si128(&ctx->sfmt[N - 1].si); + for (i = 0; i < N - POS1; i++) { + r = mm_recursion( + &ctx->sfmt[i].si, &ctx->sfmt[i + POS1].si, r1, r2, mask); + _mm_store_si128(&ctx->sfmt[i].si, r); + r1 = r2; + r2 = r; + } + for (; i < N; i++) { + r = mm_recursion(&ctx->sfmt[i].si, &ctx->sfmt[i + POS1 - N].si, + r1, r2, mask); + _mm_store_si128(&ctx->sfmt[i].si, r); + r1 = r2; + r2 = r; + } } /** @@ -111,47 +112,48 @@ static inline void gen_rand_all(sfmt_t *ctx) { * @param array an 128-bit array to be filled by pseudorandom numbers. * @param size number of 128-bit pesudorandom numbers to be generated. 
*/ -static inline void gen_rand_array(sfmt_t *ctx, w128_t *array, int size) { - int i, j; - __m128i r, r1, r2, mask; - mask = _mm_set_epi32(MSK4, MSK3, MSK2, MSK1); +static inline void +gen_rand_array(sfmt_t *ctx, w128_t *array, int size) { + int i, j; + __m128i r, r1, r2, mask; + mask = _mm_set_epi32(MSK4, MSK3, MSK2, MSK1); - r1 = _mm_load_si128(&ctx->sfmt[N - 2].si); - r2 = _mm_load_si128(&ctx->sfmt[N - 1].si); - for (i = 0; i < N - POS1; i++) { - r = mm_recursion(&ctx->sfmt[i].si, &ctx->sfmt[i + POS1].si, r1, r2, - mask); - _mm_store_si128(&array[i].si, r); - r1 = r2; - r2 = r; - } - for (; i < N; i++) { - r = mm_recursion(&ctx->sfmt[i].si, &array[i + POS1 - N].si, r1, r2, - mask); - _mm_store_si128(&array[i].si, r); - r1 = r2; - r2 = r; - } - /* main loop */ - for (; i < size - N; i++) { - r = mm_recursion(&array[i - N].si, &array[i + POS1 - N].si, r1, r2, - mask); - _mm_store_si128(&array[i].si, r); - r1 = r2; - r2 = r; - } - for (j = 0; j < 2 * N - size; j++) { - r = _mm_load_si128(&array[j + size - N].si); - _mm_store_si128(&ctx->sfmt[j].si, r); - } - for (; i < size; i++) { - r = mm_recursion(&array[i - N].si, &array[i + POS1 - N].si, r1, r2, - mask); - _mm_store_si128(&array[i].si, r); - _mm_store_si128(&ctx->sfmt[j++].si, r); - r1 = r2; - r2 = r; - } + r1 = _mm_load_si128(&ctx->sfmt[N - 2].si); + r2 = _mm_load_si128(&ctx->sfmt[N - 1].si); + for (i = 0; i < N - POS1; i++) { + r = mm_recursion( + &ctx->sfmt[i].si, &ctx->sfmt[i + POS1].si, r1, r2, mask); + _mm_store_si128(&array[i].si, r); + r1 = r2; + r2 = r; + } + for (; i < N; i++) { + r = mm_recursion( + &ctx->sfmt[i].si, &array[i + POS1 - N].si, r1, r2, mask); + _mm_store_si128(&array[i].si, r); + r1 = r2; + r2 = r; + } + /* main loop */ + for (; i < size - N; i++) { + r = mm_recursion( + &array[i - N].si, &array[i + POS1 - N].si, r1, r2, mask); + _mm_store_si128(&array[i].si, r); + r1 = r2; + r2 = r; + } + for (j = 0; j < 2 * N - size; j++) { + r = _mm_load_si128(&array[j + size - N].si); + 
_mm_store_si128(&ctx->sfmt[j].si, r); + } + for (; i < size; i++) { + r = mm_recursion( + &array[i - N].si, &array[i + POS1 - N].si, r1, r2, mask); + _mm_store_si128(&array[i].si, r); + _mm_store_si128(&ctx->sfmt[j++].si, r); + r1 = r2; + r2 = r; + } } #endif diff --git a/test/include/test/SFMT.h b/test/include/test/SFMT.h index 338dd45c..0082c026 100644 --- a/test/include/test/SFMT.h +++ b/test/include/test/SFMT.h @@ -68,79 +68,89 @@ typedef struct sfmt_s sfmt_t; -uint32_t gen_rand32(sfmt_t *ctx); -uint32_t gen_rand32_range(sfmt_t *ctx, uint32_t limit); -uint64_t gen_rand64(sfmt_t *ctx); -uint64_t gen_rand64_range(sfmt_t *ctx, uint64_t limit); -void fill_array32(sfmt_t *ctx, uint32_t *array, int size); -void fill_array64(sfmt_t *ctx, uint64_t *array, int size); -sfmt_t *init_gen_rand(uint32_t seed); -sfmt_t *init_by_array(uint32_t *init_key, int key_length); -void fini_gen_rand(sfmt_t *ctx); +uint32_t gen_rand32(sfmt_t *ctx); +uint32_t gen_rand32_range(sfmt_t *ctx, uint32_t limit); +uint64_t gen_rand64(sfmt_t *ctx); +uint64_t gen_rand64_range(sfmt_t *ctx, uint64_t limit); +void fill_array32(sfmt_t *ctx, uint32_t *array, int size); +void fill_array64(sfmt_t *ctx, uint64_t *array, int size); +sfmt_t *init_gen_rand(uint32_t seed); +sfmt_t *init_by_array(uint32_t *init_key, int key_length); +void fini_gen_rand(sfmt_t *ctx); const char *get_idstring(void); -int get_min_array_size32(void); -int get_min_array_size64(void); +int get_min_array_size32(void); +int get_min_array_size64(void); /* These real versions are due to Isaku Wada */ /** generates a random number on [0,1]-real-interval */ -static inline double to_real1(uint32_t v) { - return v * (1.0/4294967295.0); - /* divided by 2^32-1 */ +static inline double +to_real1(uint32_t v) { + return v * (1.0 / 4294967295.0); + /* divided by 2^32-1 */ } /** generates a random number on [0,1]-real-interval */ -static inline double genrand_real1(sfmt_t *ctx) { - return to_real1(gen_rand32(ctx)); +static inline double 
+genrand_real1(sfmt_t *ctx) { + return to_real1(gen_rand32(ctx)); } /** generates a random number on [0,1)-real-interval */ -static inline double to_real2(uint32_t v) { - return v * (1.0/4294967296.0); - /* divided by 2^32 */ +static inline double +to_real2(uint32_t v) { + return v * (1.0 / 4294967296.0); + /* divided by 2^32 */ } /** generates a random number on [0,1)-real-interval */ -static inline double genrand_real2(sfmt_t *ctx) { - return to_real2(gen_rand32(ctx)); +static inline double +genrand_real2(sfmt_t *ctx) { + return to_real2(gen_rand32(ctx)); } /** generates a random number on (0,1)-real-interval */ -static inline double to_real3(uint32_t v) { - return (((double)v) + 0.5)*(1.0/4294967296.0); - /* divided by 2^32 */ +static inline double +to_real3(uint32_t v) { + return (((double)v) + 0.5) * (1.0 / 4294967296.0); + /* divided by 2^32 */ } /** generates a random number on (0,1)-real-interval */ -static inline double genrand_real3(sfmt_t *ctx) { - return to_real3(gen_rand32(ctx)); +static inline double +genrand_real3(sfmt_t *ctx) { + return to_real3(gen_rand32(ctx)); } /** These real versions are due to Isaku Wada */ /** generates a random number on [0,1) with 53-bit resolution*/ -static inline double to_res53(uint64_t v) { - return v * (1.0/18446744073709551616.0L); +static inline double +to_res53(uint64_t v) { + return v * (1.0 / 18446744073709551616.0L); } /** generates a random number on [0,1) with 53-bit resolution from two * 32 bit integers */ -static inline double to_res53_mix(uint32_t x, uint32_t y) { - return to_res53(x | ((uint64_t)y << 32)); +static inline double +to_res53_mix(uint32_t x, uint32_t y) { + return to_res53(x | ((uint64_t)y << 32)); } /** generates a random number on [0,1) with 53-bit resolution */ -static inline double genrand_res53(sfmt_t *ctx) { - return to_res53(gen_rand64(ctx)); +static inline double +genrand_res53(sfmt_t *ctx) { + return to_res53(gen_rand64(ctx)); } /** generates a random number on [0,1) with 53-bit 
resolution using 32bit integer. */ -static inline double genrand_res53_mix(sfmt_t *ctx) { - uint32_t x, y; +static inline double +genrand_res53_mix(sfmt_t *ctx) { + uint32_t x, y; - x = gen_rand32(ctx); - y = gen_rand32(ctx); - return to_res53_mix(x, y); + x = gen_rand32(ctx); + y = gen_rand32(ctx); + return to_res53_mix(x, y); } #endif diff --git a/test/include/test/arena_util.h b/test/include/test/arena_util.h index 535c1aa1..431fdfae 100644 --- a/test/include/test/arena_util.h +++ b/test/include/test/arena_util.h @@ -1,25 +1,25 @@ static inline unsigned do_arena_create(ssize_t dirty_decay_ms, ssize_t muzzy_decay_ms) { unsigned arena_ind; - size_t sz = sizeof(unsigned); + size_t sz = sizeof(unsigned); expect_d_eq(mallctl("arenas.create", (void *)&arena_ind, &sz, NULL, 0), 0, "Unexpected mallctl() failure"); size_t mib[3]; - size_t miblen = sizeof(mib)/sizeof(size_t); + size_t miblen = sizeof(mib) / sizeof(size_t); - expect_d_eq(mallctlnametomib("arena.0.dirty_decay_ms", mib, &miblen), - 0, "Unexpected mallctlnametomib() failure"); + expect_d_eq(mallctlnametomib("arena.0.dirty_decay_ms", mib, &miblen), 0, + "Unexpected mallctlnametomib() failure"); mib[1] = (size_t)arena_ind; expect_d_eq(mallctlbymib(mib, miblen, NULL, NULL, - (void *)&dirty_decay_ms, sizeof(dirty_decay_ms)), 0, - "Unexpected mallctlbymib() failure"); + (void *)&dirty_decay_ms, sizeof(dirty_decay_ms)), + 0, "Unexpected mallctlbymib() failure"); - expect_d_eq(mallctlnametomib("arena.0.muzzy_decay_ms", mib, &miblen), - 0, "Unexpected mallctlnametomib() failure"); + expect_d_eq(mallctlnametomib("arena.0.muzzy_decay_ms", mib, &miblen), 0, + "Unexpected mallctlnametomib() failure"); mib[1] = (size_t)arena_ind; expect_d_eq(mallctlbymib(mib, miblen, NULL, NULL, - (void *)&muzzy_decay_ms, sizeof(muzzy_decay_ms)), 0, - "Unexpected mallctlbymib() failure"); + (void *)&muzzy_decay_ms, sizeof(muzzy_decay_ms)), + 0, "Unexpected mallctlbymib() failure"); return arena_ind; } @@ -33,7 +33,7 @@ 
do_arena_destroy(unsigned arena_ind) { mallctl("thread.tcache.flush", NULL, NULL, NULL, 0); size_t mib[3]; - size_t miblen = sizeof(mib)/sizeof(size_t); + size_t miblen = sizeof(mib) / sizeof(size_t); expect_d_eq(mallctlnametomib("arena.0.destroy", mib, &miblen), 0, "Unexpected mallctlnametomib() failure"); mib[1] = (size_t)arena_ind; @@ -51,7 +51,7 @@ do_epoch(void) { static inline void do_purge(unsigned arena_ind) { size_t mib[3]; - size_t miblen = sizeof(mib)/sizeof(size_t); + size_t miblen = sizeof(mib) / sizeof(size_t); expect_d_eq(mallctlnametomib("arena.0.purge", mib, &miblen), 0, "Unexpected mallctlnametomib() failure"); mib[1] = (size_t)arena_ind; @@ -62,7 +62,7 @@ do_purge(unsigned arena_ind) { static inline void do_decay(unsigned arena_ind) { size_t mib[3]; - size_t miblen = sizeof(mib)/sizeof(size_t); + size_t miblen = sizeof(mib) / sizeof(size_t); expect_d_eq(mallctlnametomib("arena.0.decay", mib, &miblen), 0, "Unexpected mallctlnametomib() failure"); mib[1] = (size_t)arena_ind; @@ -73,12 +73,12 @@ do_decay(unsigned arena_ind) { static inline uint64_t get_arena_npurge_impl(const char *mibname, unsigned arena_ind) { size_t mib[4]; - size_t miblen = sizeof(mib)/sizeof(size_t); + size_t miblen = sizeof(mib) / sizeof(size_t); expect_d_eq(mallctlnametomib(mibname, mib, &miblen), 0, "Unexpected mallctlnametomib() failure"); mib[2] = (size_t)arena_ind; uint64_t npurge = 0; - size_t sz = sizeof(npurge); + size_t sz = sizeof(npurge); expect_d_eq(mallctlbymib(mib, miblen, (void *)&npurge, &sz, NULL, 0), config_stats ? 
0 : ENOENT, "Unexpected mallctlbymib() failure"); return npurge; @@ -105,15 +105,15 @@ get_arena_muzzy_npurge(unsigned arena_ind) { static inline uint64_t get_arena_npurge(unsigned arena_ind) { do_epoch(); - return get_arena_npurge_impl("stats.arenas.0.dirty_npurge", arena_ind) + - get_arena_npurge_impl("stats.arenas.0.muzzy_npurge", arena_ind); + return get_arena_npurge_impl("stats.arenas.0.dirty_npurge", arena_ind) + + get_arena_npurge_impl("stats.arenas.0.muzzy_npurge", arena_ind); } static inline size_t get_arena_pdirty(unsigned arena_ind) { do_epoch(); size_t mib[4]; - size_t miblen = sizeof(mib)/sizeof(size_t); + size_t miblen = sizeof(mib) / sizeof(size_t); expect_d_eq(mallctlnametomib("stats.arenas.0.pdirty", mib, &miblen), 0, "Unexpected mallctlnametomib() failure"); mib[2] = (size_t)arena_ind; @@ -128,7 +128,7 @@ static inline size_t get_arena_pmuzzy(unsigned arena_ind) { do_epoch(); size_t mib[4]; - size_t miblen = sizeof(mib)/sizeof(size_t); + size_t miblen = sizeof(mib) / sizeof(size_t); expect_d_eq(mallctlnametomib("stats.arenas.0.pmuzzy", mib, &miblen), 0, "Unexpected mallctlnametomib() failure"); mib[2] = (size_t)arena_ind; @@ -148,8 +148,7 @@ do_mallocx(size_t size, int flags) { static inline void generate_dirty(unsigned arena_ind, size_t size) { - int flags = MALLOCX_ARENA(arena_ind) | MALLOCX_TCACHE_NONE; + int flags = MALLOCX_ARENA(arena_ind) | MALLOCX_TCACHE_NONE; void *p = do_mallocx(size, flags); dallocx(p, flags); } - diff --git a/test/include/test/bench.h b/test/include/test/bench.h index e2a9fc09..faebfd77 100644 --- a/test/include/test/bench.h +++ b/test/include/test/bench.h @@ -1,6 +1,6 @@ static inline void -time_func(timedelta_t *timer, uint64_t nwarmup, uint64_t niter, - void (*func)(void)) { +time_func( + timedelta_t *timer, uint64_t nwarmup, uint64_t niter, void (*func)(void)) { uint64_t i; for (i = 0; i < nwarmup; i++) { @@ -23,16 +23,16 @@ fmt_nsecs(uint64_t usec, uint64_t iters, char *buf) { uint64_t nsecs_per_iter1000 = nsec1000 
/ iters; uint64_t intpart = nsecs_per_iter1000 / 1000; uint64_t fracpart = nsecs_per_iter1000 % 1000; - malloc_snprintf(buf, FMT_NSECS_BUF_SIZE, "%" FMTu64 ".%03" FMTu64, intpart, - fracpart); + malloc_snprintf(buf, FMT_NSECS_BUF_SIZE, "%" FMTu64 ".%03" FMTu64, + intpart, fracpart); } static inline void compare_funcs(uint64_t nwarmup, uint64_t niter, const char *name_a, - void (*func_a), const char *name_b, void (*func_b)) { + void(*func_a), const char *name_b, void(*func_b)) { timedelta_t timer_a, timer_b; - char ratio_buf[6]; - void *p; + char ratio_buf[6]; + void *p; p = mallocx(1, 0); if (p == NULL) { @@ -44,16 +44,18 @@ compare_funcs(uint64_t nwarmup, uint64_t niter, const char *name_a, time_func(&timer_b, nwarmup, niter, (void (*)(void))func_b); uint64_t usec_a = timer_usec(&timer_a); - char buf_a[FMT_NSECS_BUF_SIZE]; + char buf_a[FMT_NSECS_BUF_SIZE]; fmt_nsecs(usec_a, niter, buf_a); uint64_t usec_b = timer_usec(&timer_b); - char buf_b[FMT_NSECS_BUF_SIZE]; + char buf_b[FMT_NSECS_BUF_SIZE]; fmt_nsecs(usec_b, niter, buf_b); timer_ratio(&timer_a, &timer_b, ratio_buf, sizeof(ratio_buf)); - malloc_printf("%" FMTu64 " iterations, %s=%" FMTu64 "us (%s ns/iter), " - "%s=%" FMTu64 "us (%s ns/iter), time consumption ratio=%s:1\n", + malloc_printf("%" FMTu64 " iterations, %s=%" FMTu64 + "us (%s ns/iter), " + "%s=%" FMTu64 + "us (%s ns/iter), time consumption ratio=%s:1\n", niter, name_a, usec_a, buf_a, name_b, usec_b, buf_b, ratio_buf); dallocx(p, 0); @@ -62,10 +64,10 @@ compare_funcs(uint64_t nwarmup, uint64_t niter, const char *name_a, static inline void * no_opt_ptr(void *ptr) { #ifdef JEMALLOC_HAVE_ASM_VOLATILE - asm volatile("" : "+r"(ptr)); + asm volatile("" : "+r"(ptr)); #else - void *volatile dup = ptr; - ptr = dup; + void *volatile dup = ptr; + ptr = dup; #endif - return ptr; + return ptr; } diff --git a/test/include/test/bgthd.h b/test/include/test/bgthd.h index 4fa2395e..0a7e789b 100644 --- a/test/include/test/bgthd.h +++ b/test/include/test/bgthd.h @@ -5,9 
+5,9 @@ static inline bool is_background_thread_enabled(void) { - bool enabled; + bool enabled; size_t sz = sizeof(bool); - int ret = mallctl("background_thread", (void *)&enabled, &sz, NULL,0); + int ret = mallctl("background_thread", (void *)&enabled, &sz, NULL, 0); if (ret == ENOENT) { return false; } diff --git a/test/include/test/btalloc.h b/test/include/test/btalloc.h index 8f345993..04a336d5 100644 --- a/test/include/test/btalloc.h +++ b/test/include/test/btalloc.h @@ -1,30 +1,28 @@ /* btalloc() provides a mechanism for allocating via permuted backtraces. */ -void *btalloc(size_t size, unsigned bits); +void *btalloc(size_t size, unsigned bits); -#define btalloc_n_proto(n) \ -void *btalloc_##n(size_t size, unsigned bits); -btalloc_n_proto(0) -btalloc_n_proto(1) +#define btalloc_n_proto(n) void *btalloc_##n(size_t size, unsigned bits); +btalloc_n_proto(0) btalloc_n_proto(1) -#define btalloc_n_gen(n) \ -void * \ -btalloc_##n(size_t size, unsigned bits) { \ - void *p; \ - \ - if (bits == 0) { \ - p = mallocx(size, 0); \ - } else { \ - switch (bits & 0x1U) { \ - case 0: \ - p = (btalloc_0(size, bits >> 1)); \ - break; \ - case 1: \ - p = (btalloc_1(size, bits >> 1)); \ - break; \ - default: not_reached(); \ - } \ - } \ - /* Intentionally sabotage tail call optimization. */ \ - expect_ptr_not_null(p, "Unexpected mallocx() failure"); \ - return p; \ -} +#define btalloc_n_gen(n) \ + void *btalloc_##n(size_t size, unsigned bits) { \ + void *p; \ + \ + if (bits == 0) { \ + p = mallocx(size, 0); \ + } else { \ + switch (bits & 0x1U) { \ + case 0: \ + p = (btalloc_0(size, bits >> 1)); \ + break; \ + case 1: \ + p = (btalloc_1(size, bits >> 1)); \ + break; \ + default: \ + not_reached(); \ + } \ + } \ + /* Intentionally sabotage tail call optimization. 
*/ \ + expect_ptr_not_null(p, "Unexpected mallocx() failure"); \ + return p; \ + } diff --git a/test/include/test/extent_hooks.h b/test/include/test/extent_hooks.h index aad0a46c..33bb8593 100644 --- a/test/include/test/extent_hooks.h +++ b/test/include/test/extent_hooks.h @@ -3,40 +3,33 @@ * passthrough. */ -static void *extent_alloc_hook(extent_hooks_t *extent_hooks, void *new_addr, +static void *extent_alloc_hook(extent_hooks_t *extent_hooks, void *new_addr, size_t size, size_t alignment, bool *zero, bool *commit, unsigned arena_ind); -static bool extent_dalloc_hook(extent_hooks_t *extent_hooks, void *addr, - size_t size, bool committed, unsigned arena_ind); -static void extent_destroy_hook(extent_hooks_t *extent_hooks, void *addr, - size_t size, bool committed, unsigned arena_ind); -static bool extent_commit_hook(extent_hooks_t *extent_hooks, void *addr, - size_t size, size_t offset, size_t length, unsigned arena_ind); -static bool extent_decommit_hook(extent_hooks_t *extent_hooks, void *addr, - size_t size, size_t offset, size_t length, unsigned arena_ind); -static bool extent_purge_lazy_hook(extent_hooks_t *extent_hooks, void *addr, - size_t size, size_t offset, size_t length, unsigned arena_ind); -static bool extent_purge_forced_hook(extent_hooks_t *extent_hooks, - void *addr, size_t size, size_t offset, size_t length, unsigned arena_ind); -static bool extent_split_hook(extent_hooks_t *extent_hooks, void *addr, - size_t size, size_t size_a, size_t size_b, bool committed, - unsigned arena_ind); -static bool extent_merge_hook(extent_hooks_t *extent_hooks, void *addr_a, - size_t size_a, void *addr_b, size_t size_b, bool committed, - unsigned arena_ind); +static bool extent_dalloc_hook(extent_hooks_t *extent_hooks, void *addr, + size_t size, bool committed, unsigned arena_ind); +static void extent_destroy_hook(extent_hooks_t *extent_hooks, void *addr, + size_t size, bool committed, unsigned arena_ind); +static bool extent_commit_hook(extent_hooks_t 
*extent_hooks, void *addr, + size_t size, size_t offset, size_t length, unsigned arena_ind); +static bool extent_decommit_hook(extent_hooks_t *extent_hooks, void *addr, + size_t size, size_t offset, size_t length, unsigned arena_ind); +static bool extent_purge_lazy_hook(extent_hooks_t *extent_hooks, void *addr, + size_t size, size_t offset, size_t length, unsigned arena_ind); +static bool extent_purge_forced_hook(extent_hooks_t *extent_hooks, void *addr, + size_t size, size_t offset, size_t length, unsigned arena_ind); +static bool extent_split_hook(extent_hooks_t *extent_hooks, void *addr, + size_t size, size_t size_a, size_t size_b, bool committed, + unsigned arena_ind); +static bool extent_merge_hook(extent_hooks_t *extent_hooks, void *addr_a, + size_t size_a, void *addr_b, size_t size_b, bool committed, + unsigned arena_ind); static extent_hooks_t *default_hooks; -static extent_hooks_t hooks = { - extent_alloc_hook, - extent_dalloc_hook, - extent_destroy_hook, - extent_commit_hook, - extent_decommit_hook, - extent_purge_lazy_hook, - extent_purge_forced_hook, - extent_split_hook, - extent_merge_hook -}; +static extent_hooks_t hooks = {extent_alloc_hook, extent_dalloc_hook, + extent_destroy_hook, extent_commit_hook, extent_decommit_hook, + extent_purge_lazy_hook, extent_purge_forced_hook, extent_split_hook, + extent_merge_hook}; /* Control whether hook functions pass calls through to default hooks. */ static bool try_alloc = true; @@ -72,9 +65,9 @@ static bool did_split; static bool did_merge; #if 0 -# define TRACE_HOOK(fmt, ...) malloc_printf(fmt, __VA_ARGS__) +# define TRACE_HOOK(fmt, ...) malloc_printf(fmt, __VA_ARGS__) #else -# define TRACE_HOOK(fmt, ...) +# define TRACE_HOOK(fmt, ...) 
#endif static void * @@ -82,20 +75,21 @@ extent_alloc_hook(extent_hooks_t *extent_hooks, void *new_addr, size_t size, size_t alignment, bool *zero, bool *commit, unsigned arena_ind) { void *ret; - TRACE_HOOK("%s(extent_hooks=%p, new_addr=%p, size=%zu, alignment=%zu, " - "*zero=%s, *commit=%s, arena_ind=%u)\n", __func__, extent_hooks, - new_addr, size, alignment, *zero ? "true" : "false", *commit ? - "true" : "false", arena_ind); + TRACE_HOOK( + "%s(extent_hooks=%p, new_addr=%p, size=%zu, alignment=%zu, " + "*zero=%s, *commit=%s, arena_ind=%u)\n", + __func__, extent_hooks, new_addr, size, alignment, + *zero ? "true" : "false", *commit ? "true" : "false", arena_ind); expect_ptr_eq(extent_hooks, &hooks, "extent_hooks should be same as pointer used to set hooks"); - expect_ptr_eq(extent_hooks->alloc, extent_alloc_hook, - "Wrong hook function"); + expect_ptr_eq( + extent_hooks->alloc, extent_alloc_hook, "Wrong hook function"); called_alloc = true; if (!try_alloc) { return NULL; } - ret = default_hooks->alloc(default_hooks, new_addr, size, alignment, - zero, commit, 0); + ret = default_hooks->alloc( + default_hooks, new_addr, size, alignment, zero, commit, 0); did_alloc = (ret != NULL); return ret; } @@ -105,13 +99,15 @@ extent_dalloc_hook(extent_hooks_t *extent_hooks, void *addr, size_t size, bool committed, unsigned arena_ind) { bool err; - TRACE_HOOK("%s(extent_hooks=%p, addr=%p, size=%zu, committed=%s, " - "arena_ind=%u)\n", __func__, extent_hooks, addr, size, committed ? - "true" : "false", arena_ind); + TRACE_HOOK( + "%s(extent_hooks=%p, addr=%p, size=%zu, committed=%s, " + "arena_ind=%u)\n", + __func__, extent_hooks, addr, size, committed ? 
"true" : "false", + arena_ind); expect_ptr_eq(extent_hooks, &hooks, "extent_hooks should be same as pointer used to set hooks"); - expect_ptr_eq(extent_hooks->dalloc, extent_dalloc_hook, - "Wrong hook function"); + expect_ptr_eq( + extent_hooks->dalloc, extent_dalloc_hook, "Wrong hook function"); called_dalloc = true; if (!try_dalloc) { return true; @@ -124,13 +120,15 @@ extent_dalloc_hook(extent_hooks_t *extent_hooks, void *addr, size_t size, static void extent_destroy_hook(extent_hooks_t *extent_hooks, void *addr, size_t size, bool committed, unsigned arena_ind) { - TRACE_HOOK("%s(extent_hooks=%p, addr=%p, size=%zu, committed=%s, " - "arena_ind=%u)\n", __func__, extent_hooks, addr, size, committed ? - "true" : "false", arena_ind); + TRACE_HOOK( + "%s(extent_hooks=%p, addr=%p, size=%zu, committed=%s, " + "arena_ind=%u)\n", + __func__, extent_hooks, addr, size, committed ? "true" : "false", + arena_ind); expect_ptr_eq(extent_hooks, &hooks, "extent_hooks should be same as pointer used to set hooks"); - expect_ptr_eq(extent_hooks->destroy, extent_destroy_hook, - "Wrong hook function"); + expect_ptr_eq( + extent_hooks->destroy, extent_destroy_hook, "Wrong hook function"); called_destroy = true; if (!try_destroy) { return; @@ -144,19 +142,20 @@ extent_commit_hook(extent_hooks_t *extent_hooks, void *addr, size_t size, size_t offset, size_t length, unsigned arena_ind) { bool err; - TRACE_HOOK("%s(extent_hooks=%p, addr=%p, size=%zu, offset=%zu, " - "length=%zu, arena_ind=%u)\n", __func__, extent_hooks, addr, size, - offset, length, arena_ind); + TRACE_HOOK( + "%s(extent_hooks=%p, addr=%p, size=%zu, offset=%zu, " + "length=%zu, arena_ind=%u)\n", + __func__, extent_hooks, addr, size, offset, length, arena_ind); expect_ptr_eq(extent_hooks, &hooks, "extent_hooks should be same as pointer used to set hooks"); - expect_ptr_eq(extent_hooks->commit, extent_commit_hook, - "Wrong hook function"); + expect_ptr_eq( + extent_hooks->commit, extent_commit_hook, "Wrong hook function"); 
called_commit = true; if (!try_commit) { return true; } - err = default_hooks->commit(default_hooks, addr, size, offset, length, - 0); + err = default_hooks->commit( + default_hooks, addr, size, offset, length, 0); did_commit = !err; return err; } @@ -166,9 +165,10 @@ extent_decommit_hook(extent_hooks_t *extent_hooks, void *addr, size_t size, size_t offset, size_t length, unsigned arena_ind) { bool err; - TRACE_HOOK("%s(extent_hooks=%p, addr=%p, size=%zu, offset=%zu, " - "length=%zu, arena_ind=%u)\n", __func__, extent_hooks, addr, size, - offset, length, arena_ind); + TRACE_HOOK( + "%s(extent_hooks=%p, addr=%p, size=%zu, offset=%zu, " + "length=%zu, arena_ind=%u)\n", + __func__, extent_hooks, addr, size, offset, length, arena_ind); expect_ptr_eq(extent_hooks, &hooks, "extent_hooks should be same as pointer used to set hooks"); expect_ptr_eq(extent_hooks->decommit, extent_decommit_hook, @@ -177,8 +177,8 @@ extent_decommit_hook(extent_hooks_t *extent_hooks, void *addr, size_t size, if (!try_decommit) { return true; } - err = default_hooks->decommit(default_hooks, addr, size, offset, length, - 0); + err = default_hooks->decommit( + default_hooks, addr, size, offset, length, 0); did_decommit = !err; return err; } @@ -188,9 +188,10 @@ extent_purge_lazy_hook(extent_hooks_t *extent_hooks, void *addr, size_t size, size_t offset, size_t length, unsigned arena_ind) { bool err; - TRACE_HOOK("%s(extent_hooks=%p, addr=%p, size=%zu, offset=%zu, " - "length=%zu arena_ind=%u)\n", __func__, extent_hooks, addr, size, - offset, length, arena_ind); + TRACE_HOOK( + "%s(extent_hooks=%p, addr=%p, size=%zu, offset=%zu, " + "length=%zu arena_ind=%u)\n", + __func__, extent_hooks, addr, size, offset, length, arena_ind); expect_ptr_eq(extent_hooks, &hooks, "extent_hooks should be same as pointer used to set hooks"); expect_ptr_eq(extent_hooks->purge_lazy, extent_purge_lazy_hook, @@ -199,9 +200,9 @@ extent_purge_lazy_hook(extent_hooks_t *extent_hooks, void *addr, size_t size, if 
(!try_purge_lazy) { return true; } - err = default_hooks->purge_lazy == NULL || - default_hooks->purge_lazy(default_hooks, addr, size, offset, length, - 0); + err = default_hooks->purge_lazy == NULL + || default_hooks->purge_lazy( + default_hooks, addr, size, offset, length, 0); did_purge_lazy = !err; return err; } @@ -211,9 +212,10 @@ extent_purge_forced_hook(extent_hooks_t *extent_hooks, void *addr, size_t size, size_t offset, size_t length, unsigned arena_ind) { bool err; - TRACE_HOOK("%s(extent_hooks=%p, addr=%p, size=%zu, offset=%zu, " - "length=%zu arena_ind=%u)\n", __func__, extent_hooks, addr, size, - offset, length, arena_ind); + TRACE_HOOK( + "%s(extent_hooks=%p, addr=%p, size=%zu, offset=%zu, " + "length=%zu arena_ind=%u)\n", + __func__, extent_hooks, addr, size, offset, length, arena_ind); expect_ptr_eq(extent_hooks, &hooks, "extent_hooks should be same as pointer used to set hooks"); expect_ptr_eq(extent_hooks->purge_forced, extent_purge_forced_hook, @@ -222,9 +224,9 @@ extent_purge_forced_hook(extent_hooks_t *extent_hooks, void *addr, size_t size, if (!try_purge_forced) { return true; } - err = default_hooks->purge_forced == NULL || - default_hooks->purge_forced(default_hooks, addr, size, offset, - length, 0); + err = default_hooks->purge_forced == NULL + || default_hooks->purge_forced( + default_hooks, addr, size, offset, length, 0); did_purge_forced = !err; return err; } @@ -234,21 +236,22 @@ extent_split_hook(extent_hooks_t *extent_hooks, void *addr, size_t size, size_t size_a, size_t size_b, bool committed, unsigned arena_ind) { bool err; - TRACE_HOOK("%s(extent_hooks=%p, addr=%p, size=%zu, size_a=%zu, " - "size_b=%zu, committed=%s, arena_ind=%u)\n", __func__, extent_hooks, - addr, size, size_a, size_b, committed ? "true" : "false", - arena_ind); + TRACE_HOOK( + "%s(extent_hooks=%p, addr=%p, size=%zu, size_a=%zu, " + "size_b=%zu, committed=%s, arena_ind=%u)\n", + __func__, extent_hooks, addr, size, size_a, size_b, + committed ? 
"true" : "false", arena_ind); expect_ptr_eq(extent_hooks, &hooks, "extent_hooks should be same as pointer used to set hooks"); - expect_ptr_eq(extent_hooks->split, extent_split_hook, - "Wrong hook function"); + expect_ptr_eq( + extent_hooks->split, extent_split_hook, "Wrong hook function"); called_split = true; if (!try_split) { return true; } - err = (default_hooks->split == NULL || - default_hooks->split(default_hooks, addr, size, size_a, size_b, - committed, 0)); + err = (default_hooks->split == NULL + || default_hooks->split( + default_hooks, addr, size, size_a, size_b, committed, 0)); did_split = !err; return err; } @@ -258,23 +261,24 @@ extent_merge_hook(extent_hooks_t *extent_hooks, void *addr_a, size_t size_a, void *addr_b, size_t size_b, bool committed, unsigned arena_ind) { bool err; - TRACE_HOOK("%s(extent_hooks=%p, addr_a=%p, size_a=%zu, addr_b=%p " - "size_b=%zu, committed=%s, arena_ind=%u)\n", __func__, extent_hooks, - addr_a, size_a, addr_b, size_b, committed ? "true" : "false", - arena_ind); + TRACE_HOOK( + "%s(extent_hooks=%p, addr_a=%p, size_a=%zu, addr_b=%p " + "size_b=%zu, committed=%s, arena_ind=%u)\n", + __func__, extent_hooks, addr_a, size_a, addr_b, size_b, + committed ? 
"true" : "false", arena_ind); expect_ptr_eq(extent_hooks, &hooks, "extent_hooks should be same as pointer used to set hooks"); - expect_ptr_eq(extent_hooks->merge, extent_merge_hook, - "Wrong hook function"); + expect_ptr_eq( + extent_hooks->merge, extent_merge_hook, "Wrong hook function"); expect_ptr_eq((void *)((uintptr_t)addr_a + size_a), addr_b, "Extents not mergeable"); called_merge = true; if (!try_merge) { return true; } - err = (default_hooks->merge == NULL || - default_hooks->merge(default_hooks, addr_a, size_a, addr_b, size_b, - committed, 0)); + err = (default_hooks->merge == NULL + || default_hooks->merge( + default_hooks, addr_a, size_a, addr_b, size_b, committed, 0)); did_merge = !err; return err; } @@ -285,5 +289,6 @@ extent_hooks_prep(void) { sz = sizeof(default_hooks); expect_d_eq(mallctl("arena.0.extent_hooks", (void *)&default_hooks, &sz, - NULL, 0), 0, "Unexpected mallctl() error"); + NULL, 0), + 0, "Unexpected mallctl() error"); } diff --git a/test/include/test/fork.h b/test/include/test/fork.h index ac9b1858..9e04d279 100644 --- a/test/include/test/fork.h +++ b/test/include/test/fork.h @@ -3,7 +3,7 @@ #ifndef _WIN32 -#include +# include static inline void fork_wait_for_child_exit(int pid) { @@ -13,8 +13,10 @@ fork_wait_for_child_exit(int pid) { test_fail("Unexpected waitpid() failure."); } if (WIFSIGNALED(status)) { - test_fail("Unexpected child termination due to " - "signal %d", WTERMSIG(status)); + test_fail( + "Unexpected child termination due to " + "signal %d", + WTERMSIG(status)); break; } if (WIFEXITED(status)) { diff --git a/test/include/test/math.h b/test/include/test/math.h index efba086d..c9b32e91 100644 --- a/test/include/test/math.h +++ b/test/include/test/math.h @@ -27,9 +27,12 @@ ln_gamma(double x) { z = 1.0 / (x * x); - return f + (x-0.5) * log(x) - x + 0.918938533204673 + - (((-0.000595238095238 * z + 0.000793650793651) * z - - 0.002777777777778) * z + 0.083333333333333) / x; + return f + (x - 0.5) * log(x) - x + 
0.918938533204673 + + (((-0.000595238095238 * z + 0.000793650793651) * z + - 0.002777777777778) + * z + + 0.083333333333333) + / x; } /* @@ -43,8 +46,8 @@ ln_gamma(double x) { */ static inline double i_gamma(double x, double p, double ln_gamma_p) { - double acu, factor, oflo, gin, term, rn, a, b, an, dif; - double pn[6]; + double acu, factor, oflo, gin, term, rn, a, b, an, dif; + double pn[6]; unsigned i; assert(p > 0.0); @@ -91,7 +94,7 @@ i_gamma(double x, double p, double ln_gamma_p) { term += 1.0; an = a * term; for (i = 0; i < 2; i++) { - pn[i+4] = b * pn[i+2] - an * pn[i]; + pn[i + 4] = b * pn[i + 2] - an * pn[i]; } if (pn[5] != 0.0) { rn = pn[4] / pn[5]; @@ -103,7 +106,7 @@ i_gamma(double x, double p, double ln_gamma_p) { gin = rn; } for (i = 0; i < 4; i++) { - pn[i] = pn[i+2]; + pn[i] = pn[i + 2]; } if (fabs(pn[4]) >= oflo) { @@ -135,16 +138,35 @@ pt_norm(double p) { if (fabs(q) <= 0.425) { /* p close to 1/2. */ r = 0.180625 - q * q; - return q * (((((((2.5090809287301226727e3 * r + - 3.3430575583588128105e4) * r + 6.7265770927008700853e4) * r - + 4.5921953931549871457e4) * r + 1.3731693765509461125e4) * - r + 1.9715909503065514427e3) * r + 1.3314166789178437745e2) - * r + 3.3871328727963666080e0) / - (((((((5.2264952788528545610e3 * r + - 2.8729085735721942674e4) * r + 3.9307895800092710610e4) * r - + 2.1213794301586595867e4) * r + 5.3941960214247511077e3) * - r + 6.8718700749205790830e2) * r + 4.2313330701600911252e1) - * r + 1.0); + return q + * (((((((2.5090809287301226727e3 * r + + 3.3430575583588128105e4) + * r + + 6.7265770927008700853e4) + * r + + 4.5921953931549871457e4) + * r + + 1.3731693765509461125e4) + * r + + 1.9715909503065514427e3) + * r + + 1.3314166789178437745e2) + * r + + 3.3871328727963666080e0) + / (((((((5.2264952788528545610e3 * r + + 2.8729085735721942674e4) + * r + + 3.9307895800092710610e4) + * r + + 2.1213794301586595867e4) + * r + + 5.3941960214247511077e3) + * r + + 6.8718700749205790830e2) + * r + + 4.2313330701600911252e1) + 
* r + + 1.0); } else { if (q < 0.0) { r = p; @@ -157,40 +179,65 @@ pt_norm(double p) { if (r <= 5.0) { /* p neither close to 1/2 nor 0 or 1. */ r -= 1.6; - ret = ((((((((7.74545014278341407640e-4 * r + - 2.27238449892691845833e-2) * r + - 2.41780725177450611770e-1) * r + - 1.27045825245236838258e0) * r + - 3.64784832476320460504e0) * r + - 5.76949722146069140550e0) * r + - 4.63033784615654529590e0) * r + - 1.42343711074968357734e0) / - (((((((1.05075007164441684324e-9 * r + - 5.47593808499534494600e-4) * r + - 1.51986665636164571966e-2) - * r + 1.48103976427480074590e-1) * r + - 6.89767334985100004550e-1) * r + - 1.67638483018380384940e0) * r + - 2.05319162663775882187e0) * r + 1.0)); + ret = ((((((((7.74545014278341407640e-4 * r + + 2.27238449892691845833e-2) + * r + + 2.41780725177450611770e-1) + * r + + 1.27045825245236838258e0) + * r + + 3.64784832476320460504e0) + * r + + 5.76949722146069140550e0) + * r + + 4.63033784615654529590e0) + * r + + 1.42343711074968357734e0) + / (((((((1.05075007164441684324e-9 * r + + 5.47593808499534494600e-4) + * r + + 1.51986665636164571966e-2) + * r + + 1.48103976427480074590e-1) + * r + + 6.89767334985100004550e-1) + * r + + 1.67638483018380384940e0) + * r + + 2.05319162663775882187e0) + * r + + 1.0)); } else { /* p near 0 or 1. 
*/ r -= 5.0; - ret = ((((((((2.01033439929228813265e-7 * r + - 2.71155556874348757815e-5) * r + - 1.24266094738807843860e-3) * r + - 2.65321895265761230930e-2) * r + - 2.96560571828504891230e-1) * r + - 1.78482653991729133580e0) * r + - 5.46378491116411436990e0) * r + - 6.65790464350110377720e0) / - (((((((2.04426310338993978564e-15 * r + - 1.42151175831644588870e-7) * r + - 1.84631831751005468180e-5) * r + - 7.86869131145613259100e-4) * r + - 1.48753612908506148525e-2) * r + - 1.36929880922735805310e-1) * r + - 5.99832206555887937690e-1) - * r + 1.0)); + ret = ((((((((2.01033439929228813265e-7 * r + + 2.71155556874348757815e-5) + * r + + 1.24266094738807843860e-3) + * r + + 2.65321895265761230930e-2) + * r + + 2.96560571828504891230e-1) + * r + + 1.78482653991729133580e0) + * r + + 5.46378491116411436990e0) + * r + + 6.65790464350110377720e0) + / (((((((2.04426310338993978564e-15 * r + + 1.42151175831644588870e-7) + * r + + 1.84631831751005468180e-5) + * r + + 7.86869131145613259100e-4) + * r + + 1.48753612908506148525e-2) + * r + + 1.36929880922735805310e-1) + * r + + 5.99832206555887937690e-1) + * r + + 1.0)); } if (q < 0.0) { ret = -ret; @@ -244,8 +291,9 @@ pt_chi2(double p, double df, double ln_gamma_df_2) { ch = df * pow(x * sqrt(p1) + 1.0 - p1, 3.0); /* Starting approximation for p tending to 1. 
*/ if (ch > 2.2 * df + 6.0) { - ch = -2.0 * (log(1.0 - p) - c * log(0.5 * ch) + - ln_gamma_df_2); + ch = -2.0 + * (log(1.0 - p) - c * log(0.5 * ch) + + ln_gamma_df_2); } } else { ch = 0.4; @@ -254,10 +302,13 @@ pt_chi2(double p, double df, double ln_gamma_df_2) { q = ch; p1 = 1.0 + ch * (4.67 + ch); p2 = ch * (6.73 + ch * (6.66 + ch)); - t = -0.5 + (4.67 + 2.0 * ch) / p1 - (6.73 + ch - * (13.32 + 3.0 * ch)) / p2; - ch -= (1.0 - exp(a + ln_gamma_df_2 + 0.5 * ch + - c * aa) * p2 / p1) / t; + t = -0.5 + (4.67 + 2.0 * ch) / p1 + - (6.73 + ch * (13.32 + 3.0 * ch)) / p2; + ch -= (1.0 + - exp(a + ln_gamma_df_2 + 0.5 * ch + + c * aa) + * p2 / p1) + / t; if (fabs(q / ch - 1.0) - 0.01 <= 0.0) { break; } @@ -276,17 +327,36 @@ pt_chi2(double p, double df, double ln_gamma_df_2) { t = p2 * exp(xx * aa + ln_gamma_df_2 + p1 - c * log(ch)); b = t / ch; a = 0.5 * t - b * c; - s1 = (210.0 + a * (140.0 + a * (105.0 + a * (84.0 + a * (70.0 + - 60.0 * a))))) / 420.0; - s2 = (420.0 + a * (735.0 + a * (966.0 + a * (1141.0 + 1278.0 * - a)))) / 2520.0; + s1 = (210.0 + + a + * (140.0 + + a + * (105.0 + + a * (84.0 + a * (70.0 + 60.0 * a))))) + / 420.0; + s2 = + (420.0 + + a * (735.0 + a * (966.0 + a * (1141.0 + 1278.0 * a)))) + / 2520.0; s3 = (210.0 + a * (462.0 + a * (707.0 + 932.0 * a))) / 2520.0; - s4 = (252.0 + a * (672.0 + 1182.0 * a) + c * (294.0 + a * - (889.0 + 1740.0 * a))) / 5040.0; + s4 = (252.0 + a * (672.0 + 1182.0 * a) + + c * (294.0 + a * (889.0 + 1740.0 * a))) + / 5040.0; s5 = (84.0 + 264.0 * a + c * (175.0 + 606.0 * a)) / 2520.0; s6 = (120.0 + c * (346.0 + 127.0 * c)) / 5040.0; - ch += t * (1.0 + 0.5 * t * s1 - b * c * (s1 - b * (s2 - b * (s3 - - b * (s4 - b * (s5 - b * s6)))))); + ch += t + * (1.0 + 0.5 * t * s1 + - b * c + * (s1 + - b + * (s2 + - b + * (s3 + - b + * (s4 + - b + * (s5 + - b * s6)))))); if (fabs(q / ch - 1.0) <= e) { break; } diff --git a/test/include/test/mq.h b/test/include/test/mq.h index 5dc6486c..4a68d709 100644 --- a/test/include/test/mq.h +++ 
b/test/include/test/mq.h @@ -26,82 +26,74 @@ * does not perform any cleanup of messages, since it knows nothing of their * payloads. */ -#define mq_msg(a_mq_msg_type) ql_elm(a_mq_msg_type) +#define mq_msg(a_mq_msg_type) ql_elm(a_mq_msg_type) -#define mq_gen(a_attr, a_prefix, a_mq_type, a_mq_msg_type, a_field) \ -typedef struct { \ - mtx_t lock; \ - ql_head(a_mq_msg_type) msgs; \ - unsigned count; \ -} a_mq_type; \ -a_attr bool \ -a_prefix##init(a_mq_type *mq) { \ - \ - if (mtx_init(&mq->lock)) { \ - return true; \ - } \ - ql_new(&mq->msgs); \ - mq->count = 0; \ - return false; \ -} \ -a_attr void \ -a_prefix##fini(a_mq_type *mq) { \ - mtx_fini(&mq->lock); \ -} \ -a_attr unsigned \ -a_prefix##count(a_mq_type *mq) { \ - unsigned count; \ - \ - mtx_lock(&mq->lock); \ - count = mq->count; \ - mtx_unlock(&mq->lock); \ - return count; \ -} \ -a_attr a_mq_msg_type * \ -a_prefix##tryget(a_mq_type *mq) { \ - a_mq_msg_type *msg; \ - \ - mtx_lock(&mq->lock); \ - msg = ql_first(&mq->msgs); \ - if (msg != NULL) { \ - ql_head_remove(&mq->msgs, a_mq_msg_type, a_field); \ - mq->count--; \ - } \ - mtx_unlock(&mq->lock); \ - return msg; \ -} \ -a_attr a_mq_msg_type * \ -a_prefix##get(a_mq_type *mq) { \ - a_mq_msg_type *msg; \ - unsigned ns; \ - \ - msg = a_prefix##tryget(mq); \ - if (msg != NULL) { \ - return msg; \ - } \ - \ - ns = 1; \ - while (true) { \ - sleep_ns(ns); \ - msg = a_prefix##tryget(mq); \ - if (msg != NULL) { \ - return msg; \ - } \ - if (ns < 1000*1000*1000) { \ - /* Double sleep time, up to max 1 second. 
*/ \ - ns <<= 1; \ - if (ns > 1000*1000*1000) { \ - ns = 1000*1000*1000; \ - } \ - } \ - } \ -} \ -a_attr void \ -a_prefix##put(a_mq_type *mq, a_mq_msg_type *msg) { \ - \ - mtx_lock(&mq->lock); \ - ql_elm_new(msg, a_field); \ - ql_tail_insert(&mq->msgs, msg, a_field); \ - mq->count++; \ - mtx_unlock(&mq->lock); \ -} +#define mq_gen(a_attr, a_prefix, a_mq_type, a_mq_msg_type, a_field) \ + typedef struct { \ + mtx_t lock; \ + ql_head(a_mq_msg_type) msgs; \ + unsigned count; \ + } a_mq_type; \ + a_attr bool a_prefix##init(a_mq_type *mq) { \ + if (mtx_init(&mq->lock)) { \ + return true; \ + } \ + ql_new(&mq->msgs); \ + mq->count = 0; \ + return false; \ + } \ + a_attr void a_prefix##fini(a_mq_type *mq) { \ + mtx_fini(&mq->lock); \ + } \ + a_attr unsigned a_prefix##count(a_mq_type *mq) { \ + unsigned count; \ + \ + mtx_lock(&mq->lock); \ + count = mq->count; \ + mtx_unlock(&mq->lock); \ + return count; \ + } \ + a_attr a_mq_msg_type *a_prefix##tryget(a_mq_type *mq) { \ + a_mq_msg_type *msg; \ + \ + mtx_lock(&mq->lock); \ + msg = ql_first(&mq->msgs); \ + if (msg != NULL) { \ + ql_head_remove(&mq->msgs, a_mq_msg_type, a_field); \ + mq->count--; \ + } \ + mtx_unlock(&mq->lock); \ + return msg; \ + } \ + a_attr a_mq_msg_type *a_prefix##get(a_mq_type *mq) { \ + a_mq_msg_type *msg; \ + unsigned ns; \ + \ + msg = a_prefix##tryget(mq); \ + if (msg != NULL) { \ + return msg; \ + } \ + \ + ns = 1; \ + while (true) { \ + sleep_ns(ns); \ + msg = a_prefix##tryget(mq); \ + if (msg != NULL) { \ + return msg; \ + } \ + if (ns < 1000 * 1000 * 1000) { \ + /* Double sleep time, up to max 1 second. 
*/ \ + ns <<= 1; \ + if (ns > 1000 * 1000 * 1000) { \ + ns = 1000 * 1000 * 1000; \ + } \ + } \ + } \ + } \ + a_attr void a_prefix##put(a_mq_type *mq, a_mq_msg_type *msg) { \ + mtx_lock(&mq->lock); \ + ql_elm_new(msg, a_field); \ + ql_tail_insert(&mq->msgs, msg, a_field); \ + mq->count++; \ + mtx_unlock(&mq->lock); \ + } diff --git a/test/include/test/mtx.h b/test/include/test/mtx.h index 066a2137..c771ca3a 100644 --- a/test/include/test/mtx.h +++ b/test/include/test/mtx.h @@ -7,15 +7,15 @@ typedef struct { #ifdef _WIN32 - CRITICAL_SECTION lock; + CRITICAL_SECTION lock; #elif (defined(JEMALLOC_OS_UNFAIR_LOCK)) - os_unfair_lock lock; + os_unfair_lock lock; #else - pthread_mutex_t lock; + pthread_mutex_t lock; #endif } mtx_t; -bool mtx_init(mtx_t *mtx); -void mtx_fini(mtx_t *mtx); -void mtx_lock(mtx_t *mtx); -void mtx_unlock(mtx_t *mtx); +bool mtx_init(mtx_t *mtx); +void mtx_fini(mtx_t *mtx); +void mtx_lock(mtx_t *mtx); +void mtx_unlock(mtx_t *mtx); diff --git a/test/include/test/nbits.h b/test/include/test/nbits.h index c06cf1b4..2c30a61c 100644 --- a/test/include/test/nbits.h +++ b/test/include/test/nbits.h @@ -3,109 +3,109 @@ /* Interesting bitmap counts to test. 
*/ -#define NBITS_TAB \ - NB( 1) \ - NB( 2) \ - NB( 3) \ - NB( 4) \ - NB( 5) \ - NB( 6) \ - NB( 7) \ - NB( 8) \ - NB( 9) \ - NB(10) \ - NB(11) \ - NB(12) \ - NB(13) \ - NB(14) \ - NB(15) \ - NB(16) \ - NB(17) \ - NB(18) \ - NB(19) \ - NB(20) \ - NB(21) \ - NB(22) \ - NB(23) \ - NB(24) \ - NB(25) \ - NB(26) \ - NB(27) \ - NB(28) \ - NB(29) \ - NB(30) \ - NB(31) \ - NB(32) \ - \ - NB(33) \ - NB(34) \ - NB(35) \ - NB(36) \ - NB(37) \ - NB(38) \ - NB(39) \ - NB(40) \ - NB(41) \ - NB(42) \ - NB(43) \ - NB(44) \ - NB(45) \ - NB(46) \ - NB(47) \ - NB(48) \ - NB(49) \ - NB(50) \ - NB(51) \ - NB(52) \ - NB(53) \ - NB(54) \ - NB(55) \ - NB(56) \ - NB(57) \ - NB(58) \ - NB(59) \ - NB(60) \ - NB(61) \ - NB(62) \ - NB(63) \ - NB(64) \ - NB(65) \ - NB(66) \ - NB(67) \ - \ - NB(126) \ - NB(127) \ - NB(128) \ - NB(129) \ - NB(130) \ - \ - NB(254) \ - NB(255) \ - NB(256) \ - NB(257) \ - NB(258) \ - \ - NB(510) \ - NB(511) \ - NB(512) \ - NB(513) \ - NB(514) \ - \ - NB(1022) \ - NB(1023) \ - NB(1024) \ - NB(1025) \ - NB(1026) \ - \ - NB(2048) \ - \ - NB(4094) \ - NB(4095) \ - NB(4096) \ - NB(4097) \ - NB(4098) \ - \ - NB(8192) \ - NB(16384) +#define NBITS_TAB \ + NB(1) \ + NB(2) \ + NB(3) \ + NB(4) \ + NB(5) \ + NB(6) \ + NB(7) \ + NB(8) \ + NB(9) \ + NB(10) \ + NB(11) \ + NB(12) \ + NB(13) \ + NB(14) \ + NB(15) \ + NB(16) \ + NB(17) \ + NB(18) \ + NB(19) \ + NB(20) \ + NB(21) \ + NB(22) \ + NB(23) \ + NB(24) \ + NB(25) \ + NB(26) \ + NB(27) \ + NB(28) \ + NB(29) \ + NB(30) \ + NB(31) \ + NB(32) \ + \ + NB(33) \ + NB(34) \ + NB(35) \ + NB(36) \ + NB(37) \ + NB(38) \ + NB(39) \ + NB(40) \ + NB(41) \ + NB(42) \ + NB(43) \ + NB(44) \ + NB(45) \ + NB(46) \ + NB(47) \ + NB(48) \ + NB(49) \ + NB(50) \ + NB(51) \ + NB(52) \ + NB(53) \ + NB(54) \ + NB(55) \ + NB(56) \ + NB(57) \ + NB(58) \ + NB(59) \ + NB(60) \ + NB(61) \ + NB(62) \ + NB(63) \ + NB(64) \ + NB(65) \ + NB(66) \ + NB(67) \ + \ + NB(126) \ + NB(127) \ + NB(128) \ + NB(129) \ + NB(130) \ + \ + NB(254) \ + NB(255) \ + NB(256) \ + 
NB(257) \ + NB(258) \ + \ + NB(510) \ + NB(511) \ + NB(512) \ + NB(513) \ + NB(514) \ + \ + NB(1022) \ + NB(1023) \ + NB(1024) \ + NB(1025) \ + NB(1026) \ + \ + NB(2048) \ + \ + NB(4094) \ + NB(4095) \ + NB(4096) \ + NB(4097) \ + NB(4098) \ + \ + NB(8192) \ + NB(16384) #endif /* TEST_NBITS_H */ diff --git a/test/include/test/san.h b/test/include/test/san.h index da07865c..65a235e9 100644 --- a/test/include/test/san.h +++ b/test/include/test/san.h @@ -1,9 +1,9 @@ #if defined(JEMALLOC_UAF_DETECTION) || defined(JEMALLOC_DEBUG) -# define TEST_SAN_UAF_ALIGN_ENABLE "lg_san_uaf_align:12" -# define TEST_SAN_UAF_ALIGN_DISABLE "lg_san_uaf_align:-1" +# define TEST_SAN_UAF_ALIGN_ENABLE "lg_san_uaf_align:12" +# define TEST_SAN_UAF_ALIGN_DISABLE "lg_san_uaf_align:-1" #else -# define TEST_SAN_UAF_ALIGN_ENABLE "" -# define TEST_SAN_UAF_ALIGN_DISABLE "" +# define TEST_SAN_UAF_ALIGN_ENABLE "" +# define TEST_SAN_UAF_ALIGN_DISABLE "" #endif static inline bool @@ -11,4 +11,3 @@ extent_is_guarded(tsdn_t *tsdn, void *ptr) { edata_t *edata = emap_edata_lookup(tsdn, &arena_emap_global, ptr); return edata_guarded_get(edata); } - diff --git a/test/include/test/test.h b/test/include/test/test.h index 80ca7cbb..025c167d 100644 --- a/test/include/test/test.h +++ b/test/include/test/test.h @@ -1,502 +1,503 @@ -#define ASSERT_BUFSIZE 256 +#define ASSERT_BUFSIZE 256 -#define verify_cmp(may_abort, t, a, b, cmp, neg_cmp, pri, ...) do { \ - const t a_ = (a); \ - const t b_ = (b); \ - if (!(a_ cmp b_)) { \ - char prefix[ASSERT_BUFSIZE]; \ - char message[ASSERT_BUFSIZE]; \ - malloc_snprintf(prefix, sizeof(prefix), \ - "%s:%s:%d: Failed assertion: " \ - "(%s) " #cmp " (%s) --> " \ - "%" pri " " #neg_cmp " %" pri ": ", \ - __func__, __FILE__, __LINE__, \ - #a, #b, a_, b_); \ - malloc_snprintf(message, sizeof(message), __VA_ARGS__); \ - p_test_fail(may_abort, prefix, message); \ - } \ -} while (0) +#define verify_cmp(may_abort, t, a, b, cmp, neg_cmp, pri, ...) 
\ + do { \ + const t a_ = (a); \ + const t b_ = (b); \ + if (!(a_ cmp b_)) { \ + char prefix[ASSERT_BUFSIZE]; \ + char message[ASSERT_BUFSIZE]; \ + malloc_snprintf(prefix, sizeof(prefix), \ + "%s:%s:%d: Failed assertion: " \ + "(%s) " #cmp \ + " (%s) --> " \ + "%" pri " " #neg_cmp " %" pri ": ", \ + __func__, __FILE__, __LINE__, #a, #b, a_, b_); \ + malloc_snprintf( \ + message, sizeof(message), __VA_ARGS__); \ + p_test_fail(may_abort, prefix, message); \ + } \ + } while (0) -#define expect_cmp(t, a, b, cmp, neg_cmp, pri, ...) verify_cmp(false, \ - t, a, b, cmp, neg_cmp, pri, __VA_ARGS__) +#define expect_cmp(t, a, b, cmp, neg_cmp, pri, ...) \ + verify_cmp(false, t, a, b, cmp, neg_cmp, pri, __VA_ARGS__) -#define expect_ptr_eq(a, b, ...) expect_cmp(void *, a, b, ==, \ - !=, "p", __VA_ARGS__) -#define expect_ptr_ne(a, b, ...) expect_cmp(void *, a, b, !=, \ - ==, "p", __VA_ARGS__) -#define expect_ptr_null(a, ...) expect_cmp(void *, a, NULL, ==, \ - !=, "p", __VA_ARGS__) -#define expect_ptr_not_null(a, ...) expect_cmp(void *, a, NULL, !=, \ - ==, "p", __VA_ARGS__) +#define expect_ptr_eq(a, b, ...) \ + expect_cmp(void *, a, b, ==, !=, "p", __VA_ARGS__) +#define expect_ptr_ne(a, b, ...) \ + expect_cmp(void *, a, b, !=, ==, "p", __VA_ARGS__) +#define expect_ptr_null(a, ...) \ + expect_cmp(void *, a, NULL, ==, !=, "p", __VA_ARGS__) +#define expect_ptr_not_null(a, ...) \ + expect_cmp(void *, a, NULL, !=, ==, "p", __VA_ARGS__) -#define expect_c_eq(a, b, ...) expect_cmp(char, a, b, ==, !=, "c", __VA_ARGS__) -#define expect_c_ne(a, b, ...) expect_cmp(char, a, b, !=, ==, "c", __VA_ARGS__) -#define expect_c_lt(a, b, ...) expect_cmp(char, a, b, <, >=, "c", __VA_ARGS__) -#define expect_c_le(a, b, ...) expect_cmp(char, a, b, <=, >, "c", __VA_ARGS__) -#define expect_c_ge(a, b, ...) expect_cmp(char, a, b, >=, <, "c", __VA_ARGS__) -#define expect_c_gt(a, b, ...) expect_cmp(char, a, b, >, <=, "c", __VA_ARGS__) +#define expect_c_eq(a, b, ...) 
expect_cmp(char, a, b, ==, !=, "c", __VA_ARGS__) +#define expect_c_ne(a, b, ...) expect_cmp(char, a, b, !=, ==, "c", __VA_ARGS__) +#define expect_c_lt(a, b, ...) expect_cmp(char, a, b, <, >=, "c", __VA_ARGS__) +#define expect_c_le(a, b, ...) expect_cmp(char, a, b, <=, >, "c", __VA_ARGS__) +#define expect_c_ge(a, b, ...) expect_cmp(char, a, b, >=, <, "c", __VA_ARGS__) +#define expect_c_gt(a, b, ...) expect_cmp(char, a, b, >, <=, "c", __VA_ARGS__) -#define expect_x_eq(a, b, ...) expect_cmp(int, a, b, ==, !=, "#x", __VA_ARGS__) -#define expect_x_ne(a, b, ...) expect_cmp(int, a, b, !=, ==, "#x", __VA_ARGS__) -#define expect_x_lt(a, b, ...) expect_cmp(int, a, b, <, >=, "#x", __VA_ARGS__) -#define expect_x_le(a, b, ...) expect_cmp(int, a, b, <=, >, "#x", __VA_ARGS__) -#define expect_x_ge(a, b, ...) expect_cmp(int, a, b, >=, <, "#x", __VA_ARGS__) -#define expect_x_gt(a, b, ...) expect_cmp(int, a, b, >, <=, "#x", __VA_ARGS__) +#define expect_x_eq(a, b, ...) expect_cmp(int, a, b, ==, !=, "#x", __VA_ARGS__) +#define expect_x_ne(a, b, ...) expect_cmp(int, a, b, !=, ==, "#x", __VA_ARGS__) +#define expect_x_lt(a, b, ...) expect_cmp(int, a, b, <, >=, "#x", __VA_ARGS__) +#define expect_x_le(a, b, ...) expect_cmp(int, a, b, <=, >, "#x", __VA_ARGS__) +#define expect_x_ge(a, b, ...) expect_cmp(int, a, b, >=, <, "#x", __VA_ARGS__) +#define expect_x_gt(a, b, ...) expect_cmp(int, a, b, >, <=, "#x", __VA_ARGS__) -#define expect_d_eq(a, b, ...) expect_cmp(int, a, b, ==, !=, "d", __VA_ARGS__) -#define expect_d_ne(a, b, ...) expect_cmp(int, a, b, !=, ==, "d", __VA_ARGS__) -#define expect_d_lt(a, b, ...) expect_cmp(int, a, b, <, >=, "d", __VA_ARGS__) -#define expect_d_le(a, b, ...) expect_cmp(int, a, b, <=, >, "d", __VA_ARGS__) -#define expect_d_ge(a, b, ...) expect_cmp(int, a, b, >=, <, "d", __VA_ARGS__) -#define expect_d_gt(a, b, ...) expect_cmp(int, a, b, >, <=, "d", __VA_ARGS__) +#define expect_d_eq(a, b, ...) 
expect_cmp(int, a, b, ==, !=, "d", __VA_ARGS__) +#define expect_d_ne(a, b, ...) expect_cmp(int, a, b, !=, ==, "d", __VA_ARGS__) +#define expect_d_lt(a, b, ...) expect_cmp(int, a, b, <, >=, "d", __VA_ARGS__) +#define expect_d_le(a, b, ...) expect_cmp(int, a, b, <=, >, "d", __VA_ARGS__) +#define expect_d_ge(a, b, ...) expect_cmp(int, a, b, >=, <, "d", __VA_ARGS__) +#define expect_d_gt(a, b, ...) expect_cmp(int, a, b, >, <=, "d", __VA_ARGS__) -#define expect_u_eq(a, b, ...) expect_cmp(int, a, b, ==, !=, "u", __VA_ARGS__) -#define expect_u_ne(a, b, ...) expect_cmp(int, a, b, !=, ==, "u", __VA_ARGS__) -#define expect_u_lt(a, b, ...) expect_cmp(int, a, b, <, >=, "u", __VA_ARGS__) -#define expect_u_le(a, b, ...) expect_cmp(int, a, b, <=, >, "u", __VA_ARGS__) -#define expect_u_ge(a, b, ...) expect_cmp(int, a, b, >=, <, "u", __VA_ARGS__) -#define expect_u_gt(a, b, ...) expect_cmp(int, a, b, >, <=, "u", __VA_ARGS__) +#define expect_u_eq(a, b, ...) expect_cmp(int, a, b, ==, !=, "u", __VA_ARGS__) +#define expect_u_ne(a, b, ...) expect_cmp(int, a, b, !=, ==, "u", __VA_ARGS__) +#define expect_u_lt(a, b, ...) expect_cmp(int, a, b, <, >=, "u", __VA_ARGS__) +#define expect_u_le(a, b, ...) expect_cmp(int, a, b, <=, >, "u", __VA_ARGS__) +#define expect_u_ge(a, b, ...) expect_cmp(int, a, b, >=, <, "u", __VA_ARGS__) +#define expect_u_gt(a, b, ...) expect_cmp(int, a, b, >, <=, "u", __VA_ARGS__) -#define expect_ld_eq(a, b, ...) expect_cmp(long, a, b, ==, \ - !=, "ld", __VA_ARGS__) -#define expect_ld_ne(a, b, ...) expect_cmp(long, a, b, !=, \ - ==, "ld", __VA_ARGS__) -#define expect_ld_lt(a, b, ...) expect_cmp(long, a, b, <, \ - >=, "ld", __VA_ARGS__) -#define expect_ld_le(a, b, ...) expect_cmp(long, a, b, <=, \ - >, "ld", __VA_ARGS__) -#define expect_ld_ge(a, b, ...) expect_cmp(long, a, b, >=, \ - <, "ld", __VA_ARGS__) -#define expect_ld_gt(a, b, ...) expect_cmp(long, a, b, >, \ - <=, "ld", __VA_ARGS__) +#define expect_ld_eq(a, b, ...) 
\ + expect_cmp(long, a, b, ==, !=, "ld", __VA_ARGS__) +#define expect_ld_ne(a, b, ...) \ + expect_cmp(long, a, b, !=, ==, "ld", __VA_ARGS__) +#define expect_ld_lt(a, b, ...) expect_cmp(long, a, b, <, >=, "ld", __VA_ARGS__) +#define expect_ld_le(a, b, ...) expect_cmp(long, a, b, <=, >, "ld", __VA_ARGS__) +#define expect_ld_ge(a, b, ...) expect_cmp(long, a, b, >=, <, "ld", __VA_ARGS__) +#define expect_ld_gt(a, b, ...) expect_cmp(long, a, b, >, <=, "ld", __VA_ARGS__) -#define expect_lu_eq(a, b, ...) expect_cmp(unsigned long, \ - a, b, ==, !=, "lu", __VA_ARGS__) -#define expect_lu_ne(a, b, ...) expect_cmp(unsigned long, \ - a, b, !=, ==, "lu", __VA_ARGS__) -#define expect_lu_lt(a, b, ...) expect_cmp(unsigned long, \ - a, b, <, >=, "lu", __VA_ARGS__) -#define expect_lu_le(a, b, ...) expect_cmp(unsigned long, \ - a, b, <=, >, "lu", __VA_ARGS__) -#define expect_lu_ge(a, b, ...) expect_cmp(unsigned long, \ - a, b, >=, <, "lu", __VA_ARGS__) -#define expect_lu_gt(a, b, ...) expect_cmp(unsigned long, \ - a, b, >, <=, "lu", __VA_ARGS__) +#define expect_lu_eq(a, b, ...) \ + expect_cmp(unsigned long, a, b, ==, !=, "lu", __VA_ARGS__) +#define expect_lu_ne(a, b, ...) \ + expect_cmp(unsigned long, a, b, !=, ==, "lu", __VA_ARGS__) +#define expect_lu_lt(a, b, ...) \ + expect_cmp(unsigned long, a, b, <, >=, "lu", __VA_ARGS__) +#define expect_lu_le(a, b, ...) \ + expect_cmp(unsigned long, a, b, <=, >, "lu", __VA_ARGS__) +#define expect_lu_ge(a, b, ...) \ + expect_cmp(unsigned long, a, b, >=, <, "lu", __VA_ARGS__) +#define expect_lu_gt(a, b, ...) \ + expect_cmp(unsigned long, a, b, >, <=, "lu", __VA_ARGS__) -#define expect_qd_eq(a, b, ...) expect_cmp(long long, a, b, ==, \ - !=, "qd", __VA_ARGS__) -#define expect_qd_ne(a, b, ...) expect_cmp(long long, a, b, !=, \ - ==, "qd", __VA_ARGS__) -#define expect_qd_lt(a, b, ...) expect_cmp(long long, a, b, <, \ - >=, "qd", __VA_ARGS__) -#define expect_qd_le(a, b, ...) 
expect_cmp(long long, a, b, <=, \ - >, "qd", __VA_ARGS__) -#define expect_qd_ge(a, b, ...) expect_cmp(long long, a, b, >=, \ - <, "qd", __VA_ARGS__) -#define expect_qd_gt(a, b, ...) expect_cmp(long long, a, b, >, \ - <=, "qd", __VA_ARGS__) +#define expect_qd_eq(a, b, ...) \ + expect_cmp(long long, a, b, ==, !=, "qd", __VA_ARGS__) +#define expect_qd_ne(a, b, ...) \ + expect_cmp(long long, a, b, !=, ==, "qd", __VA_ARGS__) +#define expect_qd_lt(a, b, ...) \ + expect_cmp(long long, a, b, <, >=, "qd", __VA_ARGS__) +#define expect_qd_le(a, b, ...) \ + expect_cmp(long long, a, b, <=, >, "qd", __VA_ARGS__) +#define expect_qd_ge(a, b, ...) \ + expect_cmp(long long, a, b, >=, <, "qd", __VA_ARGS__) +#define expect_qd_gt(a, b, ...) \ + expect_cmp(long long, a, b, >, <=, "qd", __VA_ARGS__) -#define expect_qu_eq(a, b, ...) expect_cmp(unsigned long long, \ - a, b, ==, !=, "qu", __VA_ARGS__) -#define expect_qu_ne(a, b, ...) expect_cmp(unsigned long long, \ - a, b, !=, ==, "qu", __VA_ARGS__) -#define expect_qu_lt(a, b, ...) expect_cmp(unsigned long long, \ - a, b, <, >=, "qu", __VA_ARGS__) -#define expect_qu_le(a, b, ...) expect_cmp(unsigned long long, \ - a, b, <=, >, "qu", __VA_ARGS__) -#define expect_qu_ge(a, b, ...) expect_cmp(unsigned long long, \ - a, b, >=, <, "qu", __VA_ARGS__) -#define expect_qu_gt(a, b, ...) expect_cmp(unsigned long long, \ - a, b, >, <=, "qu", __VA_ARGS__) +#define expect_qu_eq(a, b, ...) \ + expect_cmp(unsigned long long, a, b, ==, !=, "qu", __VA_ARGS__) +#define expect_qu_ne(a, b, ...) \ + expect_cmp(unsigned long long, a, b, !=, ==, "qu", __VA_ARGS__) +#define expect_qu_lt(a, b, ...) \ + expect_cmp(unsigned long long, a, b, <, >=, "qu", __VA_ARGS__) +#define expect_qu_le(a, b, ...) \ + expect_cmp(unsigned long long, a, b, <=, >, "qu", __VA_ARGS__) +#define expect_qu_ge(a, b, ...) \ + expect_cmp(unsigned long long, a, b, >=, <, "qu", __VA_ARGS__) +#define expect_qu_gt(a, b, ...) 
\ + expect_cmp(unsigned long long, a, b, >, <=, "qu", __VA_ARGS__) -#define expect_jd_eq(a, b, ...) expect_cmp(intmax_t, a, b, ==, \ - !=, "jd", __VA_ARGS__) -#define expect_jd_ne(a, b, ...) expect_cmp(intmax_t, a, b, !=, \ - ==, "jd", __VA_ARGS__) -#define expect_jd_lt(a, b, ...) expect_cmp(intmax_t, a, b, <, \ - >=, "jd", __VA_ARGS__) -#define expect_jd_le(a, b, ...) expect_cmp(intmax_t, a, b, <=, \ - >, "jd", __VA_ARGS__) -#define expect_jd_ge(a, b, ...) expect_cmp(intmax_t, a, b, >=, \ - <, "jd", __VA_ARGS__) -#define expect_jd_gt(a, b, ...) expect_cmp(intmax_t, a, b, >, \ - <=, "jd", __VA_ARGS__) +#define expect_jd_eq(a, b, ...) \ + expect_cmp(intmax_t, a, b, ==, !=, "jd", __VA_ARGS__) +#define expect_jd_ne(a, b, ...) \ + expect_cmp(intmax_t, a, b, !=, ==, "jd", __VA_ARGS__) +#define expect_jd_lt(a, b, ...) \ + expect_cmp(intmax_t, a, b, <, >=, "jd", __VA_ARGS__) +#define expect_jd_le(a, b, ...) \ + expect_cmp(intmax_t, a, b, <=, >, "jd", __VA_ARGS__) +#define expect_jd_ge(a, b, ...) \ + expect_cmp(intmax_t, a, b, >=, <, "jd", __VA_ARGS__) +#define expect_jd_gt(a, b, ...) \ + expect_cmp(intmax_t, a, b, >, <=, "jd", __VA_ARGS__) -#define expect_ju_eq(a, b, ...) expect_cmp(uintmax_t, a, b, ==, \ - !=, "ju", __VA_ARGS__) -#define expect_ju_ne(a, b, ...) expect_cmp(uintmax_t, a, b, !=, \ - ==, "ju", __VA_ARGS__) -#define expect_ju_lt(a, b, ...) expect_cmp(uintmax_t, a, b, <, \ - >=, "ju", __VA_ARGS__) -#define expect_ju_le(a, b, ...) expect_cmp(uintmax_t, a, b, <=, \ - >, "ju", __VA_ARGS__) -#define expect_ju_ge(a, b, ...) expect_cmp(uintmax_t, a, b, >=, \ - <, "ju", __VA_ARGS__) -#define expect_ju_gt(a, b, ...) expect_cmp(uintmax_t, a, b, >, \ - <=, "ju", __VA_ARGS__) +#define expect_ju_eq(a, b, ...) \ + expect_cmp(uintmax_t, a, b, ==, !=, "ju", __VA_ARGS__) +#define expect_ju_ne(a, b, ...) \ + expect_cmp(uintmax_t, a, b, !=, ==, "ju", __VA_ARGS__) +#define expect_ju_lt(a, b, ...) 
\ + expect_cmp(uintmax_t, a, b, <, >=, "ju", __VA_ARGS__) +#define expect_ju_le(a, b, ...) \ + expect_cmp(uintmax_t, a, b, <=, >, "ju", __VA_ARGS__) +#define expect_ju_ge(a, b, ...) \ + expect_cmp(uintmax_t, a, b, >=, <, "ju", __VA_ARGS__) +#define expect_ju_gt(a, b, ...) \ + expect_cmp(uintmax_t, a, b, >, <=, "ju", __VA_ARGS__) -#define expect_zd_eq(a, b, ...) expect_cmp(ssize_t, a, b, ==, \ - !=, "zd", __VA_ARGS__) -#define expect_zd_ne(a, b, ...) expect_cmp(ssize_t, a, b, !=, \ - ==, "zd", __VA_ARGS__) -#define expect_zd_lt(a, b, ...) expect_cmp(ssize_t, a, b, <, \ - >=, "zd", __VA_ARGS__) -#define expect_zd_le(a, b, ...) expect_cmp(ssize_t, a, b, <=, \ - >, "zd", __VA_ARGS__) -#define expect_zd_ge(a, b, ...) expect_cmp(ssize_t, a, b, >=, \ - <, "zd", __VA_ARGS__) -#define expect_zd_gt(a, b, ...) expect_cmp(ssize_t, a, b, >, \ - <=, "zd", __VA_ARGS__) +#define expect_zd_eq(a, b, ...) \ + expect_cmp(ssize_t, a, b, ==, !=, "zd", __VA_ARGS__) +#define expect_zd_ne(a, b, ...) \ + expect_cmp(ssize_t, a, b, !=, ==, "zd", __VA_ARGS__) +#define expect_zd_lt(a, b, ...) \ + expect_cmp(ssize_t, a, b, <, >=, "zd", __VA_ARGS__) +#define expect_zd_le(a, b, ...) \ + expect_cmp(ssize_t, a, b, <=, >, "zd", __VA_ARGS__) +#define expect_zd_ge(a, b, ...) \ + expect_cmp(ssize_t, a, b, >=, <, "zd", __VA_ARGS__) +#define expect_zd_gt(a, b, ...) \ + expect_cmp(ssize_t, a, b, >, <=, "zd", __VA_ARGS__) -#define expect_zu_eq(a, b, ...) expect_cmp(size_t, a, b, ==, \ - !=, "zu", __VA_ARGS__) -#define expect_zu_ne(a, b, ...) expect_cmp(size_t, a, b, !=, \ - ==, "zu", __VA_ARGS__) -#define expect_zu_lt(a, b, ...) expect_cmp(size_t, a, b, <, \ - >=, "zu", __VA_ARGS__) -#define expect_zu_le(a, b, ...) expect_cmp(size_t, a, b, <=, \ - >, "zu", __VA_ARGS__) -#define expect_zu_ge(a, b, ...) expect_cmp(size_t, a, b, >=, \ - <, "zu", __VA_ARGS__) -#define expect_zu_gt(a, b, ...) expect_cmp(size_t, a, b, >, \ - <=, "zu", __VA_ARGS__) +#define expect_zu_eq(a, b, ...) 
\ + expect_cmp(size_t, a, b, ==, !=, "zu", __VA_ARGS__) +#define expect_zu_ne(a, b, ...) \ + expect_cmp(size_t, a, b, !=, ==, "zu", __VA_ARGS__) +#define expect_zu_lt(a, b, ...) \ + expect_cmp(size_t, a, b, <, >=, "zu", __VA_ARGS__) +#define expect_zu_le(a, b, ...) \ + expect_cmp(size_t, a, b, <=, >, "zu", __VA_ARGS__) +#define expect_zu_ge(a, b, ...) \ + expect_cmp(size_t, a, b, >=, <, "zu", __VA_ARGS__) +#define expect_zu_gt(a, b, ...) \ + expect_cmp(size_t, a, b, >, <=, "zu", __VA_ARGS__) -#define expect_d32_eq(a, b, ...) expect_cmp(int32_t, a, b, ==, \ - !=, FMTd32, __VA_ARGS__) -#define expect_d32_ne(a, b, ...) expect_cmp(int32_t, a, b, !=, \ - ==, FMTd32, __VA_ARGS__) -#define expect_d32_lt(a, b, ...) expect_cmp(int32_t, a, b, <, \ - >=, FMTd32, __VA_ARGS__) -#define expect_d32_le(a, b, ...) expect_cmp(int32_t, a, b, <=, \ - >, FMTd32, __VA_ARGS__) -#define expect_d32_ge(a, b, ...) expect_cmp(int32_t, a, b, >=, \ - <, FMTd32, __VA_ARGS__) -#define expect_d32_gt(a, b, ...) expect_cmp(int32_t, a, b, >, \ - <=, FMTd32, __VA_ARGS__) +#define expect_d32_eq(a, b, ...) \ + expect_cmp(int32_t, a, b, ==, !=, FMTd32, __VA_ARGS__) +#define expect_d32_ne(a, b, ...) \ + expect_cmp(int32_t, a, b, !=, ==, FMTd32, __VA_ARGS__) +#define expect_d32_lt(a, b, ...) \ + expect_cmp(int32_t, a, b, <, >=, FMTd32, __VA_ARGS__) +#define expect_d32_le(a, b, ...) \ + expect_cmp(int32_t, a, b, <=, >, FMTd32, __VA_ARGS__) +#define expect_d32_ge(a, b, ...) \ + expect_cmp(int32_t, a, b, >=, <, FMTd32, __VA_ARGS__) +#define expect_d32_gt(a, b, ...) \ + expect_cmp(int32_t, a, b, >, <=, FMTd32, __VA_ARGS__) -#define expect_u32_eq(a, b, ...) expect_cmp(uint32_t, a, b, ==, \ - !=, FMTu32, __VA_ARGS__) -#define expect_u32_ne(a, b, ...) expect_cmp(uint32_t, a, b, !=, \ - ==, FMTu32, __VA_ARGS__) -#define expect_u32_lt(a, b, ...) expect_cmp(uint32_t, a, b, <, \ - >=, FMTu32, __VA_ARGS__) -#define expect_u32_le(a, b, ...) 
expect_cmp(uint32_t, a, b, <=, \ - >, FMTu32, __VA_ARGS__) -#define expect_u32_ge(a, b, ...) expect_cmp(uint32_t, a, b, >=, \ - <, FMTu32, __VA_ARGS__) -#define expect_u32_gt(a, b, ...) expect_cmp(uint32_t, a, b, >, \ - <=, FMTu32, __VA_ARGS__) +#define expect_u32_eq(a, b, ...) \ + expect_cmp(uint32_t, a, b, ==, !=, FMTu32, __VA_ARGS__) +#define expect_u32_ne(a, b, ...) \ + expect_cmp(uint32_t, a, b, !=, ==, FMTu32, __VA_ARGS__) +#define expect_u32_lt(a, b, ...) \ + expect_cmp(uint32_t, a, b, <, >=, FMTu32, __VA_ARGS__) +#define expect_u32_le(a, b, ...) \ + expect_cmp(uint32_t, a, b, <=, >, FMTu32, __VA_ARGS__) +#define expect_u32_ge(a, b, ...) \ + expect_cmp(uint32_t, a, b, >=, <, FMTu32, __VA_ARGS__) +#define expect_u32_gt(a, b, ...) \ + expect_cmp(uint32_t, a, b, >, <=, FMTu32, __VA_ARGS__) -#define expect_d64_eq(a, b, ...) expect_cmp(int64_t, a, b, ==, \ - !=, FMTd64, __VA_ARGS__) -#define expect_d64_ne(a, b, ...) expect_cmp(int64_t, a, b, !=, \ - ==, FMTd64, __VA_ARGS__) -#define expect_d64_lt(a, b, ...) expect_cmp(int64_t, a, b, <, \ - >=, FMTd64, __VA_ARGS__) -#define expect_d64_le(a, b, ...) expect_cmp(int64_t, a, b, <=, \ - >, FMTd64, __VA_ARGS__) -#define expect_d64_ge(a, b, ...) expect_cmp(int64_t, a, b, >=, \ - <, FMTd64, __VA_ARGS__) -#define expect_d64_gt(a, b, ...) expect_cmp(int64_t, a, b, >, \ - <=, FMTd64, __VA_ARGS__) +#define expect_d64_eq(a, b, ...) \ + expect_cmp(int64_t, a, b, ==, !=, FMTd64, __VA_ARGS__) +#define expect_d64_ne(a, b, ...) \ + expect_cmp(int64_t, a, b, !=, ==, FMTd64, __VA_ARGS__) +#define expect_d64_lt(a, b, ...) \ + expect_cmp(int64_t, a, b, <, >=, FMTd64, __VA_ARGS__) +#define expect_d64_le(a, b, ...) \ + expect_cmp(int64_t, a, b, <=, >, FMTd64, __VA_ARGS__) +#define expect_d64_ge(a, b, ...) \ + expect_cmp(int64_t, a, b, >=, <, FMTd64, __VA_ARGS__) +#define expect_d64_gt(a, b, ...) \ + expect_cmp(int64_t, a, b, >, <=, FMTd64, __VA_ARGS__) -#define expect_u64_eq(a, b, ...) 
expect_cmp(uint64_t, a, b, ==, \ - !=, FMTu64, __VA_ARGS__) -#define expect_u64_ne(a, b, ...) expect_cmp(uint64_t, a, b, !=, \ - ==, FMTu64, __VA_ARGS__) -#define expect_u64_lt(a, b, ...) expect_cmp(uint64_t, a, b, <, \ - >=, FMTu64, __VA_ARGS__) -#define expect_u64_le(a, b, ...) expect_cmp(uint64_t, a, b, <=, \ - >, FMTu64, __VA_ARGS__) -#define expect_u64_ge(a, b, ...) expect_cmp(uint64_t, a, b, >=, \ - <, FMTu64, __VA_ARGS__) -#define expect_u64_gt(a, b, ...) expect_cmp(uint64_t, a, b, >, \ - <=, FMTu64, __VA_ARGS__) +#define expect_u64_eq(a, b, ...) \ + expect_cmp(uint64_t, a, b, ==, !=, FMTu64, __VA_ARGS__) +#define expect_u64_ne(a, b, ...) \ + expect_cmp(uint64_t, a, b, !=, ==, FMTu64, __VA_ARGS__) +#define expect_u64_lt(a, b, ...) \ + expect_cmp(uint64_t, a, b, <, >=, FMTu64, __VA_ARGS__) +#define expect_u64_le(a, b, ...) \ + expect_cmp(uint64_t, a, b, <=, >, FMTu64, __VA_ARGS__) +#define expect_u64_ge(a, b, ...) \ + expect_cmp(uint64_t, a, b, >=, <, FMTu64, __VA_ARGS__) +#define expect_u64_gt(a, b, ...) \ + expect_cmp(uint64_t, a, b, >, <=, FMTu64, __VA_ARGS__) -#define verify_b_eq(may_abort, a, b, ...) do { \ - bool a_ = (a); \ - bool b_ = (b); \ - if (!(a_ == b_)) { \ - char prefix[ASSERT_BUFSIZE]; \ - char message[ASSERT_BUFSIZE]; \ - malloc_snprintf(prefix, sizeof(prefix), \ - "%s:%s:%d: Failed assertion: " \ - "(%s) == (%s) --> %s != %s: ", \ - __func__, __FILE__, __LINE__, \ - #a, #b, a_ ? "true" : "false", \ - b_ ? "true" : "false"); \ - malloc_snprintf(message, sizeof(message), __VA_ARGS__); \ - p_test_fail(may_abort, prefix, message); \ - } \ -} while (0) +#define verify_b_eq(may_abort, a, b, ...) \ + do { \ + bool a_ = (a); \ + bool b_ = (b); \ + if (!(a_ == b_)) { \ + char prefix[ASSERT_BUFSIZE]; \ + char message[ASSERT_BUFSIZE]; \ + malloc_snprintf(prefix, sizeof(prefix), \ + "%s:%s:%d: Failed assertion: " \ + "(%s) == (%s) --> %s != %s: ", \ + __func__, __FILE__, __LINE__, #a, #b, \ + a_ ? "true" : "false", b_ ? 
"true" : "false"); \ + malloc_snprintf( \ + message, sizeof(message), __VA_ARGS__); \ + p_test_fail(may_abort, prefix, message); \ + } \ + } while (0) -#define verify_b_ne(may_abort, a, b, ...) do { \ - bool a_ = (a); \ - bool b_ = (b); \ - if (!(a_ != b_)) { \ - char prefix[ASSERT_BUFSIZE]; \ - char message[ASSERT_BUFSIZE]; \ - malloc_snprintf(prefix, sizeof(prefix), \ - "%s:%s:%d: Failed assertion: " \ - "(%s) != (%s) --> %s == %s: ", \ - __func__, __FILE__, __LINE__, \ - #a, #b, a_ ? "true" : "false", \ - b_ ? "true" : "false"); \ - malloc_snprintf(message, sizeof(message), __VA_ARGS__); \ - p_test_fail(may_abort, prefix, message); \ - } \ -} while (0) +#define verify_b_ne(may_abort, a, b, ...) \ + do { \ + bool a_ = (a); \ + bool b_ = (b); \ + if (!(a_ != b_)) { \ + char prefix[ASSERT_BUFSIZE]; \ + char message[ASSERT_BUFSIZE]; \ + malloc_snprintf(prefix, sizeof(prefix), \ + "%s:%s:%d: Failed assertion: " \ + "(%s) != (%s) --> %s == %s: ", \ + __func__, __FILE__, __LINE__, #a, #b, \ + a_ ? "true" : "false", b_ ? "true" : "false"); \ + malloc_snprintf( \ + message, sizeof(message), __VA_ARGS__); \ + p_test_fail(may_abort, prefix, message); \ + } \ + } while (0) -#define expect_b_eq(a, b, ...) verify_b_eq(false, a, b, __VA_ARGS__) -#define expect_b_ne(a, b, ...) verify_b_ne(false, a, b, __VA_ARGS__) +#define expect_b_eq(a, b, ...) verify_b_eq(false, a, b, __VA_ARGS__) +#define expect_b_ne(a, b, ...) verify_b_ne(false, a, b, __VA_ARGS__) -#define expect_true(a, ...) expect_b_eq(a, true, __VA_ARGS__) -#define expect_false(a, ...) expect_b_eq(a, false, __VA_ARGS__) +#define expect_true(a, ...) expect_b_eq(a, true, __VA_ARGS__) +#define expect_false(a, ...) expect_b_eq(a, false, __VA_ARGS__) -#define verify_str_eq(may_abort, a, b, ...) 
do { \ - if (strcmp((a), (b)) != 0) { \ - char prefix[ASSERT_BUFSIZE]; \ - char message[ASSERT_BUFSIZE]; \ - malloc_snprintf(prefix, sizeof(prefix), \ - "%s:%s:%d: Failed assertion: " \ - "(%s) same as (%s) --> " \ - "\"%s\" differs from \"%s\": ", \ - __func__, __FILE__, __LINE__, #a, #b, a, b); \ - malloc_snprintf(message, sizeof(message), __VA_ARGS__); \ - p_test_fail(may_abort, prefix, message); \ - } \ -} while (0) +#define verify_str_eq(may_abort, a, b, ...) \ + do { \ + if (strcmp((a), (b)) != 0) { \ + char prefix[ASSERT_BUFSIZE]; \ + char message[ASSERT_BUFSIZE]; \ + malloc_snprintf(prefix, sizeof(prefix), \ + "%s:%s:%d: Failed assertion: " \ + "(%s) same as (%s) --> " \ + "\"%s\" differs from \"%s\": ", \ + __func__, __FILE__, __LINE__, #a, #b, a, b); \ + malloc_snprintf( \ + message, sizeof(message), __VA_ARGS__); \ + p_test_fail(may_abort, prefix, message); \ + } \ + } while (0) -#define verify_str_ne(may_abort, a, b, ...) do { \ - if (strcmp((a), (b)) == 0) { \ - char prefix[ASSERT_BUFSIZE]; \ - char message[ASSERT_BUFSIZE]; \ - malloc_snprintf(prefix, sizeof(prefix), \ - "%s:%s:%d: Failed assertion: " \ - "(%s) differs from (%s) --> " \ - "\"%s\" same as \"%s\": ", \ - __func__, __FILE__, __LINE__, #a, #b, a, b); \ - malloc_snprintf(message, sizeof(message), __VA_ARGS__); \ - p_test_fail(may_abort, prefix, message); \ - } \ -} while (0) +#define verify_str_ne(may_abort, a, b, ...) \ + do { \ + if (strcmp((a), (b)) == 0) { \ + char prefix[ASSERT_BUFSIZE]; \ + char message[ASSERT_BUFSIZE]; \ + malloc_snprintf(prefix, sizeof(prefix), \ + "%s:%s:%d: Failed assertion: " \ + "(%s) differs from (%s) --> " \ + "\"%s\" same as \"%s\": ", \ + __func__, __FILE__, __LINE__, #a, #b, a, b); \ + malloc_snprintf( \ + message, sizeof(message), __VA_ARGS__); \ + p_test_fail(may_abort, prefix, message); \ + } \ + } while (0) #define expect_str_eq(a, b, ...) verify_str_eq(false, a, b, __VA_ARGS__) #define expect_str_ne(a, b, ...) 
verify_str_ne(false, a, b, __VA_ARGS__) -#define verify_not_reached(may_abort, ...) do { \ - char prefix[ASSERT_BUFSIZE]; \ - char message[ASSERT_BUFSIZE]; \ - malloc_snprintf(prefix, sizeof(prefix), \ - "%s:%s:%d: Unreachable code reached: ", \ - __func__, __FILE__, __LINE__); \ - malloc_snprintf(message, sizeof(message), __VA_ARGS__); \ - p_test_fail(may_abort, prefix, message); \ -} while (0) +#define verify_not_reached(may_abort, ...) \ + do { \ + char prefix[ASSERT_BUFSIZE]; \ + char message[ASSERT_BUFSIZE]; \ + malloc_snprintf(prefix, sizeof(prefix), \ + "%s:%s:%d: Unreachable code reached: ", __func__, \ + __FILE__, __LINE__); \ + malloc_snprintf(message, sizeof(message), __VA_ARGS__); \ + p_test_fail(may_abort, prefix, message); \ + } while (0) #define expect_not_reached(...) verify_not_reached(false, __VA_ARGS__) -#define assert_cmp(t, a, b, cmp, neg_cmp, pri, ...) verify_cmp(true, \ - t, a, b, cmp, neg_cmp, pri, __VA_ARGS__) +#define assert_cmp(t, a, b, cmp, neg_cmp, pri, ...) \ + verify_cmp(true, t, a, b, cmp, neg_cmp, pri, __VA_ARGS__) -#define assert_ptr_eq(a, b, ...) assert_cmp(void *, a, b, ==, \ - !=, "p", __VA_ARGS__) -#define assert_ptr_ne(a, b, ...) assert_cmp(void *, a, b, !=, \ - ==, "p", __VA_ARGS__) -#define assert_ptr_null(a, ...) assert_cmp(void *, a, NULL, ==, \ - !=, "p", __VA_ARGS__) -#define assert_ptr_not_null(a, ...) assert_cmp(void *, a, NULL, !=, \ - ==, "p", __VA_ARGS__) +#define assert_ptr_eq(a, b, ...) \ + assert_cmp(void *, a, b, ==, !=, "p", __VA_ARGS__) +#define assert_ptr_ne(a, b, ...) \ + assert_cmp(void *, a, b, !=, ==, "p", __VA_ARGS__) +#define assert_ptr_null(a, ...) \ + assert_cmp(void *, a, NULL, ==, !=, "p", __VA_ARGS__) +#define assert_ptr_not_null(a, ...) \ + assert_cmp(void *, a, NULL, !=, ==, "p", __VA_ARGS__) -#define assert_c_eq(a, b, ...) assert_cmp(char, a, b, ==, !=, "c", __VA_ARGS__) -#define assert_c_ne(a, b, ...) assert_cmp(char, a, b, !=, ==, "c", __VA_ARGS__) -#define assert_c_lt(a, b, ...) 
assert_cmp(char, a, b, <, >=, "c", __VA_ARGS__) -#define assert_c_le(a, b, ...) assert_cmp(char, a, b, <=, >, "c", __VA_ARGS__) -#define assert_c_ge(a, b, ...) assert_cmp(char, a, b, >=, <, "c", __VA_ARGS__) -#define assert_c_gt(a, b, ...) assert_cmp(char, a, b, >, <=, "c", __VA_ARGS__) +#define assert_c_eq(a, b, ...) assert_cmp(char, a, b, ==, !=, "c", __VA_ARGS__) +#define assert_c_ne(a, b, ...) assert_cmp(char, a, b, !=, ==, "c", __VA_ARGS__) +#define assert_c_lt(a, b, ...) assert_cmp(char, a, b, <, >=, "c", __VA_ARGS__) +#define assert_c_le(a, b, ...) assert_cmp(char, a, b, <=, >, "c", __VA_ARGS__) +#define assert_c_ge(a, b, ...) assert_cmp(char, a, b, >=, <, "c", __VA_ARGS__) +#define assert_c_gt(a, b, ...) assert_cmp(char, a, b, >, <=, "c", __VA_ARGS__) -#define assert_x_eq(a, b, ...) assert_cmp(int, a, b, ==, !=, "#x", __VA_ARGS__) -#define assert_x_ne(a, b, ...) assert_cmp(int, a, b, !=, ==, "#x", __VA_ARGS__) -#define assert_x_lt(a, b, ...) assert_cmp(int, a, b, <, >=, "#x", __VA_ARGS__) -#define assert_x_le(a, b, ...) assert_cmp(int, a, b, <=, >, "#x", __VA_ARGS__) -#define assert_x_ge(a, b, ...) assert_cmp(int, a, b, >=, <, "#x", __VA_ARGS__) -#define assert_x_gt(a, b, ...) assert_cmp(int, a, b, >, <=, "#x", __VA_ARGS__) +#define assert_x_eq(a, b, ...) assert_cmp(int, a, b, ==, !=, "#x", __VA_ARGS__) +#define assert_x_ne(a, b, ...) assert_cmp(int, a, b, !=, ==, "#x", __VA_ARGS__) +#define assert_x_lt(a, b, ...) assert_cmp(int, a, b, <, >=, "#x", __VA_ARGS__) +#define assert_x_le(a, b, ...) assert_cmp(int, a, b, <=, >, "#x", __VA_ARGS__) +#define assert_x_ge(a, b, ...) assert_cmp(int, a, b, >=, <, "#x", __VA_ARGS__) +#define assert_x_gt(a, b, ...) assert_cmp(int, a, b, >, <=, "#x", __VA_ARGS__) -#define assert_d_eq(a, b, ...) assert_cmp(int, a, b, ==, !=, "d", __VA_ARGS__) -#define assert_d_ne(a, b, ...) assert_cmp(int, a, b, !=, ==, "d", __VA_ARGS__) -#define assert_d_lt(a, b, ...) 
assert_cmp(int, a, b, <, >=, "d", __VA_ARGS__) -#define assert_d_le(a, b, ...) assert_cmp(int, a, b, <=, >, "d", __VA_ARGS__) -#define assert_d_ge(a, b, ...) assert_cmp(int, a, b, >=, <, "d", __VA_ARGS__) -#define assert_d_gt(a, b, ...) assert_cmp(int, a, b, >, <=, "d", __VA_ARGS__) +#define assert_d_eq(a, b, ...) assert_cmp(int, a, b, ==, !=, "d", __VA_ARGS__) +#define assert_d_ne(a, b, ...) assert_cmp(int, a, b, !=, ==, "d", __VA_ARGS__) +#define assert_d_lt(a, b, ...) assert_cmp(int, a, b, <, >=, "d", __VA_ARGS__) +#define assert_d_le(a, b, ...) assert_cmp(int, a, b, <=, >, "d", __VA_ARGS__) +#define assert_d_ge(a, b, ...) assert_cmp(int, a, b, >=, <, "d", __VA_ARGS__) +#define assert_d_gt(a, b, ...) assert_cmp(int, a, b, >, <=, "d", __VA_ARGS__) -#define assert_u_eq(a, b, ...) assert_cmp(int, a, b, ==, !=, "u", __VA_ARGS__) -#define assert_u_ne(a, b, ...) assert_cmp(int, a, b, !=, ==, "u", __VA_ARGS__) -#define assert_u_lt(a, b, ...) assert_cmp(int, a, b, <, >=, "u", __VA_ARGS__) -#define assert_u_le(a, b, ...) assert_cmp(int, a, b, <=, >, "u", __VA_ARGS__) -#define assert_u_ge(a, b, ...) assert_cmp(int, a, b, >=, <, "u", __VA_ARGS__) -#define assert_u_gt(a, b, ...) assert_cmp(int, a, b, >, <=, "u", __VA_ARGS__) +#define assert_u_eq(a, b, ...) assert_cmp(int, a, b, ==, !=, "u", __VA_ARGS__) +#define assert_u_ne(a, b, ...) assert_cmp(int, a, b, !=, ==, "u", __VA_ARGS__) +#define assert_u_lt(a, b, ...) assert_cmp(int, a, b, <, >=, "u", __VA_ARGS__) +#define assert_u_le(a, b, ...) assert_cmp(int, a, b, <=, >, "u", __VA_ARGS__) +#define assert_u_ge(a, b, ...) assert_cmp(int, a, b, >=, <, "u", __VA_ARGS__) +#define assert_u_gt(a, b, ...) assert_cmp(int, a, b, >, <=, "u", __VA_ARGS__) -#define assert_ld_eq(a, b, ...) assert_cmp(long, a, b, ==, \ - !=, "ld", __VA_ARGS__) -#define assert_ld_ne(a, b, ...) assert_cmp(long, a, b, !=, \ - ==, "ld", __VA_ARGS__) -#define assert_ld_lt(a, b, ...) 
assert_cmp(long, a, b, <, \ - >=, "ld", __VA_ARGS__) -#define assert_ld_le(a, b, ...) assert_cmp(long, a, b, <=, \ - >, "ld", __VA_ARGS__) -#define assert_ld_ge(a, b, ...) assert_cmp(long, a, b, >=, \ - <, "ld", __VA_ARGS__) -#define assert_ld_gt(a, b, ...) assert_cmp(long, a, b, >, \ - <=, "ld", __VA_ARGS__) +#define assert_ld_eq(a, b, ...) \ + assert_cmp(long, a, b, ==, !=, "ld", __VA_ARGS__) +#define assert_ld_ne(a, b, ...) \ + assert_cmp(long, a, b, !=, ==, "ld", __VA_ARGS__) +#define assert_ld_lt(a, b, ...) assert_cmp(long, a, b, <, >=, "ld", __VA_ARGS__) +#define assert_ld_le(a, b, ...) assert_cmp(long, a, b, <=, >, "ld", __VA_ARGS__) +#define assert_ld_ge(a, b, ...) assert_cmp(long, a, b, >=, <, "ld", __VA_ARGS__) +#define assert_ld_gt(a, b, ...) assert_cmp(long, a, b, >, <=, "ld", __VA_ARGS__) -#define assert_lu_eq(a, b, ...) assert_cmp(unsigned long, \ - a, b, ==, !=, "lu", __VA_ARGS__) -#define assert_lu_ne(a, b, ...) assert_cmp(unsigned long, \ - a, b, !=, ==, "lu", __VA_ARGS__) -#define assert_lu_lt(a, b, ...) assert_cmp(unsigned long, \ - a, b, <, >=, "lu", __VA_ARGS__) -#define assert_lu_le(a, b, ...) assert_cmp(unsigned long, \ - a, b, <=, >, "lu", __VA_ARGS__) -#define assert_lu_ge(a, b, ...) assert_cmp(unsigned long, \ - a, b, >=, <, "lu", __VA_ARGS__) -#define assert_lu_gt(a, b, ...) assert_cmp(unsigned long, \ - a, b, >, <=, "lu", __VA_ARGS__) +#define assert_lu_eq(a, b, ...) \ + assert_cmp(unsigned long, a, b, ==, !=, "lu", __VA_ARGS__) +#define assert_lu_ne(a, b, ...) \ + assert_cmp(unsigned long, a, b, !=, ==, "lu", __VA_ARGS__) +#define assert_lu_lt(a, b, ...) \ + assert_cmp(unsigned long, a, b, <, >=, "lu", __VA_ARGS__) +#define assert_lu_le(a, b, ...) \ + assert_cmp(unsigned long, a, b, <=, >, "lu", __VA_ARGS__) +#define assert_lu_ge(a, b, ...) \ + assert_cmp(unsigned long, a, b, >=, <, "lu", __VA_ARGS__) +#define assert_lu_gt(a, b, ...) \ + assert_cmp(unsigned long, a, b, >, <=, "lu", __VA_ARGS__) -#define assert_qd_eq(a, b, ...) 
assert_cmp(long long, a, b, ==, \ - !=, "qd", __VA_ARGS__) -#define assert_qd_ne(a, b, ...) assert_cmp(long long, a, b, !=, \ - ==, "qd", __VA_ARGS__) -#define assert_qd_lt(a, b, ...) assert_cmp(long long, a, b, <, \ - >=, "qd", __VA_ARGS__) -#define assert_qd_le(a, b, ...) assert_cmp(long long, a, b, <=, \ - >, "qd", __VA_ARGS__) -#define assert_qd_ge(a, b, ...) assert_cmp(long long, a, b, >=, \ - <, "qd", __VA_ARGS__) -#define assert_qd_gt(a, b, ...) assert_cmp(long long, a, b, >, \ - <=, "qd", __VA_ARGS__) +#define assert_qd_eq(a, b, ...) \ + assert_cmp(long long, a, b, ==, !=, "qd", __VA_ARGS__) +#define assert_qd_ne(a, b, ...) \ + assert_cmp(long long, a, b, !=, ==, "qd", __VA_ARGS__) +#define assert_qd_lt(a, b, ...) \ + assert_cmp(long long, a, b, <, >=, "qd", __VA_ARGS__) +#define assert_qd_le(a, b, ...) \ + assert_cmp(long long, a, b, <=, >, "qd", __VA_ARGS__) +#define assert_qd_ge(a, b, ...) \ + assert_cmp(long long, a, b, >=, <, "qd", __VA_ARGS__) +#define assert_qd_gt(a, b, ...) \ + assert_cmp(long long, a, b, >, <=, "qd", __VA_ARGS__) -#define assert_qu_eq(a, b, ...) assert_cmp(unsigned long long, \ - a, b, ==, !=, "qu", __VA_ARGS__) -#define assert_qu_ne(a, b, ...) assert_cmp(unsigned long long, \ - a, b, !=, ==, "qu", __VA_ARGS__) -#define assert_qu_lt(a, b, ...) assert_cmp(unsigned long long, \ - a, b, <, >=, "qu", __VA_ARGS__) -#define assert_qu_le(a, b, ...) assert_cmp(unsigned long long, \ - a, b, <=, >, "qu", __VA_ARGS__) -#define assert_qu_ge(a, b, ...) assert_cmp(unsigned long long, \ - a, b, >=, <, "qu", __VA_ARGS__) -#define assert_qu_gt(a, b, ...) assert_cmp(unsigned long long, \ - a, b, >, <=, "qu", __VA_ARGS__) +#define assert_qu_eq(a, b, ...) \ + assert_cmp(unsigned long long, a, b, ==, !=, "qu", __VA_ARGS__) +#define assert_qu_ne(a, b, ...) \ + assert_cmp(unsigned long long, a, b, !=, ==, "qu", __VA_ARGS__) +#define assert_qu_lt(a, b, ...) \ + assert_cmp(unsigned long long, a, b, <, >=, "qu", __VA_ARGS__) +#define assert_qu_le(a, b, ...) 
\ + assert_cmp(unsigned long long, a, b, <=, >, "qu", __VA_ARGS__) +#define assert_qu_ge(a, b, ...) \ + assert_cmp(unsigned long long, a, b, >=, <, "qu", __VA_ARGS__) +#define assert_qu_gt(a, b, ...) \ + assert_cmp(unsigned long long, a, b, >, <=, "qu", __VA_ARGS__) -#define assert_jd_eq(a, b, ...) assert_cmp(intmax_t, a, b, ==, \ - !=, "jd", __VA_ARGS__) -#define assert_jd_ne(a, b, ...) assert_cmp(intmax_t, a, b, !=, \ - ==, "jd", __VA_ARGS__) -#define assert_jd_lt(a, b, ...) assert_cmp(intmax_t, a, b, <, \ - >=, "jd", __VA_ARGS__) -#define assert_jd_le(a, b, ...) assert_cmp(intmax_t, a, b, <=, \ - >, "jd", __VA_ARGS__) -#define assert_jd_ge(a, b, ...) assert_cmp(intmax_t, a, b, >=, \ - <, "jd", __VA_ARGS__) -#define assert_jd_gt(a, b, ...) assert_cmp(intmax_t, a, b, >, \ - <=, "jd", __VA_ARGS__) +#define assert_jd_eq(a, b, ...) \ + assert_cmp(intmax_t, a, b, ==, !=, "jd", __VA_ARGS__) +#define assert_jd_ne(a, b, ...) \ + assert_cmp(intmax_t, a, b, !=, ==, "jd", __VA_ARGS__) +#define assert_jd_lt(a, b, ...) \ + assert_cmp(intmax_t, a, b, <, >=, "jd", __VA_ARGS__) +#define assert_jd_le(a, b, ...) \ + assert_cmp(intmax_t, a, b, <=, >, "jd", __VA_ARGS__) +#define assert_jd_ge(a, b, ...) \ + assert_cmp(intmax_t, a, b, >=, <, "jd", __VA_ARGS__) +#define assert_jd_gt(a, b, ...) \ + assert_cmp(intmax_t, a, b, >, <=, "jd", __VA_ARGS__) -#define assert_ju_eq(a, b, ...) assert_cmp(uintmax_t, a, b, ==, \ - !=, "ju", __VA_ARGS__) -#define assert_ju_ne(a, b, ...) assert_cmp(uintmax_t, a, b, !=, \ - ==, "ju", __VA_ARGS__) -#define assert_ju_lt(a, b, ...) assert_cmp(uintmax_t, a, b, <, \ - >=, "ju", __VA_ARGS__) -#define assert_ju_le(a, b, ...) assert_cmp(uintmax_t, a, b, <=, \ - >, "ju", __VA_ARGS__) -#define assert_ju_ge(a, b, ...) assert_cmp(uintmax_t, a, b, >=, \ - <, "ju", __VA_ARGS__) -#define assert_ju_gt(a, b, ...) assert_cmp(uintmax_t, a, b, >, \ - <=, "ju", __VA_ARGS__) +#define assert_ju_eq(a, b, ...) 
\ + assert_cmp(uintmax_t, a, b, ==, !=, "ju", __VA_ARGS__) +#define assert_ju_ne(a, b, ...) \ + assert_cmp(uintmax_t, a, b, !=, ==, "ju", __VA_ARGS__) +#define assert_ju_lt(a, b, ...) \ + assert_cmp(uintmax_t, a, b, <, >=, "ju", __VA_ARGS__) +#define assert_ju_le(a, b, ...) \ + assert_cmp(uintmax_t, a, b, <=, >, "ju", __VA_ARGS__) +#define assert_ju_ge(a, b, ...) \ + assert_cmp(uintmax_t, a, b, >=, <, "ju", __VA_ARGS__) +#define assert_ju_gt(a, b, ...) \ + assert_cmp(uintmax_t, a, b, >, <=, "ju", __VA_ARGS__) -#define assert_zd_eq(a, b, ...) assert_cmp(ssize_t, a, b, ==, \ - !=, "zd", __VA_ARGS__) -#define assert_zd_ne(a, b, ...) assert_cmp(ssize_t, a, b, !=, \ - ==, "zd", __VA_ARGS__) -#define assert_zd_lt(a, b, ...) assert_cmp(ssize_t, a, b, <, \ - >=, "zd", __VA_ARGS__) -#define assert_zd_le(a, b, ...) assert_cmp(ssize_t, a, b, <=, \ - >, "zd", __VA_ARGS__) -#define assert_zd_ge(a, b, ...) assert_cmp(ssize_t, a, b, >=, \ - <, "zd", __VA_ARGS__) -#define assert_zd_gt(a, b, ...) assert_cmp(ssize_t, a, b, >, \ - <=, "zd", __VA_ARGS__) +#define assert_zd_eq(a, b, ...) \ + assert_cmp(ssize_t, a, b, ==, !=, "zd", __VA_ARGS__) +#define assert_zd_ne(a, b, ...) \ + assert_cmp(ssize_t, a, b, !=, ==, "zd", __VA_ARGS__) +#define assert_zd_lt(a, b, ...) \ + assert_cmp(ssize_t, a, b, <, >=, "zd", __VA_ARGS__) +#define assert_zd_le(a, b, ...) \ + assert_cmp(ssize_t, a, b, <=, >, "zd", __VA_ARGS__) +#define assert_zd_ge(a, b, ...) \ + assert_cmp(ssize_t, a, b, >=, <, "zd", __VA_ARGS__) +#define assert_zd_gt(a, b, ...) \ + assert_cmp(ssize_t, a, b, >, <=, "zd", __VA_ARGS__) -#define assert_zu_eq(a, b, ...) assert_cmp(size_t, a, b, ==, \ - !=, "zu", __VA_ARGS__) -#define assert_zu_ne(a, b, ...) assert_cmp(size_t, a, b, !=, \ - ==, "zu", __VA_ARGS__) -#define assert_zu_lt(a, b, ...) assert_cmp(size_t, a, b, <, \ - >=, "zu", __VA_ARGS__) -#define assert_zu_le(a, b, ...) assert_cmp(size_t, a, b, <=, \ - >, "zu", __VA_ARGS__) -#define assert_zu_ge(a, b, ...) 
assert_cmp(size_t, a, b, >=, \ - <, "zu", __VA_ARGS__) -#define assert_zu_gt(a, b, ...) assert_cmp(size_t, a, b, >, \ - <=, "zu", __VA_ARGS__) +#define assert_zu_eq(a, b, ...) \ + assert_cmp(size_t, a, b, ==, !=, "zu", __VA_ARGS__) +#define assert_zu_ne(a, b, ...) \ + assert_cmp(size_t, a, b, !=, ==, "zu", __VA_ARGS__) +#define assert_zu_lt(a, b, ...) \ + assert_cmp(size_t, a, b, <, >=, "zu", __VA_ARGS__) +#define assert_zu_le(a, b, ...) \ + assert_cmp(size_t, a, b, <=, >, "zu", __VA_ARGS__) +#define assert_zu_ge(a, b, ...) \ + assert_cmp(size_t, a, b, >=, <, "zu", __VA_ARGS__) +#define assert_zu_gt(a, b, ...) \ + assert_cmp(size_t, a, b, >, <=, "zu", __VA_ARGS__) -#define assert_d32_eq(a, b, ...) assert_cmp(int32_t, a, b, ==, \ - !=, FMTd32, __VA_ARGS__) -#define assert_d32_ne(a, b, ...) assert_cmp(int32_t, a, b, !=, \ - ==, FMTd32, __VA_ARGS__) -#define assert_d32_lt(a, b, ...) assert_cmp(int32_t, a, b, <, \ - >=, FMTd32, __VA_ARGS__) -#define assert_d32_le(a, b, ...) assert_cmp(int32_t, a, b, <=, \ - >, FMTd32, __VA_ARGS__) -#define assert_d32_ge(a, b, ...) assert_cmp(int32_t, a, b, >=, \ - <, FMTd32, __VA_ARGS__) -#define assert_d32_gt(a, b, ...) assert_cmp(int32_t, a, b, >, \ - <=, FMTd32, __VA_ARGS__) +#define assert_d32_eq(a, b, ...) \ + assert_cmp(int32_t, a, b, ==, !=, FMTd32, __VA_ARGS__) +#define assert_d32_ne(a, b, ...) \ + assert_cmp(int32_t, a, b, !=, ==, FMTd32, __VA_ARGS__) +#define assert_d32_lt(a, b, ...) \ + assert_cmp(int32_t, a, b, <, >=, FMTd32, __VA_ARGS__) +#define assert_d32_le(a, b, ...) \ + assert_cmp(int32_t, a, b, <=, >, FMTd32, __VA_ARGS__) +#define assert_d32_ge(a, b, ...) \ + assert_cmp(int32_t, a, b, >=, <, FMTd32, __VA_ARGS__) +#define assert_d32_gt(a, b, ...) \ + assert_cmp(int32_t, a, b, >, <=, FMTd32, __VA_ARGS__) -#define assert_u32_eq(a, b, ...) assert_cmp(uint32_t, a, b, ==, \ - !=, FMTu32, __VA_ARGS__) -#define assert_u32_ne(a, b, ...) 
assert_cmp(uint32_t, a, b, !=, \ - ==, FMTu32, __VA_ARGS__) -#define assert_u32_lt(a, b, ...) assert_cmp(uint32_t, a, b, <, \ - >=, FMTu32, __VA_ARGS__) -#define assert_u32_le(a, b, ...) assert_cmp(uint32_t, a, b, <=, \ - >, FMTu32, __VA_ARGS__) -#define assert_u32_ge(a, b, ...) assert_cmp(uint32_t, a, b, >=, \ - <, FMTu32, __VA_ARGS__) -#define assert_u32_gt(a, b, ...) assert_cmp(uint32_t, a, b, >, \ - <=, FMTu32, __VA_ARGS__) +#define assert_u32_eq(a, b, ...) \ + assert_cmp(uint32_t, a, b, ==, !=, FMTu32, __VA_ARGS__) +#define assert_u32_ne(a, b, ...) \ + assert_cmp(uint32_t, a, b, !=, ==, FMTu32, __VA_ARGS__) +#define assert_u32_lt(a, b, ...) \ + assert_cmp(uint32_t, a, b, <, >=, FMTu32, __VA_ARGS__) +#define assert_u32_le(a, b, ...) \ + assert_cmp(uint32_t, a, b, <=, >, FMTu32, __VA_ARGS__) +#define assert_u32_ge(a, b, ...) \ + assert_cmp(uint32_t, a, b, >=, <, FMTu32, __VA_ARGS__) +#define assert_u32_gt(a, b, ...) \ + assert_cmp(uint32_t, a, b, >, <=, FMTu32, __VA_ARGS__) -#define assert_d64_eq(a, b, ...) assert_cmp(int64_t, a, b, ==, \ - !=, FMTd64, __VA_ARGS__) -#define assert_d64_ne(a, b, ...) assert_cmp(int64_t, a, b, !=, \ - ==, FMTd64, __VA_ARGS__) -#define assert_d64_lt(a, b, ...) assert_cmp(int64_t, a, b, <, \ - >=, FMTd64, __VA_ARGS__) -#define assert_d64_le(a, b, ...) assert_cmp(int64_t, a, b, <=, \ - >, FMTd64, __VA_ARGS__) -#define assert_d64_ge(a, b, ...) assert_cmp(int64_t, a, b, >=, \ - <, FMTd64, __VA_ARGS__) -#define assert_d64_gt(a, b, ...) assert_cmp(int64_t, a, b, >, \ - <=, FMTd64, __VA_ARGS__) +#define assert_d64_eq(a, b, ...) \ + assert_cmp(int64_t, a, b, ==, !=, FMTd64, __VA_ARGS__) +#define assert_d64_ne(a, b, ...) \ + assert_cmp(int64_t, a, b, !=, ==, FMTd64, __VA_ARGS__) +#define assert_d64_lt(a, b, ...) \ + assert_cmp(int64_t, a, b, <, >=, FMTd64, __VA_ARGS__) +#define assert_d64_le(a, b, ...) \ + assert_cmp(int64_t, a, b, <=, >, FMTd64, __VA_ARGS__) +#define assert_d64_ge(a, b, ...) 
\ + assert_cmp(int64_t, a, b, >=, <, FMTd64, __VA_ARGS__) +#define assert_d64_gt(a, b, ...) \ + assert_cmp(int64_t, a, b, >, <=, FMTd64, __VA_ARGS__) -#define assert_u64_eq(a, b, ...) assert_cmp(uint64_t, a, b, ==, \ - !=, FMTu64, __VA_ARGS__) -#define assert_u64_ne(a, b, ...) assert_cmp(uint64_t, a, b, !=, \ - ==, FMTu64, __VA_ARGS__) -#define assert_u64_lt(a, b, ...) assert_cmp(uint64_t, a, b, <, \ - >=, FMTu64, __VA_ARGS__) -#define assert_u64_le(a, b, ...) assert_cmp(uint64_t, a, b, <=, \ - >, FMTu64, __VA_ARGS__) -#define assert_u64_ge(a, b, ...) assert_cmp(uint64_t, a, b, >=, \ - <, FMTu64, __VA_ARGS__) -#define assert_u64_gt(a, b, ...) assert_cmp(uint64_t, a, b, >, \ - <=, FMTu64, __VA_ARGS__) +#define assert_u64_eq(a, b, ...) \ + assert_cmp(uint64_t, a, b, ==, !=, FMTu64, __VA_ARGS__) +#define assert_u64_ne(a, b, ...) \ + assert_cmp(uint64_t, a, b, !=, ==, FMTu64, __VA_ARGS__) +#define assert_u64_lt(a, b, ...) \ + assert_cmp(uint64_t, a, b, <, >=, FMTu64, __VA_ARGS__) +#define assert_u64_le(a, b, ...) \ + assert_cmp(uint64_t, a, b, <=, >, FMTu64, __VA_ARGS__) +#define assert_u64_ge(a, b, ...) \ + assert_cmp(uint64_t, a, b, >=, <, FMTu64, __VA_ARGS__) +#define assert_u64_gt(a, b, ...) \ + assert_cmp(uint64_t, a, b, >, <=, FMTu64, __VA_ARGS__) -#define assert_b_eq(a, b, ...) verify_b_eq(true, a, b, __VA_ARGS__) -#define assert_b_ne(a, b, ...) verify_b_ne(true, a, b, __VA_ARGS__) +#define assert_b_eq(a, b, ...) verify_b_eq(true, a, b, __VA_ARGS__) +#define assert_b_ne(a, b, ...) verify_b_ne(true, a, b, __VA_ARGS__) -#define assert_true(a, ...) assert_b_eq(a, true, __VA_ARGS__) -#define assert_false(a, ...) assert_b_eq(a, false, __VA_ARGS__) +#define assert_true(a, ...) assert_b_eq(a, true, __VA_ARGS__) +#define assert_false(a, ...) assert_b_eq(a, false, __VA_ARGS__) #define assert_str_eq(a, b, ...) verify_str_eq(true, a, b, __VA_ARGS__) #define assert_str_ne(a, b, ...) 
verify_str_ne(true, a, b, __VA_ARGS__) @@ -515,45 +516,42 @@ typedef enum { test_status_count = 3 } test_status_t; -typedef void (test_t)(void); +typedef void(test_t)(void); -#define TEST_BEGIN(f) \ -static void \ -f(void) { \ - p_test_init(#f); +#define TEST_BEGIN(f) \ + static void f(void) { \ + p_test_init(#f); -#define TEST_END \ - goto label_test_end; \ -label_test_end: \ - p_test_fini(); \ -} +#define TEST_END \ + goto label_test_end; \ + label_test_end: \ + p_test_fini(); \ + } -#define test(...) \ - p_test(__VA_ARGS__, NULL) +#define test(...) p_test(__VA_ARGS__, NULL) -#define test_no_reentrancy(...) \ - p_test_no_reentrancy(__VA_ARGS__, NULL) +#define test_no_reentrancy(...) p_test_no_reentrancy(__VA_ARGS__, NULL) -#define test_no_malloc_init(...) \ - p_test_no_malloc_init(__VA_ARGS__, NULL) +#define test_no_malloc_init(...) p_test_no_malloc_init(__VA_ARGS__, NULL) -#define test_skip_if(e) do { \ - if (e) { \ - test_skip("%s:%s:%d: Test skipped: (%s)", \ - __func__, __FILE__, __LINE__, #e); \ - goto label_test_end; \ - } \ -} while (0) +#define test_skip_if(e) \ + do { \ + if (e) { \ + test_skip("%s:%s:%d: Test skipped: (%s)", __func__, \ + __FILE__, __LINE__, #e); \ + goto label_test_end; \ + } \ + } while (0) bool test_is_reentrant(void); -void test_skip(const char *format, ...) JEMALLOC_FORMAT_PRINTF(1, 2); -void test_fail(const char *format, ...) JEMALLOC_FORMAT_PRINTF(1, 2); +void test_skip(const char *format, ...) JEMALLOC_FORMAT_PRINTF(1, 2); +void test_fail(const char *format, ...) JEMALLOC_FORMAT_PRINTF(1, 2); /* For private use by macros. 
*/ -test_status_t p_test(test_t *t, ...); -test_status_t p_test_no_reentrancy(test_t *t, ...); -test_status_t p_test_no_malloc_init(test_t *t, ...); -void p_test_init(const char *name); -void p_test_fini(void); -void p_test_fail(bool may_abort, const char *prefix, const char *message); +test_status_t p_test(test_t *t, ...); +test_status_t p_test_no_reentrancy(test_t *t, ...); +test_status_t p_test_no_malloc_init(test_t *t, ...); +void p_test_init(const char *name); +void p_test_fini(void); +void p_test_fail(bool may_abort, const char *prefix, const char *message); diff --git a/test/include/test/timer.h b/test/include/test/timer.h index ace6191b..c1d59eb4 100644 --- a/test/include/test/timer.h +++ b/test/include/test/timer.h @@ -5,7 +5,7 @@ typedef struct { nstime_t t1; } timedelta_t; -void timer_start(timedelta_t *timer); -void timer_stop(timedelta_t *timer); -uint64_t timer_usec(const timedelta_t *timer); -void timer_ratio(timedelta_t *a, timedelta_t *b, char *buf, size_t buflen); +void timer_start(timedelta_t *timer); +void timer_stop(timedelta_t *timer); +uint64_t timer_usec(const timedelta_t *timer); +void timer_ratio(timedelta_t *a, timedelta_t *b, char *buf, size_t buflen); diff --git a/test/integration/MALLOCX_ARENA.c b/test/integration/MALLOCX_ARENA.c index 440ad9ef..c81566a8 100644 --- a/test/integration/MALLOCX_ARENA.c +++ b/test/integration/MALLOCX_ARENA.c @@ -6,27 +6,27 @@ void * thd_start(void *arg) { unsigned thread_ind = (unsigned)(uintptr_t)arg; unsigned arena_ind; - void *p; - size_t sz; + void *p; + size_t sz; sz = sizeof(arena_ind); expect_d_eq(mallctl("arenas.create", (void *)&arena_ind, &sz, NULL, 0), 0, "Error in arenas.create"); if (thread_ind % 4 != 3) { - size_t mib[3]; - size_t miblen = sizeof(mib) / sizeof(size_t); + size_t mib[3]; + size_t miblen = sizeof(mib) / sizeof(size_t); const char *dss_precs[] = {"disabled", "primary", "secondary"}; - unsigned prec_ind = thread_ind % - (sizeof(dss_precs)/sizeof(char*)); + unsigned prec_ind = 
thread_ind + % (sizeof(dss_precs) / sizeof(char *)); const char *dss = dss_precs[prec_ind]; int expected_err = (have_dss || prec_ind == 0) ? 0 : EFAULT; expect_d_eq(mallctlnametomib("arena.0.dss", mib, &miblen), 0, "Error in mallctlnametomib()"); mib[1] = arena_ind; expect_d_eq(mallctlbymib(mib, miblen, NULL, NULL, (void *)&dss, - sizeof(const char *)), expected_err, - "Error in mallctlbymib()"); + sizeof(const char *)), + expected_err, "Error in mallctlbymib()"); } p = mallocx(1, MALLOCX_ARENA(arena_ind)); @@ -37,12 +37,11 @@ thd_start(void *arg) { } TEST_BEGIN(test_MALLOCX_ARENA) { - thd_t thds[NTHREADS]; + thd_t thds[NTHREADS]; unsigned i; for (i = 0; i < NTHREADS; i++) { - thd_create(&thds[i], thd_start, - (void *)(uintptr_t)i); + thd_create(&thds[i], thd_start, (void *)(uintptr_t)i); } for (i = 0; i < NTHREADS; i++) { @@ -53,6 +52,5 @@ TEST_END int main(void) { - return test( - test_MALLOCX_ARENA); + return test(test_MALLOCX_ARENA); } diff --git a/test/integration/aligned_alloc.c b/test/integration/aligned_alloc.c index b37d5ba0..1cf2a2f1 100644 --- a/test/integration/aligned_alloc.c +++ b/test/integration/aligned_alloc.c @@ -15,7 +15,7 @@ purge(void) { TEST_BEGIN(test_alignment_errors) { size_t alignment; - void *p; + void *p; alignment = 0; set_errno(0); @@ -24,17 +24,15 @@ TEST_BEGIN(test_alignment_errors) { "Expected error for invalid alignment %zu", alignment); for (alignment = sizeof(size_t); alignment < MAXALIGN; - alignment <<= 1) { + alignment <<= 1) { set_errno(0); p = aligned_alloc(alignment + 1, 1); expect_false(p != NULL || get_errno() != EINVAL, - "Expected error for invalid alignment %zu", - alignment + 1); + "Expected error for invalid alignment %zu", alignment + 1); } } TEST_END - /* * GCC "-Walloc-size-larger-than" warning detects when one of the memory * allocation functions is called with a size larger than the maximum size that @@ -47,33 +45,31 @@ JEMALLOC_DIAGNOSTIC_IGNORE_ALLOC_SIZE_LARGER_THAN TEST_BEGIN(test_oom_errors) { size_t 
alignment, size; - void *p; + void *p; #if LG_SIZEOF_PTR == 3 alignment = UINT64_C(0x8000000000000000); - size = UINT64_C(0x8000000000000000); + size = UINT64_C(0x8000000000000000); #else alignment = 0x80000000LU; - size = 0x80000000LU; + size = 0x80000000LU; #endif set_errno(0); p = aligned_alloc(alignment, size); expect_false(p != NULL || get_errno() != ENOMEM, - "Expected error for aligned_alloc(%zu, %zu)", - alignment, size); + "Expected error for aligned_alloc(%zu, %zu)", alignment, size); #if LG_SIZEOF_PTR == 3 alignment = UINT64_C(0x4000000000000000); - size = UINT64_C(0xc000000000000001); + size = UINT64_C(0xc000000000000001); #else alignment = 0x40000000LU; - size = 0xc0000001LU; + size = 0xc0000001LU; #endif set_errno(0); p = aligned_alloc(alignment, size); expect_false(p != NULL || get_errno() != ENOMEM, - "Expected error for aligned_alloc(%zu, %zu)", - alignment, size); + "Expected error for aligned_alloc(%zu, %zu)", alignment, size); alignment = 0x10LU; #if LG_SIZEOF_PTR == 3 @@ -84,8 +80,7 @@ TEST_BEGIN(test_oom_errors) { set_errno(0); p = aligned_alloc(alignment, size); expect_false(p != NULL || get_errno() != ENOMEM, - "Expected error for aligned_alloc(&p, %zu, %zu)", - alignment, size); + "Expected error for aligned_alloc(&p, %zu, %zu)", alignment, size); } TEST_END @@ -94,21 +89,18 @@ JEMALLOC_DIAGNOSTIC_POP TEST_BEGIN(test_alignment_and_size) { #define NITER 4 - size_t alignment, size, total; + size_t alignment, size, total; unsigned i; - void *ps[NITER]; + void *ps[NITER]; for (i = 0; i < NITER; i++) { ps[i] = NULL; } - for (alignment = 8; - alignment <= MAXALIGN; - alignment <<= 1) { + for (alignment = 8; alignment <= MAXALIGN; alignment <<= 1) { total = 0; - for (size = 1; - size < 3 * alignment && size < (1U << 31); - size += (alignment >> (LG_SIZEOF_PTR-1)) - 1) { + for (size = 1; size < 3 * alignment && size < (1U << 31); + size += (alignment >> (LG_SIZEOF_PTR - 1)) - 1) { for (i = 0; i < NITER; i++) { ps[i] = aligned_alloc(alignment, 
size); if (ps[i] == NULL) { @@ -149,9 +141,6 @@ TEST_END int main(void) { - return test( - test_alignment_errors, - test_oom_errors, - test_alignment_and_size, - test_zero_alloc); + return test(test_alignment_errors, test_oom_errors, + test_alignment_and_size, test_zero_alloc); } diff --git a/test/integration/allocated.c b/test/integration/allocated.c index 967e0108..2c46d916 100644 --- a/test/integration/allocated.c +++ b/test/integration/allocated.c @@ -2,27 +2,27 @@ void * thd_start(void *arg) { - int err; - void *p; - uint64_t a0, a1, d0, d1; + int err; + void *p; + uint64_t a0, a1, d0, d1; uint64_t *ap0, *ap1, *dp0, *dp1; - size_t sz, usize; + size_t sz, usize; sz = sizeof(a0); if ((err = mallctl("thread.allocated", (void *)&a0, &sz, NULL, 0))) { if (err == ENOENT) { goto label_ENOENT; } - test_fail("%s(): Error in mallctl(): %s", __func__, - strerror(err)); + test_fail( + "%s(): Error in mallctl(): %s", __func__, strerror(err)); } sz = sizeof(ap0); if ((err = mallctl("thread.allocatedp", (void *)&ap0, &sz, NULL, 0))) { if (err == ENOENT) { goto label_ENOENT; } - test_fail("%s(): Error in mallctl(): %s", __func__, - strerror(err)); + test_fail( + "%s(): Error in mallctl(): %s", __func__, strerror(err)); } expect_u64_eq(*ap0, a0, "\"thread.allocatedp\" should provide a pointer to internal " @@ -33,17 +33,17 @@ thd_start(void *arg) { if (err == ENOENT) { goto label_ENOENT; } - test_fail("%s(): Error in mallctl(): %s", __func__, - strerror(err)); + test_fail( + "%s(): Error in mallctl(): %s", __func__, strerror(err)); } sz = sizeof(dp0); - if ((err = mallctl("thread.deallocatedp", (void *)&dp0, &sz, NULL, - 0))) { + if ((err = mallctl( + "thread.deallocatedp", (void *)&dp0, &sz, NULL, 0))) { if (err == ENOENT) { goto label_ENOENT; } - test_fail("%s(): Error in mallctl(): %s", __func__, - strerror(err)); + test_fail( + "%s(): Error in mallctl(): %s", __func__, strerror(err)); } expect_u64_eq(*dp0, d0, "\"thread.deallocatedp\" should provide a pointer to internal " 
@@ -107,10 +107,6 @@ TEST_END int main(void) { /* Run tests multiple times to check for bad interactions. */ - return test( - test_main_thread, - test_subthread, - test_main_thread, - test_subthread, - test_main_thread); + return test(test_main_thread, test_subthread, test_main_thread, + test_subthread, test_main_thread); } diff --git a/test/integration/cpp/basic.cpp b/test/integration/cpp/basic.cpp index c1cf6cd8..e0341176 100644 --- a/test/integration/cpp/basic.cpp +++ b/test/integration/cpp/basic.cpp @@ -19,6 +19,5 @@ TEST_END int main() { - return test( - test_basic); + return test(test_basic); } diff --git a/test/integration/cpp/infallible_new_false.cpp b/test/integration/cpp/infallible_new_false.cpp index 42196d6a..5ba4f49e 100644 --- a/test/integration/cpp/infallible_new_false.cpp +++ b/test/integration/cpp/infallible_new_false.cpp @@ -17,7 +17,5 @@ TEST_END int main(void) { - return test( - test_failing_alloc); + return test(test_failing_alloc); } - diff --git a/test/integration/cpp/infallible_new_true.cpp b/test/integration/cpp/infallible_new_true.cpp index 3b2862bd..300bdd85 100644 --- a/test/integration/cpp/infallible_new_true.cpp +++ b/test/integration/cpp/infallible_new_true.cpp @@ -8,7 +8,8 @@ */ typedef void (*abort_hook_t)(const char *message); bool fake_abort_called; -void fake_abort(const char *message) { +void +fake_abort(const char *message) { const char *expected_start = ": Allocation of size"; if (strncmp(message, expected_start, strlen(expected_start)) != 0) { abort(); @@ -19,7 +20,7 @@ void fake_abort(const char *message) { static bool own_operator_new(void) { uint64_t before, after; - size_t sz = sizeof(before); + size_t sz = sizeof(before); /* thread.allocated is always available, even w/o config_stats. 
*/ expect_d_eq(mallctl("thread.allocated", (void *)&before, &sz, NULL, 0), @@ -35,8 +36,8 @@ own_operator_new(void) { TEST_BEGIN(test_failing_alloc) { abort_hook_t abort_hook = &fake_abort; expect_d_eq(mallctl("experimental.hooks.safety_check_abort", NULL, NULL, - (void *)&abort_hook, sizeof(abort_hook)), 0, - "Unexpected mallctl failure setting abort hook"); + (void *)&abort_hook, sizeof(abort_hook)), + 0, "Unexpected mallctl failure setting abort hook"); /* * Not owning operator new is only expected to happen on MinGW which @@ -61,6 +62,5 @@ TEST_END int main(void) { - return test( - test_failing_alloc); + return test(test_failing_alloc); } diff --git a/test/integration/extent.c b/test/integration/extent.c index 7a028f18..c15bf761 100644 --- a/test/integration/extent.c +++ b/test/integration/extent.c @@ -6,26 +6,29 @@ static void test_extent_body(unsigned arena_ind) { - void *p; + void *p; size_t large0, large1, large2, sz; size_t purge_mib[3]; size_t purge_miblen; - int flags; - bool xallocx_success_a, xallocx_success_b, xallocx_success_c; + int flags; + bool xallocx_success_a, xallocx_success_b, xallocx_success_c; flags = MALLOCX_ARENA(arena_ind) | MALLOCX_TCACHE_NONE; /* Get large size classes. 
*/ sz = sizeof(size_t); - expect_d_eq(mallctl("arenas.lextent.0.size", (void *)&large0, &sz, NULL, - 0), 0, "Unexpected arenas.lextent.0.size failure"); - expect_d_eq(mallctl("arenas.lextent.1.size", (void *)&large1, &sz, NULL, - 0), 0, "Unexpected arenas.lextent.1.size failure"); - expect_d_eq(mallctl("arenas.lextent.2.size", (void *)&large2, &sz, NULL, - 0), 0, "Unexpected arenas.lextent.2.size failure"); + expect_d_eq( + mallctl("arenas.lextent.0.size", (void *)&large0, &sz, NULL, 0), 0, + "Unexpected arenas.lextent.0.size failure"); + expect_d_eq( + mallctl("arenas.lextent.1.size", (void *)&large1, &sz, NULL, 0), 0, + "Unexpected arenas.lextent.1.size failure"); + expect_d_eq( + mallctl("arenas.lextent.2.size", (void *)&large2, &sz, NULL, 0), 0, + "Unexpected arenas.lextent.2.size failure"); /* Test dalloc/decommit/purge cascade. */ - purge_miblen = sizeof(purge_mib)/sizeof(size_t); + purge_miblen = sizeof(purge_mib) / sizeof(size_t); expect_d_eq(mallctlnametomib("arena.0.purge", purge_mib, &purge_miblen), 0, "Unexpected mallctlnametomib() failure"); purge_mib[1] = (size_t)arena_ind; @@ -47,8 +50,8 @@ test_extent_body(unsigned arena_ind) { if (xallocx_success_a) { expect_true(called_dalloc, "Expected dalloc call"); expect_true(called_decommit, "Expected decommit call"); - expect_true(did_purge_lazy || did_purge_forced, - "Expected purge"); + expect_true( + did_purge_lazy || did_purge_forced, "Expected purge"); expect_true(called_split, "Expected split call"); } dallocx(p, flags); @@ -72,8 +75,8 @@ test_extent_body(unsigned arena_ind) { } xallocx_success_c = (xallocx(p, large0 * 2, 0, flags) == large0 * 2); if (did_split) { - expect_b_eq(did_decommit, did_commit, - "Expected decommit/commit match"); + expect_b_eq( + did_decommit, did_commit, "Expected decommit/commit match"); } if (xallocx_success_b && xallocx_success_c) { expect_true(did_merge, "Expected merge"); @@ -90,33 +93,34 @@ test_extent_body(unsigned arena_ind) { static void 
test_manual_hook_auto_arena(void) { - unsigned narenas; - size_t old_size, new_size, sz; - size_t hooks_mib[3]; - size_t hooks_miblen; + unsigned narenas; + size_t old_size, new_size, sz; + size_t hooks_mib[3]; + size_t hooks_miblen; extent_hooks_t *new_hooks, *old_hooks; extent_hooks_prep(); sz = sizeof(unsigned); /* Get number of auto arenas. */ - expect_d_eq(mallctl("opt.narenas", (void *)&narenas, &sz, NULL, 0), - 0, "Unexpected mallctl() failure"); + expect_d_eq(mallctl("opt.narenas", (void *)&narenas, &sz, NULL, 0), 0, + "Unexpected mallctl() failure"); if (narenas == 1) { return; } /* Install custom extent hooks on arena 1 (might not be initialized). */ - hooks_miblen = sizeof(hooks_mib)/sizeof(size_t); - expect_d_eq(mallctlnametomib("arena.0.extent_hooks", hooks_mib, - &hooks_miblen), 0, "Unexpected mallctlnametomib() failure"); + hooks_miblen = sizeof(hooks_mib) / sizeof(size_t); + expect_d_eq( + mallctlnametomib("arena.0.extent_hooks", hooks_mib, &hooks_miblen), + 0, "Unexpected mallctlnametomib() failure"); hooks_mib[1] = 1; old_size = sizeof(extent_hooks_t *); new_hooks = &hooks; new_size = sizeof(extent_hooks_t *); expect_d_eq(mallctlbymib(hooks_mib, hooks_miblen, (void *)&old_hooks, - &old_size, (void *)&new_hooks, new_size), 0, - "Unexpected extent_hooks error"); + &old_size, (void *)&new_hooks, new_size), + 0, "Unexpected extent_hooks error"); static bool auto_arena_created = false; if (old_hooks != &hooks) { expect_b_eq(auto_arena_created, false, @@ -127,10 +131,10 @@ test_manual_hook_auto_arena(void) { static void test_manual_hook_body(void) { - unsigned arena_ind; - size_t old_size, new_size, sz; - size_t hooks_mib[3]; - size_t hooks_miblen; + unsigned arena_ind; + size_t old_size, new_size, sz; + size_t hooks_mib[3]; + size_t hooks_miblen; extent_hooks_t *new_hooks, *old_hooks; extent_hooks_prep(); @@ -140,16 +144,17 @@ test_manual_hook_body(void) { 0, "Unexpected mallctl() failure"); /* Install custom extent hooks. 
*/ - hooks_miblen = sizeof(hooks_mib)/sizeof(size_t); - expect_d_eq(mallctlnametomib("arena.0.extent_hooks", hooks_mib, - &hooks_miblen), 0, "Unexpected mallctlnametomib() failure"); + hooks_miblen = sizeof(hooks_mib) / sizeof(size_t); + expect_d_eq( + mallctlnametomib("arena.0.extent_hooks", hooks_mib, &hooks_miblen), + 0, "Unexpected mallctlnametomib() failure"); hooks_mib[1] = (size_t)arena_ind; old_size = sizeof(extent_hooks_t *); new_hooks = &hooks; new_size = sizeof(extent_hooks_t *); expect_d_eq(mallctlbymib(hooks_mib, hooks_miblen, (void *)&old_hooks, - &old_size, (void *)&new_hooks, new_size), 0, - "Unexpected extent_hooks error"); + &old_size, (void *)&new_hooks, new_size), + 0, "Unexpected extent_hooks error"); expect_ptr_ne(old_hooks->alloc, extent_alloc_hook, "Unexpected extent_hooks error"); expect_ptr_ne(old_hooks->dalloc, extent_dalloc_hook, @@ -173,10 +178,13 @@ test_manual_hook_body(void) { /* Restore extent hooks. */ expect_d_eq(mallctlbymib(hooks_mib, hooks_miblen, NULL, NULL, - (void *)&old_hooks, new_size), 0, "Unexpected extent_hooks error"); + (void *)&old_hooks, new_size), + 0, "Unexpected extent_hooks error"); expect_d_eq(mallctlbymib(hooks_mib, hooks_miblen, (void *)&old_hooks, - &old_size, NULL, 0), 0, "Unexpected extent_hooks error"); - expect_ptr_eq(old_hooks, default_hooks, "Unexpected extent_hooks error"); + &old_size, NULL, 0), + 0, "Unexpected extent_hooks error"); + expect_ptr_eq( + old_hooks, default_hooks, "Unexpected extent_hooks error"); expect_ptr_eq(old_hooks->alloc, default_hooks->alloc, "Unexpected extent_hooks error"); expect_ptr_eq(old_hooks->dalloc, default_hooks->dalloc, @@ -213,8 +221,8 @@ TEST_BEGIN(test_extent_manual_hook) { TEST_END TEST_BEGIN(test_extent_auto_hook) { - unsigned arena_ind; - size_t new_size, sz; + unsigned arena_ind; + size_t new_size, sz; extent_hooks_t *new_hooks; extent_hooks_prep(); @@ -223,7 +231,8 @@ TEST_BEGIN(test_extent_auto_hook) { new_hooks = &hooks; new_size = sizeof(extent_hooks_t *); 
expect_d_eq(mallctl("arenas.create", (void *)&arena_ind, &sz, - (void *)&new_hooks, new_size), 0, "Unexpected mallctl() failure"); + (void *)&new_hooks, new_size), + 0, "Unexpected mallctl() failure"); test_skip_if(is_background_thread_enabled()); test_extent_body(arena_ind); @@ -231,19 +240,18 @@ TEST_BEGIN(test_extent_auto_hook) { TEST_END static void -test_arenas_create_ext_base(arena_config_t config, - bool expect_hook_data, bool expect_hook_metadata) -{ +test_arenas_create_ext_base( + arena_config_t config, bool expect_hook_data, bool expect_hook_metadata) { unsigned arena, arena1; - void *ptr; - size_t sz = sizeof(unsigned); + void *ptr; + size_t sz = sizeof(unsigned); extent_hooks_prep(); called_alloc = false; - expect_d_eq(mallctl("experimental.arenas_create_ext", - (void *)&arena, &sz, &config, sizeof(arena_config_t)), 0, - "Unexpected mallctl() failure"); + expect_d_eq(mallctl("experimental.arenas_create_ext", (void *)&arena, + &sz, &config, sizeof(arena_config_t)), + 0, "Unexpected mallctl() failure"); expect_b_eq(called_alloc, expect_hook_metadata, "expected hook metadata alloc mismatch"); @@ -279,9 +287,7 @@ TEST_END int main(void) { - return test( - test_extent_manual_hook, - test_extent_auto_hook, + return test(test_extent_manual_hook, test_extent_auto_hook, test_arenas_create_ext_with_ehooks_no_metadata, test_arenas_create_ext_with_ehooks_with_metadata); } diff --git a/test/integration/malloc.c b/test/integration/malloc.c index ef449163..a77e44a6 100644 --- a/test/integration/malloc.c +++ b/test/integration/malloc.c @@ -11,6 +11,5 @@ TEST_END int main(void) { - return test( - test_zero_alloc); + return test(test_zero_alloc); } diff --git a/test/integration/mallocx.c b/test/integration/mallocx.c index fdf1e3f4..c7ed0fb9 100644 --- a/test/integration/mallocx.c +++ b/test/integration/mallocx.c @@ -3,7 +3,7 @@ static unsigned get_nsizes_impl(const char *cmd) { unsigned ret; - size_t z; + size_t z; z = sizeof(unsigned); expect_d_eq(mallctl(cmd, (void 
*)&ret, &z, NULL, 0), 0, @@ -25,12 +25,12 @@ get_size_impl(const char *cmd, size_t ind) { size_t miblen = 4; z = sizeof(size_t); - expect_d_eq(mallctlnametomib(cmd, mib, &miblen), - 0, "Unexpected mallctlnametomib(\"%s\", ...) failure", cmd); + expect_d_eq(mallctlnametomib(cmd, mib, &miblen), 0, + "Unexpected mallctlnametomib(\"%s\", ...) failure", cmd); mib[2] = ind; z = sizeof(size_t); - expect_d_eq(mallctlbymib(mib, miblen, (void *)&ret, &z, NULL, 0), - 0, "Unexpected mallctlbymib([\"%s\", %zu], ...) failure", cmd, ind); + expect_d_eq(mallctlbymib(mib, miblen, (void *)&ret, &z, NULL, 0), 0, + "Unexpected mallctlbymib([\"%s\", %zu], ...) failure", cmd, ind); return ret; } @@ -64,36 +64,37 @@ JEMALLOC_DIAGNOSTIC_IGNORE_ALLOC_SIZE_LARGER_THAN TEST_BEGIN(test_overflow) { size_t largemax; - largemax = get_large_size(get_nlarge()-1); + largemax = get_large_size(get_nlarge() - 1); - expect_ptr_null(mallocx(largemax+1, 0), - "Expected OOM for mallocx(size=%#zx, 0)", largemax+1); + expect_ptr_null(mallocx(largemax + 1, 0), + "Expected OOM for mallocx(size=%#zx, 0)", largemax + 1); - expect_ptr_null(mallocx(ZU(PTRDIFF_MAX)+1, 0), - "Expected OOM for mallocx(size=%#zx, 0)", ZU(PTRDIFF_MAX)+1); + expect_ptr_null(mallocx(ZU(PTRDIFF_MAX) + 1, 0), + "Expected OOM for mallocx(size=%#zx, 0)", ZU(PTRDIFF_MAX) + 1); expect_ptr_null(mallocx(SIZE_T_MAX, 0), "Expected OOM for mallocx(size=%#zx, 0)", SIZE_T_MAX); - expect_ptr_null(mallocx(1, MALLOCX_ALIGN(ZU(PTRDIFF_MAX)+1)), + expect_ptr_null(mallocx(1, MALLOCX_ALIGN(ZU(PTRDIFF_MAX) + 1)), "Expected OOM for mallocx(size=1, MALLOCX_ALIGN(%#zx))", - ZU(PTRDIFF_MAX)+1); + ZU(PTRDIFF_MAX) + 1); } TEST_END static void * remote_alloc(void *arg) { unsigned arena; - size_t sz = sizeof(unsigned); + size_t sz = sizeof(unsigned); expect_d_eq(mallctl("arenas.create", (void *)&arena, &sz, NULL, 0), 0, "Unexpected mallctl() failure"); size_t large_sz; sz = sizeof(size_t); - expect_d_eq(mallctl("arenas.lextent.0.size", (void *)&large_sz, &sz, - 
NULL, 0), 0, "Unexpected mallctl failure"); + expect_d_eq( + mallctl("arenas.lextent.0.size", (void *)&large_sz, &sz, NULL, 0), + 0, "Unexpected mallctl failure"); - void *ptr = mallocx(large_sz, MALLOCX_ARENA(arena) - | MALLOCX_TCACHE_NONE); + void *ptr = mallocx( + large_sz, MALLOCX_ARENA(arena) | MALLOCX_TCACHE_NONE); void **ret = (void **)arg; *ret = ptr; @@ -114,16 +115,16 @@ TEST_BEGIN(test_remote_free) { TEST_END TEST_BEGIN(test_oom) { - size_t largemax; - bool oom; - void *ptrs[3]; + size_t largemax; + bool oom; + void *ptrs[3]; unsigned i; /* * It should be impossible to allocate three objects that each consume * nearly half the virtual address space. */ - largemax = get_large_size(get_nlarge()-1); + largemax = get_large_size(get_nlarge() - 1); oom = false; for (i = 0; i < sizeof(ptrs) / sizeof(void *); i++) { ptrs[i] = mallocx(largemax, MALLOCX_ARENA(0)); @@ -143,10 +144,10 @@ TEST_BEGIN(test_oom) { #if LG_SIZEOF_PTR == 3 expect_ptr_null(mallocx(0x8000000000000000ULL, - MALLOCX_ALIGN(0x8000000000000000ULL)), + MALLOCX_ALIGN(0x8000000000000000ULL)), "Expected OOM for mallocx()"); - expect_ptr_null(mallocx(0x8000000000000000ULL, - MALLOCX_ALIGN(0x80000000)), + expect_ptr_null( + mallocx(0x8000000000000000ULL, MALLOCX_ALIGN(0x80000000)), "Expected OOM for mallocx()"); #else expect_ptr_null(mallocx(0x80000000UL, MALLOCX_ALIGN(0x80000000UL)), @@ -164,20 +165,20 @@ TEST_BEGIN(test_basic) { for (sz = 1; sz < MAXSZ; sz = nallocx(sz, 0) + 1) { size_t nsz, rsz; - void *p; + void *p; nsz = nallocx(sz, 0); expect_zu_ne(nsz, 0, "Unexpected nallocx() error"); p = mallocx(sz, 0); - expect_ptr_not_null(p, - "Unexpected mallocx(size=%zx, flags=0) error", sz); + expect_ptr_not_null( + p, "Unexpected mallocx(size=%zx, flags=0) error", sz); rsz = sallocx(p, 0); expect_zu_ge(rsz, sz, "Real size smaller than expected"); expect_zu_eq(nsz, rsz, "nallocx()/sallocx() size mismatch"); dallocx(p, 0); p = mallocx(sz, 0); - expect_ptr_not_null(p, - "Unexpected mallocx(size=%zx, 
flags=0) error", sz); + expect_ptr_not_null( + p, "Unexpected mallocx(size=%zx, flags=0) error", sz); dallocx(p, 0); nsz = nallocx(sz, MALLOCX_ZERO); @@ -197,53 +198,57 @@ TEST_END TEST_BEGIN(test_alignment_and_size) { const char *percpu_arena; - size_t sz = sizeof(percpu_arena); + size_t sz = sizeof(percpu_arena); - if(mallctl("opt.percpu_arena", (void *)&percpu_arena, &sz, NULL, 0) || - strcmp(percpu_arena, "disabled") != 0) { - test_skip("test_alignment_and_size skipped: " + if (mallctl("opt.percpu_arena", (void *)&percpu_arena, &sz, NULL, 0) + || strcmp(percpu_arena, "disabled") != 0) { + test_skip( + "test_alignment_and_size skipped: " "not working with percpu arena."); }; #define MAXALIGN (((size_t)1) << 23) #define NITER 4 - size_t nsz, rsz, alignment, total; + size_t nsz, rsz, alignment, total; unsigned i; - void *ps[NITER]; + void *ps[NITER]; for (i = 0; i < NITER; i++) { ps[i] = NULL; } - for (alignment = 8; - alignment <= MAXALIGN; - alignment <<= 1) { + for (alignment = 8; alignment <= MAXALIGN; alignment <<= 1) { total = 0; - for (sz = 1; - sz < 3 * alignment && sz < (1U << 31); - sz += (alignment >> (LG_SIZEOF_PTR-1)) - 1) { + for (sz = 1; sz < 3 * alignment && sz < (1U << 31); + sz += (alignment >> (LG_SIZEOF_PTR - 1)) - 1) { for (i = 0; i < NITER; i++) { - nsz = nallocx(sz, MALLOCX_ALIGN(alignment) | - MALLOCX_ZERO | MALLOCX_ARENA(0)); + nsz = nallocx(sz, + MALLOCX_ALIGN(alignment) | MALLOCX_ZERO + | MALLOCX_ARENA(0)); expect_zu_ne(nsz, 0, "nallocx() error for alignment=%zu, " - "size=%zu (%#zx)", alignment, sz, sz); - ps[i] = mallocx(sz, MALLOCX_ALIGN(alignment) | - MALLOCX_ZERO | MALLOCX_ARENA(0)); + "size=%zu (%#zx)", + alignment, sz, sz); + ps[i] = mallocx(sz, + MALLOCX_ALIGN(alignment) | MALLOCX_ZERO + | MALLOCX_ARENA(0)); expect_ptr_not_null(ps[i], "mallocx() error for alignment=%zu, " - "size=%zu (%#zx)", alignment, sz, sz); + "size=%zu (%#zx)", + alignment, sz, sz); rsz = sallocx(ps[i], 0); expect_zu_ge(rsz, sz, "Real size smaller than 
expected for " - "alignment=%zu, size=%zu", alignment, sz); + "alignment=%zu, size=%zu", + alignment, sz); expect_zu_eq(nsz, rsz, "nallocx()/sallocx() size mismatch for " - "alignment=%zu, size=%zu", alignment, sz); - expect_ptr_null( - (void *)((uintptr_t)ps[i] & (alignment-1)), - "%p inadequately aligned for" - " alignment=%zu, size=%zu", ps[i], + "alignment=%zu, size=%zu", alignment, sz); + expect_ptr_null((void *)((uintptr_t)ps[i] + & (alignment - 1)), + "%p inadequately aligned for" + " alignment=%zu, size=%zu", + ps[i], alignment, sz); total += rsz; if (total >= (MAXALIGN << 1)) { break; @@ -265,10 +270,6 @@ TEST_END int main(void) { - return test( - test_overflow, - test_oom, - test_remote_free, - test_basic, + return test(test_overflow, test_oom, test_remote_free, test_basic, test_alignment_and_size); } diff --git a/test/integration/overflow.c b/test/integration/overflow.c index ce63327c..17282e84 100644 --- a/test/integration/overflow.c +++ b/test/integration/overflow.c @@ -12,13 +12,14 @@ JEMALLOC_DIAGNOSTIC_IGNORE_ALLOC_SIZE_LARGER_THAN TEST_BEGIN(test_overflow) { unsigned nlextents; - size_t mib[4]; - size_t sz, miblen, max_size_class; - void *p; + size_t mib[4]; + size_t sz, miblen, max_size_class; + void *p; sz = sizeof(unsigned); - expect_d_eq(mallctl("arenas.nlextents", (void *)&nlextents, &sz, NULL, - 0), 0, "Unexpected mallctl() error"); + expect_d_eq( + mallctl("arenas.nlextents", (void *)&nlextents, &sz, NULL, 0), 0, + "Unexpected mallctl() error"); miblen = sizeof(mib) / sizeof(size_t); expect_d_eq(mallctlnametomib("arenas.lextent.0.size", mib, &miblen), 0, @@ -26,8 +27,9 @@ TEST_BEGIN(test_overflow) { mib[2] = nlextents - 1; sz = sizeof(size_t); - expect_d_eq(mallctlbymib(mib, miblen, (void *)&max_size_class, &sz, - NULL, 0), 0, "Unexpected mallctlbymib() error"); + expect_d_eq( + mallctlbymib(mib, miblen, (void *)&max_size_class, &sz, NULL, 0), 0, + "Unexpected mallctlbymib() error"); expect_ptr_null(malloc(max_size_class + 1), "Expected OOM 
due to over-sized allocation request"); @@ -54,6 +56,5 @@ JEMALLOC_DIAGNOSTIC_POP int main(void) { - return test( - test_overflow); + return test(test_overflow); } diff --git a/test/integration/posix_memalign.c b/test/integration/posix_memalign.c index 2da0549b..e0df56f3 100644 --- a/test/integration/posix_memalign.c +++ b/test/integration/posix_memalign.c @@ -15,48 +15,44 @@ purge(void) { TEST_BEGIN(test_alignment_errors) { size_t alignment; - void *p; + void *p; for (alignment = 0; alignment < sizeof(void *); alignment++) { expect_d_eq(posix_memalign(&p, alignment, 1), EINVAL, - "Expected error for invalid alignment %zu", - alignment); + "Expected error for invalid alignment %zu", alignment); } for (alignment = sizeof(size_t); alignment < MAXALIGN; - alignment <<= 1) { + alignment <<= 1) { expect_d_ne(posix_memalign(&p, alignment + 1, 1), 0, - "Expected error for invalid alignment %zu", - alignment + 1); + "Expected error for invalid alignment %zu", alignment + 1); } } TEST_END TEST_BEGIN(test_oom_errors) { size_t alignment, size; - void *p; + void *p; #if LG_SIZEOF_PTR == 3 alignment = UINT64_C(0x8000000000000000); - size = UINT64_C(0x8000000000000000); + size = UINT64_C(0x8000000000000000); #else alignment = 0x80000000LU; - size = 0x80000000LU; + size = 0x80000000LU; #endif expect_d_ne(posix_memalign(&p, alignment, size), 0, - "Expected error for posix_memalign(&p, %zu, %zu)", - alignment, size); + "Expected error for posix_memalign(&p, %zu, %zu)", alignment, size); #if LG_SIZEOF_PTR == 3 alignment = UINT64_C(0x4000000000000000); - size = UINT64_C(0xc000000000000001); + size = UINT64_C(0xc000000000000001); #else alignment = 0x40000000LU; - size = 0xc0000001LU; + size = 0xc0000001LU; #endif expect_d_ne(posix_memalign(&p, alignment, size), 0, - "Expected error for posix_memalign(&p, %zu, %zu)", - alignment, size); + "Expected error for posix_memalign(&p, %zu, %zu)", alignment, size); alignment = 0x10LU; #if LG_SIZEOF_PTR == 3 @@ -65,33 +61,29 @@ 
TEST_BEGIN(test_oom_errors) { size = 0xfffffff0LU; #endif expect_d_ne(posix_memalign(&p, alignment, size), 0, - "Expected error for posix_memalign(&p, %zu, %zu)", - alignment, size); + "Expected error for posix_memalign(&p, %zu, %zu)", alignment, size); } TEST_END TEST_BEGIN(test_alignment_and_size) { #define NITER 4 - size_t alignment, size, total; + size_t alignment, size, total; unsigned i; - int err; - void *ps[NITER]; + int err; + void *ps[NITER]; for (i = 0; i < NITER; i++) { ps[i] = NULL; } - for (alignment = 8; - alignment <= MAXALIGN; - alignment <<= 1) { + for (alignment = 8; alignment <= MAXALIGN; alignment <<= 1) { total = 0; - for (size = 0; - size < 3 * alignment && size < (1U << 31); - size += ((size == 0) ? 1 : - (alignment >> (LG_SIZEOF_PTR-1)) - 1)) { + for (size = 0; size < 3 * alignment && size < (1U << 31); + size += ((size == 0) + ? 1 + : (alignment >> (LG_SIZEOF_PTR - 1)) - 1)) { for (i = 0; i < NITER; i++) { - err = posix_memalign(&ps[i], - alignment, size); + err = posix_memalign(&ps[i], alignment, size); if (err) { char buf[BUFERROR_BUF]; @@ -122,7 +114,5 @@ TEST_END int main(void) { return test( - test_alignment_errors, - test_oom_errors, - test_alignment_and_size); + test_alignment_errors, test_oom_errors, test_alignment_and_size); } diff --git a/test/integration/rallocx.c b/test/integration/rallocx.c index 85d9238b..8e822df7 100644 --- a/test/integration/rallocx.c +++ b/test/integration/rallocx.c @@ -3,7 +3,7 @@ static unsigned get_nsizes_impl(const char *cmd) { unsigned ret; - size_t z; + size_t z; z = sizeof(unsigned); expect_d_eq(mallctl(cmd, (void *)&ret, &z, NULL, 0), 0, @@ -25,12 +25,12 @@ get_size_impl(const char *cmd, size_t ind) { size_t miblen = 4; z = sizeof(size_t); - expect_d_eq(mallctlnametomib(cmd, mib, &miblen), - 0, "Unexpected mallctlnametomib(\"%s\", ...) failure", cmd); + expect_d_eq(mallctlnametomib(cmd, mib, &miblen), 0, + "Unexpected mallctlnametomib(\"%s\", ...) 
failure", cmd); mib[2] = ind; z = sizeof(size_t); - expect_d_eq(mallctlbymib(mib, miblen, (void *)&ret, &z, NULL, 0), - 0, "Unexpected mallctlbymib([\"%s\", %zu], ...) failure", cmd, ind); + expect_d_eq(mallctlbymib(mib, miblen, (void *)&ret, &z, NULL, 0), 0, + "Unexpected mallctlbymib([\"%s\", %zu], ...) failure", cmd, ind); return ret; } @@ -58,25 +58,26 @@ TEST_BEGIN(test_grow_and_shrink) { szs[0] = sallocx(p, 0); for (i = 0; i < NCYCLES; i++) { - for (j = 1; j < NSZS && szs[j-1] < MAXSZ; j++) { - q = rallocx(p, szs[j-1]+1, 0); + for (j = 1; j < NSZS && szs[j - 1] < MAXSZ; j++) { + q = rallocx(p, szs[j - 1] + 1, 0); expect_ptr_not_null(q, "Unexpected rallocx() error for size=%zu-->%zu", - szs[j-1], szs[j-1]+1); + szs[j - 1], szs[j - 1] + 1); szs[j] = sallocx(q, 0); - expect_zu_ne(szs[j], szs[j-1]+1, - "Expected size to be at least: %zu", szs[j-1]+1); + expect_zu_ne(szs[j], szs[j - 1] + 1, + "Expected size to be at least: %zu", + szs[j - 1] + 1); p = q; } for (j--; j > 0; j--) { - q = rallocx(p, szs[j-1], 0); + q = rallocx(p, szs[j - 1], 0); expect_ptr_not_null(q, "Unexpected rallocx() error for size=%zu-->%zu", - szs[j], szs[j-1]); + szs[j], szs[j - 1]); tsz = sallocx(q, 0); - expect_zu_eq(tsz, szs[j-1], - "Expected size=%zu, got size=%zu", szs[j-1], tsz); + expect_zu_eq(tsz, szs[j - 1], + "Expected size=%zu, got size=%zu", szs[j - 1], tsz); p = q; } } @@ -99,11 +100,12 @@ validate_fill(void *p, uint8_t c, size_t offset, size_t len) { size_t i; for (i = 0; i < len; i++) { - uint8_t b = buf[offset+i]; + uint8_t b = buf[offset + i]; if (b != c) { - test_fail("Allocation at %p (len=%zu) contains %#x " - "rather than %#x at offset %zu", p, len, b, c, - offset+i); + test_fail( + "Allocation at %p (len=%zu) contains %#x " + "rather than %#x at offset %zu", + p, len, b, c, offset + i); ret = true; } } @@ -118,35 +120,37 @@ TEST_BEGIN(test_zero) { */ void *volatile p, *volatile q; size_t psz, qsz, i, j; - size_t start_sizes[] = {1, 3*1024, 63*1024, 4095*1024}; + size_t 
start_sizes[] = {1, 3 * 1024, 63 * 1024, 4095 * 1024}; #define FILL_BYTE 0xaaU #define RANGE 2048 - for (i = 0; i < sizeof(start_sizes)/sizeof(size_t); i++) { + for (i = 0; i < sizeof(start_sizes) / sizeof(size_t); i++) { size_t start_size = start_sizes[i]; p = mallocx(start_size, MALLOCX_ZERO); expect_ptr_not_null(p, "Unexpected mallocx() error"); psz = sallocx(p, 0); - expect_false(validate_fill(p, 0, 0, psz), - "Expected zeroed memory"); + expect_false( + validate_fill(p, 0, 0, psz), "Expected zeroed memory"); memset(p, FILL_BYTE, psz); expect_false(validate_fill(p, FILL_BYTE, 0, psz), "Expected filled memory"); for (j = 1; j < RANGE; j++) { - q = rallocx(p, start_size+j, MALLOCX_ZERO); + q = rallocx(p, start_size + j, MALLOCX_ZERO); expect_ptr_not_null(q, "Unexpected rallocx() error"); qsz = sallocx(q, 0); if (q != p || qsz != psz) { - expect_false(validate_fill(q, FILL_BYTE, 0, - psz), "Expected filled memory"); - expect_false(validate_fill(q, 0, psz, qsz-psz), + expect_false( + validate_fill(q, FILL_BYTE, 0, psz), + "Expected filled memory"); + expect_false( + validate_fill(q, 0, psz, qsz - psz), "Expected zeroed memory"); } if (psz != qsz) { - memset((void *)((uintptr_t)q+psz), FILL_BYTE, - qsz-psz); + memset((void *)((uintptr_t)q + psz), FILL_BYTE, + qsz - psz); psz = qsz; } p = q; @@ -160,7 +164,7 @@ TEST_BEGIN(test_zero) { TEST_END TEST_BEGIN(test_align) { - void *p, *q; + void *p, *q; size_t align; #define MAX_ALIGN (ZU(1) << 25) @@ -170,12 +174,10 @@ TEST_BEGIN(test_align) { for (align <<= 1; align <= MAX_ALIGN; align <<= 1) { q = rallocx(p, 1, MALLOCX_ALIGN(align)); - expect_ptr_not_null(q, - "Unexpected rallocx() error for align=%zu", align); - expect_ptr_null( - (void *)((uintptr_t)q & (align-1)), - "%p inadequately aligned for align=%zu", - q, align); + expect_ptr_not_null( + q, "Unexpected rallocx() error for align=%zu", align); + expect_ptr_null((void *)((uintptr_t)q & (align - 1)), + "%p inadequately aligned for align=%zu", q, align); p = q; } 
dallocx(p, 0); @@ -191,19 +193,19 @@ TEST_BEGIN(test_align_enum) { for (size_t lg_size = LG_MIN; lg_size <= LG_MAX; ++lg_size) { size_t size = 1 << lg_size; for (size_t lg_align_next = LG_MIN; - lg_align_next <= LG_MAX; ++lg_align_next) { - int flags = MALLOCX_LG_ALIGN(lg_align); + lg_align_next <= LG_MAX; ++lg_align_next) { + int flags = MALLOCX_LG_ALIGN(lg_align); void *p = mallocx(1, flags); - assert_ptr_not_null(p, - "Unexpected mallocx() error"); + assert_ptr_not_null( + p, "Unexpected mallocx() error"); assert_zu_eq(nallocx(1, flags), TEST_MALLOC_SIZE(p), "Wrong mallocx() usable size"); - int flags_next = - MALLOCX_LG_ALIGN(lg_align_next); + int flags_next = MALLOCX_LG_ALIGN( + lg_align_next); p = rallocx(p, size, flags_next); - assert_ptr_not_null(p, - "Unexpected rallocx() error"); + assert_ptr_not_null( + p, "Unexpected rallocx() error"); expect_zu_eq(nallocx(size, flags_next), TEST_MALLOC_SIZE(p), "Wrong rallocx() usable size"); @@ -223,20 +225,20 @@ TEST_BEGIN(test_lg_align_and_zero) { */ void *volatile p, *volatile q; unsigned lg_align; - size_t sz; + size_t sz; #define MAX_LG_ALIGN 25 #define MAX_VALIDATE (ZU(1) << 22) lg_align = 0; - p = mallocx(1, MALLOCX_LG_ALIGN(lg_align)|MALLOCX_ZERO); + p = mallocx(1, MALLOCX_LG_ALIGN(lg_align) | MALLOCX_ZERO); expect_ptr_not_null(p, "Unexpected mallocx() error"); for (lg_align++; lg_align <= MAX_LG_ALIGN; lg_align++) { - q = rallocx(p, 1, MALLOCX_LG_ALIGN(lg_align)|MALLOCX_ZERO); - expect_ptr_not_null(q, - "Unexpected rallocx() error for lg_align=%u", lg_align); + q = rallocx(p, 1, MALLOCX_LG_ALIGN(lg_align) | MALLOCX_ZERO); + expect_ptr_not_null( + q, "Unexpected rallocx() error for lg_align=%u", lg_align); expect_ptr_null( - (void *)((uintptr_t)q & ((ZU(1) << lg_align)-1)), + (void *)((uintptr_t)q & ((ZU(1) << lg_align) - 1)), "%p inadequately aligned for lg_align=%u", q, lg_align); sz = sallocx(q, 0); if ((sz << 1) <= MAX_VALIDATE) { @@ -245,9 +247,10 @@ TEST_BEGIN(test_lg_align_and_zero) { } else { 
expect_false(validate_fill(q, 0, 0, MAX_VALIDATE), "Expected zeroed memory"); - expect_false(validate_fill( - (void *)((uintptr_t)q+sz-MAX_VALIDATE), - 0, 0, MAX_VALIDATE), "Expected zeroed memory"); + expect_false(validate_fill((void *)((uintptr_t)q + sz + - MAX_VALIDATE), + 0, 0, MAX_VALIDATE), + "Expected zeroed memory"); } p = q; } @@ -269,25 +272,25 @@ JEMALLOC_DIAGNOSTIC_IGNORE_ALLOC_SIZE_LARGER_THAN TEST_BEGIN(test_overflow) { size_t largemax; - void *p; + void *p; - largemax = get_large_size(get_nlarge()-1); + largemax = get_large_size(get_nlarge() - 1); p = mallocx(1, 0); expect_ptr_not_null(p, "Unexpected mallocx() failure"); - expect_ptr_null(rallocx(p, largemax+1, 0), - "Expected OOM for rallocx(p, size=%#zx, 0)", largemax+1); + expect_ptr_null(rallocx(p, largemax + 1, 0), + "Expected OOM for rallocx(p, size=%#zx, 0)", largemax + 1); - expect_ptr_null(rallocx(p, ZU(PTRDIFF_MAX)+1, 0), - "Expected OOM for rallocx(p, size=%#zx, 0)", ZU(PTRDIFF_MAX)+1); + expect_ptr_null(rallocx(p, ZU(PTRDIFF_MAX) + 1, 0), + "Expected OOM for rallocx(p, size=%#zx, 0)", ZU(PTRDIFF_MAX) + 1); expect_ptr_null(rallocx(p, SIZE_T_MAX, 0), "Expected OOM for rallocx(p, size=%#zx, 0)", SIZE_T_MAX); - expect_ptr_null(rallocx(p, 1, MALLOCX_ALIGN(ZU(PTRDIFF_MAX)+1)), + expect_ptr_null(rallocx(p, 1, MALLOCX_ALIGN(ZU(PTRDIFF_MAX) + 1)), "Expected OOM for rallocx(p, size=1, MALLOCX_ALIGN(%#zx))", - ZU(PTRDIFF_MAX)+1); + ZU(PTRDIFF_MAX) + 1); dallocx(p, 0); } @@ -298,11 +301,6 @@ JEMALLOC_DIAGNOSTIC_POP int main(void) { - return test( - test_grow_and_shrink, - test_zero, - test_align, - test_align_enum, - test_lg_align_and_zero, - test_overflow); + return test(test_grow_and_shrink, test_zero, test_align, + test_align_enum, test_lg_align_and_zero, test_overflow); } diff --git a/test/integration/sdallocx.c b/test/integration/sdallocx.c index ca014485..ec2fb938 100644 --- a/test/integration/sdallocx.c +++ b/test/integration/sdallocx.c @@ -10,26 +10,23 @@ TEST_BEGIN(test_basic) { TEST_END 
TEST_BEGIN(test_alignment_and_size) { - size_t nsz, sz, alignment, total; + size_t nsz, sz, alignment, total; unsigned i; - void *ps[NITER]; + void *ps[NITER]; for (i = 0; i < NITER; i++) { ps[i] = NULL; } - for (alignment = 8; - alignment <= MAXALIGN; - alignment <<= 1) { + for (alignment = 8; alignment <= MAXALIGN; alignment <<= 1) { total = 0; - for (sz = 1; - sz < 3 * alignment && sz < (1U << 31); - sz += (alignment >> (LG_SIZEOF_PTR-1)) - 1) { + for (sz = 1; sz < 3 * alignment && sz < (1U << 31); + sz += (alignment >> (LG_SIZEOF_PTR - 1)) - 1) { for (i = 0; i < NITER; i++) { - nsz = nallocx(sz, MALLOCX_ALIGN(alignment) | - MALLOCX_ZERO); - ps[i] = mallocx(sz, MALLOCX_ALIGN(alignment) | - MALLOCX_ZERO); + nsz = nallocx(sz, + MALLOCX_ALIGN(alignment) | MALLOCX_ZERO); + ps[i] = mallocx(sz, + MALLOCX_ALIGN(alignment) | MALLOCX_ZERO); total += nsz; if (total >= (MAXALIGN << 1)) { break; @@ -49,7 +46,5 @@ TEST_END int main(void) { - return test_no_reentrancy( - test_basic, - test_alignment_and_size); + return test_no_reentrancy(test_basic, test_alignment_and_size); } diff --git a/test/integration/slab_sizes.c b/test/integration/slab_sizes.c index f6a66f21..f1ff67aa 100644 --- a/test/integration/slab_sizes.c +++ b/test/integration/slab_sizes.c @@ -4,10 +4,10 @@ TEST_BEGIN(test_slab_sizes) { unsigned nbins; - size_t page; - size_t sizemib[4]; - size_t slabmib[4]; - size_t len; + size_t page; + size_t sizemib[4]; + size_t slabmib[4]; + size_t len; len = sizeof(nbins); expect_d_eq(mallctl("arenas.nbins", &nbins, &len, NULL, 0), 0, @@ -33,12 +33,14 @@ TEST_BEGIN(test_slab_sizes) { len = sizeof(size_t); sizemib[2] = i; slabmib[2] = i; - expect_d_eq(mallctlbymib(sizemib, 4, (void *)&bin_size, &len, - NULL, 0), 0, "bin size mallctlbymib failure"); + expect_d_eq( + mallctlbymib(sizemib, 4, (void *)&bin_size, &len, NULL, 0), + 0, "bin size mallctlbymib failure"); len = sizeof(size_t); - expect_d_eq(mallctlbymib(slabmib, 4, (void *)&slab_size, &len, - NULL, 0), 0, "slab size 
mallctlbymib failure"); + expect_d_eq( + mallctlbymib(slabmib, 4, (void *)&slab_size, &len, NULL, 0), + 0, "slab size mallctlbymib failure"); if (bin_size < 100) { /* @@ -51,8 +53,7 @@ TEST_BEGIN(test_slab_sizes) { expect_zu_ge(slab_size, biggest_slab_seen, "Slab sizes should go up"); biggest_slab_seen = slab_size; - } else if ( - (100 <= bin_size && bin_size < 128) + } else if ((100 <= bin_size && bin_size < 128) || (128 < bin_size && bin_size <= 200)) { expect_zu_eq(slab_size, page, "Forced-small slabs should be small"); @@ -75,6 +76,5 @@ TEST_END int main(void) { - return test( - test_slab_sizes); + return test(test_slab_sizes); } diff --git a/test/integration/smallocx.c b/test/integration/smallocx.c index 389319b7..186a6492 100644 --- a/test/integration/smallocx.c +++ b/test/integration/smallocx.c @@ -5,25 +5,24 @@ #define STR(x) STR_HELPER(x) #ifndef JEMALLOC_VERSION_GID_IDENT - #error "JEMALLOC_VERSION_GID_IDENT not defined" +# error "JEMALLOC_VERSION_GID_IDENT not defined" #endif -#define JOIN(x, y) x ## y +#define JOIN(x, y) x##y #define JOIN2(x, y) JOIN(x, y) #define smallocx JOIN2(smallocx_, JEMALLOC_VERSION_GID_IDENT) typedef struct { - void *ptr; + void *ptr; size_t size; } smallocx_return_t; -extern smallocx_return_t -smallocx(size_t size, int flags); +extern smallocx_return_t smallocx(size_t size, int flags); static unsigned get_nsizes_impl(const char *cmd) { unsigned ret; - size_t z; + size_t z; z = sizeof(unsigned); expect_d_eq(mallctl(cmd, (void *)&ret, &z, NULL, 0), 0, @@ -45,12 +44,12 @@ get_size_impl(const char *cmd, size_t ind) { size_t miblen = 4; z = sizeof(size_t); - expect_d_eq(mallctlnametomib(cmd, mib, &miblen), - 0, "Unexpected mallctlnametomib(\"%s\", ...) failure", cmd); + expect_d_eq(mallctlnametomib(cmd, mib, &miblen), 0, + "Unexpected mallctlnametomib(\"%s\", ...) failure", cmd); mib[2] = ind; z = sizeof(size_t); - expect_d_eq(mallctlbymib(mib, miblen, (void *)&ret, &z, NULL, 0), - 0, "Unexpected mallctlbymib([\"%s\", %zu], ...) 
failure", cmd, ind); + expect_d_eq(mallctlbymib(mib, miblen, (void *)&ret, &z, NULL, 0), 0, + "Unexpected mallctlbymib([\"%s\", %zu], ...) failure", cmd, ind); return ret; } @@ -84,36 +83,37 @@ JEMALLOC_DIAGNOSTIC_IGNORE_ALLOC_SIZE_LARGER_THAN TEST_BEGIN(test_overflow) { size_t largemax; - largemax = get_large_size(get_nlarge()-1); + largemax = get_large_size(get_nlarge() - 1); - expect_ptr_null(smallocx(largemax+1, 0).ptr, - "Expected OOM for smallocx(size=%#zx, 0)", largemax+1); + expect_ptr_null(smallocx(largemax + 1, 0).ptr, + "Expected OOM for smallocx(size=%#zx, 0)", largemax + 1); - expect_ptr_null(smallocx(ZU(PTRDIFF_MAX)+1, 0).ptr, - "Expected OOM for smallocx(size=%#zx, 0)", ZU(PTRDIFF_MAX)+1); + expect_ptr_null(smallocx(ZU(PTRDIFF_MAX) + 1, 0).ptr, + "Expected OOM for smallocx(size=%#zx, 0)", ZU(PTRDIFF_MAX) + 1); expect_ptr_null(smallocx(SIZE_T_MAX, 0).ptr, "Expected OOM for smallocx(size=%#zx, 0)", SIZE_T_MAX); - expect_ptr_null(smallocx(1, MALLOCX_ALIGN(ZU(PTRDIFF_MAX)+1)).ptr, + expect_ptr_null(smallocx(1, MALLOCX_ALIGN(ZU(PTRDIFF_MAX) + 1)).ptr, "Expected OOM for smallocx(size=1, MALLOCX_ALIGN(%#zx))", - ZU(PTRDIFF_MAX)+1); + ZU(PTRDIFF_MAX) + 1); } TEST_END static void * remote_alloc(void *arg) { unsigned arena; - size_t sz = sizeof(unsigned); + size_t sz = sizeof(unsigned); expect_d_eq(mallctl("arenas.create", (void *)&arena, &sz, NULL, 0), 0, "Unexpected mallctl() failure"); size_t large_sz; sz = sizeof(size_t); - expect_d_eq(mallctl("arenas.lextent.0.size", (void *)&large_sz, &sz, - NULL, 0), 0, "Unexpected mallctl failure"); + expect_d_eq( + mallctl("arenas.lextent.0.size", (void *)&large_sz, &sz, NULL, 0), + 0, "Unexpected mallctl failure"); - smallocx_return_t r - = smallocx(large_sz, MALLOCX_ARENA(arena) | MALLOCX_TCACHE_NONE); + smallocx_return_t r = smallocx( + large_sz, MALLOCX_ARENA(arena) | MALLOCX_TCACHE_NONE); void *ptr = r.ptr; expect_zu_eq(r.size, nallocx(large_sz, MALLOCX_ARENA(arena) | MALLOCX_TCACHE_NONE), @@ -138,16 +138,16 @@ 
TEST_BEGIN(test_remote_free) { TEST_END TEST_BEGIN(test_oom) { - size_t largemax; - bool oom; - void *ptrs[3]; + size_t largemax; + bool oom; + void *ptrs[3]; unsigned i; /* * It should be impossible to allocate three objects that each consume * nearly half the virtual address space. */ - largemax = get_large_size(get_nlarge()-1); + largemax = get_large_size(get_nlarge() - 1); oom = false; for (i = 0; i < sizeof(ptrs) / sizeof(void *); i++) { ptrs[i] = smallocx(largemax, 0).ptr; @@ -167,10 +167,11 @@ TEST_BEGIN(test_oom) { #if LG_SIZEOF_PTR == 3 expect_ptr_null(smallocx(0x8000000000000000ULL, - MALLOCX_ALIGN(0x8000000000000000ULL)).ptr, + MALLOCX_ALIGN(0x8000000000000000ULL)) + .ptr, "Expected OOM for smallocx()"); - expect_ptr_null(smallocx(0x8000000000000000ULL, - MALLOCX_ALIGN(0x80000000)).ptr, + expect_ptr_null( + smallocx(0x8000000000000000ULL, MALLOCX_ALIGN(0x80000000)).ptr, "Expected OOM for smallocx()"); #else expect_ptr_null(smallocx(0x80000000UL, MALLOCX_ALIGN(0x80000000UL)).ptr, @@ -188,15 +189,15 @@ TEST_BEGIN(test_basic) { for (sz = 1; sz < MAXSZ; sz = nallocx(sz, 0) + 1) { smallocx_return_t ret; - size_t nsz, rsz, smz; - void *p; + size_t nsz, rsz, smz; + void *p; nsz = nallocx(sz, 0); expect_zu_ne(nsz, 0, "Unexpected nallocx() error"); ret = smallocx(sz, 0); p = ret.ptr; smz = ret.size; - expect_ptr_not_null(p, - "Unexpected smallocx(size=%zx, flags=0) error", sz); + expect_ptr_not_null( + p, "Unexpected smallocx(size=%zx, flags=0) error", sz); rsz = sallocx(p, 0); expect_zu_ge(rsz, sz, "Real size smaller than expected"); expect_zu_eq(nsz, rsz, "nallocx()/sallocx() size mismatch"); @@ -206,8 +207,8 @@ TEST_BEGIN(test_basic) { ret = smallocx(sz, 0); p = ret.ptr; smz = ret.size; - expect_ptr_not_null(p, - "Unexpected smallocx(size=%zx, flags=0) error", sz); + expect_ptr_not_null( + p, "Unexpected smallocx(size=%zx, flags=0) error", sz); dallocx(p, 0); nsz = nallocx(sz, MALLOCX_ZERO); @@ -230,58 +231,61 @@ TEST_END TEST_BEGIN(test_alignment_and_size) { 
const char *percpu_arena; - size_t sz = sizeof(percpu_arena); + size_t sz = sizeof(percpu_arena); - if(mallctl("opt.percpu_arena", (void *)&percpu_arena, &sz, NULL, 0) || - strcmp(percpu_arena, "disabled") != 0) { - test_skip("test_alignment_and_size skipped: " + if (mallctl("opt.percpu_arena", (void *)&percpu_arena, &sz, NULL, 0) + || strcmp(percpu_arena, "disabled") != 0) { + test_skip( + "test_alignment_and_size skipped: " "not working with percpu arena."); }; #define MAXALIGN (((size_t)1) << 23) #define NITER 4 - size_t nsz, rsz, smz, alignment, total; + size_t nsz, rsz, smz, alignment, total; unsigned i; - void *ps[NITER]; + void *ps[NITER]; for (i = 0; i < NITER; i++) { ps[i] = NULL; } - for (alignment = 8; - alignment <= MAXALIGN; - alignment <<= 1) { + for (alignment = 8; alignment <= MAXALIGN; alignment <<= 1) { total = 0; - for (sz = 1; - sz < 3 * alignment && sz < (1U << 31); - sz += (alignment >> (LG_SIZEOF_PTR-1)) - 1) { + for (sz = 1; sz < 3 * alignment && sz < (1U << 31); + sz += (alignment >> (LG_SIZEOF_PTR - 1)) - 1) { for (i = 0; i < NITER; i++) { - nsz = nallocx(sz, MALLOCX_ALIGN(alignment) | - MALLOCX_ZERO); + nsz = nallocx(sz, + MALLOCX_ALIGN(alignment) | MALLOCX_ZERO); expect_zu_ne(nsz, 0, "nallocx() error for alignment=%zu, " - "size=%zu (%#zx)", alignment, sz, sz); - smallocx_return_t ret - = smallocx(sz, MALLOCX_ALIGN(alignment) | MALLOCX_ZERO); + "size=%zu (%#zx)", + alignment, sz, sz); + smallocx_return_t ret = smallocx(sz, + MALLOCX_ALIGN(alignment) | MALLOCX_ZERO); ps[i] = ret.ptr; expect_ptr_not_null(ps[i], "smallocx() error for alignment=%zu, " - "size=%zu (%#zx)", alignment, sz, sz); + "size=%zu (%#zx)", + alignment, sz, sz); rsz = sallocx(ps[i], 0); smz = ret.size; expect_zu_ge(rsz, sz, "Real size smaller than expected for " - "alignment=%zu, size=%zu", alignment, sz); + "alignment=%zu, size=%zu", + alignment, sz); expect_zu_eq(nsz, rsz, "nallocx()/sallocx() size mismatch for " - "alignment=%zu, size=%zu", alignment, sz); + 
"alignment=%zu, size=%zu", + alignment, sz); expect_zu_eq(nsz, smz, "nallocx()/smallocx() size mismatch for " - "alignment=%zu, size=%zu", alignment, sz); - expect_ptr_null( - (void *)((uintptr_t)ps[i] & (alignment-1)), - "%p inadequately aligned for" - " alignment=%zu, size=%zu", ps[i], + "alignment=%zu, size=%zu", alignment, sz); + expect_ptr_null((void *)((uintptr_t)ps[i] + & (alignment - 1)), + "%p inadequately aligned for" + " alignment=%zu, size=%zu", + ps[i], alignment, sz); total += rsz; if (total >= (MAXALIGN << 1)) { break; @@ -303,10 +307,6 @@ TEST_END int main(void) { - return test( - test_overflow, - test_oom, - test_remote_free, - test_basic, + return test(test_overflow, test_oom, test_remote_free, test_basic, test_alignment_and_size); } diff --git a/test/integration/thread_arena.c b/test/integration/thread_arena.c index 4a6abf64..48062183 100644 --- a/test/integration/thread_arena.c +++ b/test/integration/thread_arena.c @@ -5,10 +5,10 @@ void * thd_start(void *arg) { unsigned main_arena_ind = *(unsigned *)arg; - void *p; + void *p; unsigned arena_ind; - size_t size; - int err; + size_t size; + int err; p = malloc(1); expect_ptr_not_null(p, "Error in malloc()"); @@ -16,7 +16,7 @@ thd_start(void *arg) { size = sizeof(arena_ind); if ((err = mallctl("thread.arena", (void *)&arena_ind, &size, - (void *)&main_arena_ind, sizeof(main_arena_ind)))) { + (void *)&main_arena_ind, sizeof(main_arena_ind)))) { char buf[BUFERROR_BUF]; buferror(err, buf, sizeof(buf)); @@ -24,8 +24,8 @@ thd_start(void *arg) { } size = sizeof(arena_ind); - if ((err = mallctl("thread.arena", (void *)&arena_ind, &size, NULL, - 0))) { + if ((err = mallctl( + "thread.arena", (void *)&arena_ind, &size, NULL, 0))) { char buf[BUFERROR_BUF]; buferror(err, buf, sizeof(buf)); @@ -46,28 +46,28 @@ mallctl_failure(int err) { } TEST_BEGIN(test_thread_arena) { - void *p; - int err; - thd_t thds[NTHREADS]; + void *p; + int err; + thd_t thds[NTHREADS]; unsigned i; p = malloc(1); expect_ptr_not_null(p, 
"Error in malloc()"); unsigned arena_ind, old_arena_ind; - size_t sz = sizeof(unsigned); + size_t sz = sizeof(unsigned); expect_d_eq(mallctl("arenas.create", (void *)&arena_ind, &sz, NULL, 0), 0, "Arena creation failure"); size_t size = sizeof(arena_ind); if ((err = mallctl("thread.arena", (void *)&old_arena_ind, &size, - (void *)&arena_ind, sizeof(arena_ind))) != 0) { + (void *)&arena_ind, sizeof(arena_ind))) + != 0) { mallctl_failure(err); } for (i = 0; i < NTHREADS; i++) { - thd_create(&thds[i], thd_start, - (void *)&arena_ind); + thd_create(&thds[i], thd_start, (void *)&arena_ind); } for (i = 0; i < NTHREADS; i++) { @@ -81,6 +81,5 @@ TEST_END int main(void) { - return test( - test_thread_arena); + return test(test_thread_arena); } diff --git a/test/integration/thread_tcache_enabled.c b/test/integration/thread_tcache_enabled.c index d44dbe90..3c7c95f6 100644 --- a/test/integration/thread_tcache_enabled.c +++ b/test/integration/thread_tcache_enabled.c @@ -2,60 +2,69 @@ void * thd_start(void *arg) { - bool e0, e1; + bool e0, e1; size_t sz = sizeof(bool); - expect_d_eq(mallctl("thread.tcache.enabled", (void *)&e0, &sz, NULL, - 0), 0, "Unexpected mallctl failure"); + expect_d_eq(mallctl("thread.tcache.enabled", (void *)&e0, &sz, NULL, 0), + 0, "Unexpected mallctl failure"); if (e0) { e1 = false; expect_d_eq(mallctl("thread.tcache.enabled", (void *)&e0, &sz, - (void *)&e1, sz), 0, "Unexpected mallctl() error"); + (void *)&e1, sz), + 0, "Unexpected mallctl() error"); expect_true(e0, "tcache should be enabled"); } e1 = true; - expect_d_eq(mallctl("thread.tcache.enabled", (void *)&e0, &sz, - (void *)&e1, sz), 0, "Unexpected mallctl() error"); + expect_d_eq( + mallctl("thread.tcache.enabled", (void *)&e0, &sz, (void *)&e1, sz), + 0, "Unexpected mallctl() error"); expect_false(e0, "tcache should be disabled"); e1 = true; - expect_d_eq(mallctl("thread.tcache.enabled", (void *)&e0, &sz, - (void *)&e1, sz), 0, "Unexpected mallctl() error"); + expect_d_eq( + 
mallctl("thread.tcache.enabled", (void *)&e0, &sz, (void *)&e1, sz), + 0, "Unexpected mallctl() error"); expect_true(e0, "tcache should be enabled"); e1 = false; - expect_d_eq(mallctl("thread.tcache.enabled", (void *)&e0, &sz, - (void *)&e1, sz), 0, "Unexpected mallctl() error"); + expect_d_eq( + mallctl("thread.tcache.enabled", (void *)&e0, &sz, (void *)&e1, sz), + 0, "Unexpected mallctl() error"); expect_true(e0, "tcache should be enabled"); e1 = false; - expect_d_eq(mallctl("thread.tcache.enabled", (void *)&e0, &sz, - (void *)&e1, sz), 0, "Unexpected mallctl() error"); + expect_d_eq( + mallctl("thread.tcache.enabled", (void *)&e0, &sz, (void *)&e1, sz), + 0, "Unexpected mallctl() error"); expect_false(e0, "tcache should be disabled"); free(malloc(1)); e1 = true; - expect_d_eq(mallctl("thread.tcache.enabled", (void *)&e0, &sz, - (void *)&e1, sz), 0, "Unexpected mallctl() error"); + expect_d_eq( + mallctl("thread.tcache.enabled", (void *)&e0, &sz, (void *)&e1, sz), + 0, "Unexpected mallctl() error"); expect_false(e0, "tcache should be disabled"); free(malloc(1)); e1 = true; - expect_d_eq(mallctl("thread.tcache.enabled", (void *)&e0, &sz, - (void *)&e1, sz), 0, "Unexpected mallctl() error"); + expect_d_eq( + mallctl("thread.tcache.enabled", (void *)&e0, &sz, (void *)&e1, sz), + 0, "Unexpected mallctl() error"); expect_true(e0, "tcache should be enabled"); free(malloc(1)); e1 = false; - expect_d_eq(mallctl("thread.tcache.enabled", (void *)&e0, &sz, - (void *)&e1, sz), 0, "Unexpected mallctl() error"); + expect_d_eq( + mallctl("thread.tcache.enabled", (void *)&e0, &sz, (void *)&e1, sz), + 0, "Unexpected mallctl() error"); expect_true(e0, "tcache should be enabled"); free(malloc(1)); e1 = false; - expect_d_eq(mallctl("thread.tcache.enabled", (void *)&e0, &sz, - (void *)&e1, sz), 0, "Unexpected mallctl() error"); + expect_d_eq( + mallctl("thread.tcache.enabled", (void *)&e0, &sz, (void *)&e1, sz), + 0, "Unexpected mallctl() error"); expect_false(e0, "tcache should be 
disabled"); free(malloc(1)); @@ -78,10 +87,6 @@ TEST_END int main(void) { /* Run tests multiple times to check for bad interactions. */ - return test( - test_main_thread, - test_subthread, - test_main_thread, - test_subthread, - test_main_thread); + return test(test_main_thread, test_subthread, test_main_thread, + test_subthread, test_main_thread); } diff --git a/test/integration/xallocx.c b/test/integration/xallocx.c index 13708548..9b5ebcde 100644 --- a/test/integration/xallocx.c +++ b/test/integration/xallocx.c @@ -11,15 +11,16 @@ arena_ind(void) { if (ind == 0) { size_t sz = sizeof(ind); - expect_d_eq(mallctl("arenas.create", (void *)&ind, &sz, NULL, - 0), 0, "Unexpected mallctl failure creating arena"); + expect_d_eq( + mallctl("arenas.create", (void *)&ind, &sz, NULL, 0), 0, + "Unexpected mallctl failure creating arena"); } return ind; } TEST_BEGIN(test_same_size) { - void *p; + void *p; size_t sz, tsz; p = mallocx(42, 0); @@ -34,14 +35,14 @@ TEST_BEGIN(test_same_size) { TEST_END TEST_BEGIN(test_extra_no_move) { - void *p; + void *p; size_t sz, tsz; p = mallocx(42, 0); expect_ptr_not_null(p, "Unexpected mallocx() error"); sz = sallocx(p, 0); - tsz = xallocx(p, sz, sz-42, 0); + tsz = xallocx(p, sz, sz - 42, 0); expect_zu_eq(tsz, sz, "Unexpected size change: %zu --> %zu", sz, tsz); dallocx(p, 0); @@ -49,7 +50,7 @@ TEST_BEGIN(test_extra_no_move) { TEST_END TEST_BEGIN(test_no_move_fail) { - void *p; + void *p; size_t sz, tsz; p = mallocx(42, 0); @@ -66,7 +67,7 @@ TEST_END static unsigned get_nsizes_impl(const char *cmd) { unsigned ret; - size_t z; + size_t z; z = sizeof(unsigned); expect_d_eq(mallctl(cmd, (void *)&ret, &z, NULL, 0), 0, @@ -93,12 +94,12 @@ get_size_impl(const char *cmd, size_t ind) { size_t miblen = 4; z = sizeof(size_t); - expect_d_eq(mallctlnametomib(cmd, mib, &miblen), - 0, "Unexpected mallctlnametomib(\"%s\", ...) failure", cmd); + expect_d_eq(mallctlnametomib(cmd, mib, &miblen), 0, + "Unexpected mallctlnametomib(\"%s\", ...) 
failure", cmd); mib[2] = ind; z = sizeof(size_t); - expect_d_eq(mallctlbymib(mib, miblen, (void *)&ret, &z, NULL, 0), - 0, "Unexpected mallctlbymib([\"%s\", %zu], ...) failure", cmd, ind); + expect_d_eq(mallctlbymib(mib, miblen, (void *)&ret, &z, NULL, 0), 0, + "Unexpected mallctlbymib([\"%s\", %zu], ...) failure", cmd, ind); return ret; } @@ -115,25 +116,25 @@ get_large_size(size_t ind) { TEST_BEGIN(test_size) { size_t small0, largemax; - void *p; + void *p; /* Get size classes. */ small0 = get_small_size(0); - largemax = get_large_size(get_nlarge()-1); + largemax = get_large_size(get_nlarge() - 1); p = mallocx(small0, 0); expect_ptr_not_null(p, "Unexpected mallocx() error"); /* Test smallest supported size. */ - expect_zu_eq(xallocx(p, 1, 0, 0), small0, - "Unexpected xallocx() behavior"); + expect_zu_eq( + xallocx(p, 1, 0, 0), small0, "Unexpected xallocx() behavior"); /* Test largest supported size. */ expect_zu_le(xallocx(p, largemax, 0, 0), largemax, "Unexpected xallocx() behavior"); /* Test size overflow. */ - expect_zu_le(xallocx(p, largemax+1, 0, 0), largemax, + expect_zu_le(xallocx(p, largemax + 1, 0, 0), largemax, "Unexpected xallocx() behavior"); expect_zu_le(xallocx(p, SIZE_T_MAX, 0, 0), largemax, "Unexpected xallocx() behavior"); @@ -144,29 +145,29 @@ TEST_END TEST_BEGIN(test_size_extra_overflow) { size_t small0, largemax; - void *p; + void *p; /* Get size classes. */ small0 = get_small_size(0); - largemax = get_large_size(get_nlarge()-1); + largemax = get_large_size(get_nlarge() - 1); p = mallocx(small0, 0); expect_ptr_not_null(p, "Unexpected mallocx() error"); /* Test overflows that can be resolved by clamping extra. */ - expect_zu_le(xallocx(p, largemax-1, 2, 0), largemax, + expect_zu_le(xallocx(p, largemax - 1, 2, 0), largemax, "Unexpected xallocx() behavior"); expect_zu_le(xallocx(p, largemax, 1, 0), largemax, "Unexpected xallocx() behavior"); /* Test overflow such that largemax-size underflows. 
*/ - expect_zu_le(xallocx(p, largemax+1, 2, 0), largemax, + expect_zu_le(xallocx(p, largemax + 1, 2, 0), largemax, "Unexpected xallocx() behavior"); - expect_zu_le(xallocx(p, largemax+2, 3, 0), largemax, + expect_zu_le(xallocx(p, largemax + 2, 3, 0), largemax, "Unexpected xallocx() behavior"); - expect_zu_le(xallocx(p, SIZE_T_MAX-2, 2, 0), largemax, + expect_zu_le(xallocx(p, SIZE_T_MAX - 2, 2, 0), largemax, "Unexpected xallocx() behavior"); - expect_zu_le(xallocx(p, SIZE_T_MAX-1, 1, 0), largemax, + expect_zu_le(xallocx(p, SIZE_T_MAX - 1, 1, 0), largemax, "Unexpected xallocx() behavior"); dallocx(p, 0); @@ -175,21 +176,21 @@ TEST_END TEST_BEGIN(test_extra_small) { size_t small0, small1, largemax; - void *p; + void *p; /* Get size classes. */ small0 = get_small_size(0); small1 = get_small_size(1); - largemax = get_large_size(get_nlarge()-1); + largemax = get_large_size(get_nlarge() - 1); p = mallocx(small0, 0); expect_ptr_not_null(p, "Unexpected mallocx() error"); - expect_zu_eq(xallocx(p, small1, 0, 0), small0, - "Unexpected xallocx() behavior"); + expect_zu_eq( + xallocx(p, small1, 0, 0), small0, "Unexpected xallocx() behavior"); - expect_zu_eq(xallocx(p, small1, 0, 0), small0, - "Unexpected xallocx() behavior"); + expect_zu_eq( + xallocx(p, small1, 0, 0), small0, "Unexpected xallocx() behavior"); expect_zu_eq(xallocx(p, small0, small1 - small0, 0), small0, "Unexpected xallocx() behavior"); @@ -205,16 +206,16 @@ TEST_BEGIN(test_extra_small) { TEST_END TEST_BEGIN(test_extra_large) { - int flags = MALLOCX_ARENA(arena_ind()); + int flags = MALLOCX_ARENA(arena_ind()); size_t smallmax, large1, large2, large3, largemax; - void *p; + void *p; /* Get size classes. 
*/ - smallmax = get_small_size(get_nsmall()-1); + smallmax = get_small_size(get_nsmall() - 1); large1 = get_large_size(1); large2 = get_large_size(2); large3 = get_large_size(3); - largemax = get_large_size(get_nlarge()-1); + largemax = get_large_size(get_nlarge() - 1); p = mallocx(large3, flags); expect_ptr_not_null(p, "Unexpected mallocx() error"); @@ -246,7 +247,7 @@ TEST_BEGIN(test_extra_large) { /* Test size increase with zero extra. */ expect_zu_le(xallocx(p, large3, 0, flags), large3, "Unexpected xallocx() behavior"); - expect_zu_le(xallocx(p, largemax+1, 0, flags), large3, + expect_zu_le(xallocx(p, largemax + 1, 0, flags), large3, "Unexpected xallocx() behavior"); expect_zu_ge(xallocx(p, large1, 0, flags), large1, @@ -276,8 +277,8 @@ TEST_END static void print_filled_extents(const void *p, uint8_t c, size_t len) { const uint8_t *pc = (const uint8_t *)p; - size_t i, range0; - uint8_t c0; + size_t i, range0; + uint8_t c0; malloc_printf(" p=%p, c=%#x, len=%zu:", p, c, len); range0 = 0; @@ -295,10 +296,10 @@ print_filled_extents(const void *p, uint8_t c, size_t len) { static bool validate_fill(const void *p, uint8_t c, size_t offset, size_t len) { const uint8_t *pc = (const uint8_t *)p; - bool err; - size_t i; + bool err; + size_t i; - for (i = offset, err = false; i < offset+len; i++) { + for (i = offset, err = false; i < offset + len; i++) { if (pc[i] != c) { err = true; } @@ -313,16 +314,16 @@ validate_fill(const void *p, uint8_t c, size_t offset, size_t len) { static void test_zero(size_t szmin, size_t szmax) { - int flags = MALLOCX_ARENA(arena_ind()) | MALLOCX_ZERO; + int flags = MALLOCX_ARENA(arena_ind()) | MALLOCX_ZERO; size_t sz, nsz; - void *p; + void *p; #define FILL_BYTE 0x7aU sz = szmax; p = mallocx(sz, flags); expect_ptr_not_null(p, "Unexpected mallocx() error"); - expect_false(validate_fill(p, 0x00, 0, sz), "Memory not filled: sz=%zu", - sz); + expect_false( + validate_fill(p, 0x00, 0, sz), "Memory not filled: sz=%zu", sz); /* * Fill with non-zero 
so that non-debug builds are more likely to detect @@ -342,16 +343,16 @@ test_zero(size_t szmin, size_t szmax) { "Memory not filled: sz=%zu", sz); for (sz = szmin; sz < szmax; sz = nsz) { - nsz = nallocx(sz+1, flags); - if (xallocx(p, sz+1, 0, flags) != nsz) { - p = rallocx(p, sz+1, flags); + nsz = nallocx(sz + 1, flags); + if (xallocx(p, sz + 1, 0, flags) != nsz) { + p = rallocx(p, sz + 1, flags); expect_ptr_not_null(p, "Unexpected rallocx() failure"); } expect_false(validate_fill(p, FILL_BYTE, 0, sz), "Memory not filled: sz=%zu", sz); - expect_false(validate_fill(p, 0x00, sz, nsz-sz), - "Memory not filled: sz=%zu, nsz-sz=%zu", sz, nsz-sz); - memset((void *)((uintptr_t)p + sz), FILL_BYTE, nsz-sz); + expect_false(validate_fill(p, 0x00, sz, nsz - sz), + "Memory not filled: sz=%zu, nsz-sz=%zu", sz, nsz - sz); + memset((void *)((uintptr_t)p + sz), FILL_BYTE, nsz - sz); expect_false(validate_fill(p, FILL_BYTE, 0, nsz), "Memory not filled: nsz=%zu", nsz); } @@ -372,13 +373,7 @@ TEST_END int main(void) { - return test( - test_same_size, - test_extra_no_move, - test_no_move_fail, - test_size, - test_size_extra_overflow, - test_extra_small, - test_extra_large, - test_zero_large); + return test(test_same_size, test_extra_no_move, test_no_move_fail, + test_size, test_size_extra_overflow, test_extra_small, + test_extra_large, test_zero_large); } diff --git a/test/src/SFMT.c b/test/src/SFMT.c index c05e2183..87b1fd1c 100644 --- a/test/src/SFMT.c +++ b/test/src/SFMT.c @@ -50,19 +50,19 @@ #include "test/SFMT-params.h" #if defined(JEMALLOC_BIG_ENDIAN) && !defined(BIG_ENDIAN64) -#define BIG_ENDIAN64 1 +# define BIG_ENDIAN64 1 #endif #if defined(__BIG_ENDIAN__) && !defined(__amd64) && !defined(BIG_ENDIAN64) -#define BIG_ENDIAN64 1 +# define BIG_ENDIAN64 1 #endif #if defined(HAVE_ALTIVEC) && !defined(BIG_ENDIAN64) -#define BIG_ENDIAN64 1 +# define BIG_ENDIAN64 1 #endif #if defined(ONLY64) && !defined(BIG_ENDIAN64) - #if defined(__GNUC__) - #error "-DONLY64 must be specified with 
-DBIG_ENDIAN64" - #endif -#undef ONLY64 +# if defined(__GNUC__) +# error "-DONLY64 must be specified with -DBIG_ENDIAN64" +# endif +# undef ONLY64 #endif /*------------------------------------------------------ 128-bit SIMD data type for Altivec, SSE2 or standard C @@ -70,8 +70,8 @@ #if defined(HAVE_ALTIVEC) /** 128-bit data structure */ union W128_T { - vector unsigned int s; - uint32_t u[4]; + vector unsigned int s; + uint32_t u[4]; }; /** 128-bit data type */ typedef union W128_T w128_t; @@ -79,8 +79,8 @@ typedef union W128_T w128_t; #elif defined(HAVE_SSE2) /** 128-bit data structure */ union W128_T { - __m128i si; - uint32_t u[4]; + __m128i si; + uint32_t u[4]; }; /** 128-bit data type */ typedef union W128_T w128_t; @@ -89,7 +89,7 @@ typedef union W128_T w128_t; /** 128-bit data structure */ struct W128_T { - uint32_t u[4]; + uint32_t u[4]; }; /** 128-bit data type */ typedef struct W128_T w128_t; @@ -97,13 +97,13 @@ typedef struct W128_T w128_t; #endif struct sfmt_s { - /** the 128-bit internal state array */ - w128_t sfmt[N]; - /** index counter to the 32-bit internal state array */ - int idx; - /** a flag: it is 0 if and only if the internal state is not yet + /** the 128-bit internal state array */ + w128_t sfmt[N]; + /** index counter to the 32-bit internal state array */ + int idx; + /** a flag: it is 0 if and only if the internal state is not yet * initialized. 
*/ - int initialized; + int initialized; }; /*-------------------------------------- @@ -119,22 +119,22 @@ static uint32_t parity[4] = {PARITY1, PARITY2, PARITY3, PARITY4}; ----------------*/ static inline int idxof(int i); #if (!defined(HAVE_ALTIVEC)) && (!defined(HAVE_SSE2)) -static inline void rshift128(w128_t *out, w128_t const *in, int shift); -static inline void lshift128(w128_t *out, w128_t const *in, int shift); +static inline void rshift128(w128_t *out, w128_t const *in, int shift); +static inline void lshift128(w128_t *out, w128_t const *in, int shift); #endif -static inline void gen_rand_all(sfmt_t *ctx); -static inline void gen_rand_array(sfmt_t *ctx, w128_t *array, int size); +static inline void gen_rand_all(sfmt_t *ctx); +static inline void gen_rand_array(sfmt_t *ctx, w128_t *array, int size); static inline uint32_t func1(uint32_t x); static inline uint32_t func2(uint32_t x); -static void period_certification(sfmt_t *ctx); +static void period_certification(sfmt_t *ctx); #if defined(BIG_ENDIAN64) && !defined(ONLY64) static inline void swap(w128_t *array, int size); #endif #if defined(HAVE_ALTIVEC) - #include "test/SFMT-alti.h" +# include "test/SFMT-alti.h" #elif defined(HAVE_SSE2) - #include "test/SFMT-sse2.h" +# include "test/SFMT-sse2.h" #endif /** @@ -142,12 +142,14 @@ static inline void swap(w128_t *array, int size); * in BIG ENDIAN machine. 
*/ #ifdef ONLY64 -static inline int idxof(int i) { - return i ^ 1; +static inline int +idxof(int i) { + return i ^ 1; } #else -static inline int idxof(int i) { - return i; +static inline int +idxof(int i) { + return i; } #endif /** @@ -159,37 +161,39 @@ static inline int idxof(int i) { * @param shift the shift value */ #if (!defined(HAVE_ALTIVEC)) && (!defined(HAVE_SSE2)) -#ifdef ONLY64 -static inline void rshift128(w128_t *out, w128_t const *in, int shift) { - uint64_t th, tl, oh, ol; +# ifdef ONLY64 +static inline void +rshift128(w128_t *out, w128_t const *in, int shift) { + uint64_t th, tl, oh, ol; - th = ((uint64_t)in->u[2] << 32) | ((uint64_t)in->u[3]); - tl = ((uint64_t)in->u[0] << 32) | ((uint64_t)in->u[1]); + th = ((uint64_t)in->u[2] << 32) | ((uint64_t)in->u[3]); + tl = ((uint64_t)in->u[0] << 32) | ((uint64_t)in->u[1]); - oh = th >> (shift * 8); - ol = tl >> (shift * 8); - ol |= th << (64 - shift * 8); - out->u[0] = (uint32_t)(ol >> 32); - out->u[1] = (uint32_t)ol; - out->u[2] = (uint32_t)(oh >> 32); - out->u[3] = (uint32_t)oh; + oh = th >> (shift * 8); + ol = tl >> (shift * 8); + ol |= th << (64 - shift * 8); + out->u[0] = (uint32_t)(ol >> 32); + out->u[1] = (uint32_t)ol; + out->u[2] = (uint32_t)(oh >> 32); + out->u[3] = (uint32_t)oh; } -#else -static inline void rshift128(w128_t *out, w128_t const *in, int shift) { - uint64_t th, tl, oh, ol; +# else +static inline void +rshift128(w128_t *out, w128_t const *in, int shift) { + uint64_t th, tl, oh, ol; - th = ((uint64_t)in->u[3] << 32) | ((uint64_t)in->u[2]); - tl = ((uint64_t)in->u[1] << 32) | ((uint64_t)in->u[0]); + th = ((uint64_t)in->u[3] << 32) | ((uint64_t)in->u[2]); + tl = ((uint64_t)in->u[1] << 32) | ((uint64_t)in->u[0]); - oh = th >> (shift * 8); - ol = tl >> (shift * 8); - ol |= th << (64 - shift * 8); - out->u[1] = (uint32_t)(ol >> 32); - out->u[0] = (uint32_t)ol; - out->u[3] = (uint32_t)(oh >> 32); - out->u[2] = (uint32_t)oh; + oh = th >> (shift * 8); + ol = tl >> (shift * 8); + ol |= th << (64 
- shift * 8); + out->u[1] = (uint32_t)(ol >> 32); + out->u[0] = (uint32_t)ol; + out->u[3] = (uint32_t)(oh >> 32); + out->u[2] = (uint32_t)oh; } -#endif +# endif /** * This function simulates SIMD 128-bit left shift by the standard C. * The 128-bit integer given in in is shifted by (shift * 8) bits. @@ -198,37 +202,39 @@ static inline void rshift128(w128_t *out, w128_t const *in, int shift) { * @param in the 128-bit data to be shifted * @param shift the shift value */ -#ifdef ONLY64 -static inline void lshift128(w128_t *out, w128_t const *in, int shift) { - uint64_t th, tl, oh, ol; +# ifdef ONLY64 +static inline void +lshift128(w128_t *out, w128_t const *in, int shift) { + uint64_t th, tl, oh, ol; - th = ((uint64_t)in->u[2] << 32) | ((uint64_t)in->u[3]); - tl = ((uint64_t)in->u[0] << 32) | ((uint64_t)in->u[1]); + th = ((uint64_t)in->u[2] << 32) | ((uint64_t)in->u[3]); + tl = ((uint64_t)in->u[0] << 32) | ((uint64_t)in->u[1]); - oh = th << (shift * 8); - ol = tl << (shift * 8); - oh |= tl >> (64 - shift * 8); - out->u[0] = (uint32_t)(ol >> 32); - out->u[1] = (uint32_t)ol; - out->u[2] = (uint32_t)(oh >> 32); - out->u[3] = (uint32_t)oh; + oh = th << (shift * 8); + ol = tl << (shift * 8); + oh |= tl >> (64 - shift * 8); + out->u[0] = (uint32_t)(ol >> 32); + out->u[1] = (uint32_t)ol; + out->u[2] = (uint32_t)(oh >> 32); + out->u[3] = (uint32_t)oh; } -#else -static inline void lshift128(w128_t *out, w128_t const *in, int shift) { - uint64_t th, tl, oh, ol; +# else +static inline void +lshift128(w128_t *out, w128_t const *in, int shift) { + uint64_t th, tl, oh, ol; - th = ((uint64_t)in->u[3] << 32) | ((uint64_t)in->u[2]); - tl = ((uint64_t)in->u[1] << 32) | ((uint64_t)in->u[0]); + th = ((uint64_t)in->u[3] << 32) | ((uint64_t)in->u[2]); + tl = ((uint64_t)in->u[1] << 32) | ((uint64_t)in->u[0]); - oh = th << (shift * 8); - ol = tl << (shift * 8); - oh |= tl >> (64 - shift * 8); - out->u[1] = (uint32_t)(ol >> 32); - out->u[0] = (uint32_t)ol; - out->u[3] = (uint32_t)(oh >> 32); - 
out->u[2] = (uint32_t)oh; + oh = th << (shift * 8); + ol = tl << (shift * 8); + oh |= tl >> (64 - shift * 8); + out->u[1] = (uint32_t)(ol >> 32); + out->u[0] = (uint32_t)ol; + out->u[3] = (uint32_t)(oh >> 32); + out->u[2] = (uint32_t)oh; } -#endif +# endif #endif /** @@ -240,41 +246,41 @@ static inline void lshift128(w128_t *out, w128_t const *in, int shift) { * @param d a 128-bit part of the internal state array */ #if (!defined(HAVE_ALTIVEC)) && (!defined(HAVE_SSE2)) -#ifdef ONLY64 -static inline void do_recursion(w128_t *r, w128_t *a, w128_t *b, w128_t *c, - w128_t *d) { - w128_t x; - w128_t y; +# ifdef ONLY64 +static inline void +do_recursion(w128_t *r, w128_t *a, w128_t *b, w128_t *c, w128_t *d) { + w128_t x; + w128_t y; - lshift128(&x, a, SL2); - rshift128(&y, c, SR2); - r->u[0] = a->u[0] ^ x.u[0] ^ ((b->u[0] >> SR1) & MSK2) ^ y.u[0] - ^ (d->u[0] << SL1); - r->u[1] = a->u[1] ^ x.u[1] ^ ((b->u[1] >> SR1) & MSK1) ^ y.u[1] - ^ (d->u[1] << SL1); - r->u[2] = a->u[2] ^ x.u[2] ^ ((b->u[2] >> SR1) & MSK4) ^ y.u[2] - ^ (d->u[2] << SL1); - r->u[3] = a->u[3] ^ x.u[3] ^ ((b->u[3] >> SR1) & MSK3) ^ y.u[3] - ^ (d->u[3] << SL1); + lshift128(&x, a, SL2); + rshift128(&y, c, SR2); + r->u[0] = a->u[0] ^ x.u[0] ^ ((b->u[0] >> SR1) & MSK2) ^ y.u[0] + ^ (d->u[0] << SL1); + r->u[1] = a->u[1] ^ x.u[1] ^ ((b->u[1] >> SR1) & MSK1) ^ y.u[1] + ^ (d->u[1] << SL1); + r->u[2] = a->u[2] ^ x.u[2] ^ ((b->u[2] >> SR1) & MSK4) ^ y.u[2] + ^ (d->u[2] << SL1); + r->u[3] = a->u[3] ^ x.u[3] ^ ((b->u[3] >> SR1) & MSK3) ^ y.u[3] + ^ (d->u[3] << SL1); } -#else -static inline void do_recursion(w128_t *r, w128_t *a, w128_t *b, w128_t *c, - w128_t *d) { - w128_t x; - w128_t y; +# else +static inline void +do_recursion(w128_t *r, w128_t *a, w128_t *b, w128_t *c, w128_t *d) { + w128_t x; + w128_t y; - lshift128(&x, a, SL2); - rshift128(&y, c, SR2); - r->u[0] = a->u[0] ^ x.u[0] ^ ((b->u[0] >> SR1) & MSK1) ^ y.u[0] - ^ (d->u[0] << SL1); - r->u[1] = a->u[1] ^ x.u[1] ^ ((b->u[1] >> SR1) & MSK2) ^ y.u[1] - ^ 
(d->u[1] << SL1); - r->u[2] = a->u[2] ^ x.u[2] ^ ((b->u[2] >> SR1) & MSK3) ^ y.u[2] - ^ (d->u[2] << SL1); - r->u[3] = a->u[3] ^ x.u[3] ^ ((b->u[3] >> SR1) & MSK4) ^ y.u[3] - ^ (d->u[3] << SL1); + lshift128(&x, a, SL2); + rshift128(&y, c, SR2); + r->u[0] = a->u[0] ^ x.u[0] ^ ((b->u[0] >> SR1) & MSK1) ^ y.u[0] + ^ (d->u[0] << SL1); + r->u[1] = a->u[1] ^ x.u[1] ^ ((b->u[1] >> SR1) & MSK2) ^ y.u[1] + ^ (d->u[1] << SL1); + r->u[2] = a->u[2] ^ x.u[2] ^ ((b->u[2] >> SR1) & MSK3) ^ y.u[2] + ^ (d->u[2] << SL1); + r->u[3] = a->u[3] ^ x.u[3] ^ ((b->u[3] >> SR1) & MSK4) ^ y.u[3] + ^ (d->u[3] << SL1); } -#endif +# endif #endif #if (!defined(HAVE_ALTIVEC)) && (!defined(HAVE_SSE2)) @@ -282,24 +288,25 @@ static inline void do_recursion(w128_t *r, w128_t *a, w128_t *b, w128_t *c, * This function fills the internal state array with pseudorandom * integers. */ -static inline void gen_rand_all(sfmt_t *ctx) { - int i; - w128_t *r1, *r2; +static inline void +gen_rand_all(sfmt_t *ctx) { + int i; + w128_t *r1, *r2; - r1 = &ctx->sfmt[N - 2]; - r2 = &ctx->sfmt[N - 1]; - for (i = 0; i < N - POS1; i++) { - do_recursion(&ctx->sfmt[i], &ctx->sfmt[i], &ctx->sfmt[i + POS1], r1, - r2); - r1 = r2; - r2 = &ctx->sfmt[i]; - } - for (; i < N; i++) { - do_recursion(&ctx->sfmt[i], &ctx->sfmt[i], &ctx->sfmt[i + POS1 - N], r1, - r2); - r1 = r2; - r2 = &ctx->sfmt[i]; - } + r1 = &ctx->sfmt[N - 2]; + r2 = &ctx->sfmt[N - 1]; + for (i = 0; i < N - POS1; i++) { + do_recursion( + &ctx->sfmt[i], &ctx->sfmt[i], &ctx->sfmt[i + POS1], r1, r2); + r1 = r2; + r2 = &ctx->sfmt[i]; + } + for (; i < N; i++) { + do_recursion(&ctx->sfmt[i], &ctx->sfmt[i], + &ctx->sfmt[i + POS1 - N], r1, r2); + r1 = r2; + r2 = &ctx->sfmt[i]; + } } /** @@ -309,52 +316,58 @@ static inline void gen_rand_all(sfmt_t *ctx) { * @param array an 128-bit array to be filled by pseudorandom numbers. * @param size number of 128-bit pseudorandom numbers to be generated. 
*/ -static inline void gen_rand_array(sfmt_t *ctx, w128_t *array, int size) { - int i, j; - w128_t *r1, *r2; +static inline void +gen_rand_array(sfmt_t *ctx, w128_t *array, int size) { + int i, j; + w128_t *r1, *r2; - r1 = &ctx->sfmt[N - 2]; - r2 = &ctx->sfmt[N - 1]; - for (i = 0; i < N - POS1; i++) { - do_recursion(&array[i], &ctx->sfmt[i], &ctx->sfmt[i + POS1], r1, r2); - r1 = r2; - r2 = &array[i]; - } - for (; i < N; i++) { - do_recursion(&array[i], &ctx->sfmt[i], &array[i + POS1 - N], r1, r2); - r1 = r2; - r2 = &array[i]; - } - for (; i < size - N; i++) { - do_recursion(&array[i], &array[i - N], &array[i + POS1 - N], r1, r2); - r1 = r2; - r2 = &array[i]; - } - for (j = 0; j < 2 * N - size; j++) { - ctx->sfmt[j] = array[j + size - N]; - } - for (; i < size; i++, j++) { - do_recursion(&array[i], &array[i - N], &array[i + POS1 - N], r1, r2); - r1 = r2; - r2 = &array[i]; - ctx->sfmt[j] = array[i]; - } + r1 = &ctx->sfmt[N - 2]; + r2 = &ctx->sfmt[N - 1]; + for (i = 0; i < N - POS1; i++) { + do_recursion( + &array[i], &ctx->sfmt[i], &ctx->sfmt[i + POS1], r1, r2); + r1 = r2; + r2 = &array[i]; + } + for (; i < N; i++) { + do_recursion( + &array[i], &ctx->sfmt[i], &array[i + POS1 - N], r1, r2); + r1 = r2; + r2 = &array[i]; + } + for (; i < size - N; i++) { + do_recursion( + &array[i], &array[i - N], &array[i + POS1 - N], r1, r2); + r1 = r2; + r2 = &array[i]; + } + for (j = 0; j < 2 * N - size; j++) { + ctx->sfmt[j] = array[j + size - N]; + } + for (; i < size; i++, j++) { + do_recursion( + &array[i], &array[i - N], &array[i + POS1 - N], r1, r2); + r1 = r2; + r2 = &array[i]; + ctx->sfmt[j] = array[i]; + } } #endif #if defined(BIG_ENDIAN64) && !defined(ONLY64) && !defined(HAVE_ALTIVEC) -static inline void swap(w128_t *array, int size) { - int i; - uint32_t x, y; +static inline void +swap(w128_t *array, int size) { + int i; + uint32_t x, y; - for (i = 0; i < size; i++) { - x = array[i].u[0]; - y = array[i].u[2]; - array[i].u[0] = array[i].u[1]; - array[i].u[2] = 
array[i].u[3]; - array[i].u[1] = x; - array[i].u[3] = y; - } + for (i = 0; i < size; i++) { + x = array[i].u[0]; + y = array[i].u[2]; + array[i].u[0] = array[i].u[1]; + array[i].u[2] = array[i].u[3]; + array[i].u[1] = x; + array[i].u[3] = y; + } } #endif /** @@ -363,8 +376,9 @@ static inline void swap(w128_t *array, int size) { * @param x 32-bit integer * @return 32-bit integer */ -static uint32_t func1(uint32_t x) { - return (x ^ (x >> 27)) * (uint32_t)1664525UL; +static uint32_t +func1(uint32_t x) { + return (x ^ (x >> 27)) * (uint32_t)1664525UL; } /** @@ -373,39 +387,41 @@ static uint32_t func1(uint32_t x) { * @param x 32-bit integer * @return 32-bit integer */ -static uint32_t func2(uint32_t x) { - return (x ^ (x >> 27)) * (uint32_t)1566083941UL; +static uint32_t +func2(uint32_t x) { + return (x ^ (x >> 27)) * (uint32_t)1566083941UL; } /** * This function certificate the period of 2^{MEXP} */ -static void period_certification(sfmt_t *ctx) { - int inner = 0; - int i, j; - uint32_t work; - uint32_t *psfmt32 = &ctx->sfmt[0].u[0]; +static void +period_certification(sfmt_t *ctx) { + int inner = 0; + int i, j; + uint32_t work; + uint32_t *psfmt32 = &ctx->sfmt[0].u[0]; - for (i = 0; i < 4; i++) - inner ^= psfmt32[idxof(i)] & parity[i]; - for (i = 16; i > 0; i >>= 1) - inner ^= inner >> i; - inner &= 1; - /* check OK */ - if (inner == 1) { - return; - } - /* check NG, and modification */ - for (i = 0; i < 4; i++) { - work = 1; - for (j = 0; j < 32; j++) { - if ((work & parity[i]) != 0) { - psfmt32[idxof(i)] ^= work; + for (i = 0; i < 4; i++) + inner ^= psfmt32[idxof(i)] & parity[i]; + for (i = 16; i > 0; i >>= 1) + inner ^= inner >> i; + inner &= 1; + /* check OK */ + if (inner == 1) { return; - } - work = work << 1; } - } + /* check NG, and modification */ + for (i = 0; i < 4; i++) { + work = 1; + for (j = 0; j < 32; j++) { + if ((work & parity[i]) != 0) { + psfmt32[idxof(i)] ^= work; + return; + } + work = work << 1; + } + } } /*---------------- @@ -416,8 +432,9 @@ 
static void period_certification(sfmt_t *ctx) { * The string shows the word size, the Mersenne exponent, * and all parameters of this generator. */ -const char *get_idstring(void) { - return IDSTR; +const char * +get_idstring(void) { + return IDSTR; } /** @@ -425,8 +442,9 @@ const char *get_idstring(void) { * fill_array32() function. * @return minimum size of array used for fill_array32() function. */ -int get_min_array_size32(void) { - return N32; +int +get_min_array_size32(void) { + return N32; } /** @@ -434,8 +452,9 @@ int get_min_array_size32(void) { * fill_array64() function. * @return minimum size of array used for fill_array64() function. */ -int get_min_array_size64(void) { - return N64; +int +get_min_array_size64(void) { + return N64; } #ifndef ONLY64 @@ -444,32 +463,34 @@ int get_min_array_size64(void) { * init_gen_rand or init_by_array must be called before this function. * @return 32-bit pseudorandom number */ -uint32_t gen_rand32(sfmt_t *ctx) { - uint32_t r; - uint32_t *psfmt32 = &ctx->sfmt[0].u[0]; +uint32_t +gen_rand32(sfmt_t *ctx) { + uint32_t r; + uint32_t *psfmt32 = &ctx->sfmt[0].u[0]; - assert(ctx->initialized); - if (ctx->idx >= N32) { - gen_rand_all(ctx); - ctx->idx = 0; - } - r = psfmt32[ctx->idx++]; - return r; + assert(ctx->initialized); + if (ctx->idx >= N32) { + gen_rand_all(ctx); + ctx->idx = 0; + } + r = psfmt32[ctx->idx++]; + return r; } /* Generate a random integer in [0..limit). 
*/ -uint32_t gen_rand32_range(sfmt_t *ctx, uint32_t limit) { - uint32_t ret, above; +uint32_t +gen_rand32_range(sfmt_t *ctx, uint32_t limit) { + uint32_t ret, above; - above = 0xffffffffU - (0xffffffffU % limit); - while (1) { - ret = gen_rand32(ctx); - if (ret < above) { - ret %= limit; - break; + above = 0xffffffffU - (0xffffffffU % limit); + while (1) { + ret = gen_rand32(ctx); + if (ret < above) { + ret %= limit; + break; + } } - } - return ret; + return ret; } #endif /** @@ -479,47 +500,49 @@ uint32_t gen_rand32_range(sfmt_t *ctx, uint32_t limit) { * unless an initialization is again executed. * @return 64-bit pseudorandom number */ -uint64_t gen_rand64(sfmt_t *ctx) { +uint64_t +gen_rand64(sfmt_t *ctx) { #if defined(BIG_ENDIAN64) && !defined(ONLY64) - uint32_t r1, r2; - uint32_t *psfmt32 = &ctx->sfmt[0].u[0]; + uint32_t r1, r2; + uint32_t *psfmt32 = &ctx->sfmt[0].u[0]; #else - uint64_t r; - uint64_t *psfmt64 = (uint64_t *)&ctx->sfmt[0].u[0]; + uint64_t r; + uint64_t *psfmt64 = (uint64_t *)&ctx->sfmt[0].u[0]; #endif - assert(ctx->initialized); - assert(ctx->idx % 2 == 0); + assert(ctx->initialized); + assert(ctx->idx % 2 == 0); - if (ctx->idx >= N32) { - gen_rand_all(ctx); - ctx->idx = 0; - } + if (ctx->idx >= N32) { + gen_rand_all(ctx); + ctx->idx = 0; + } #if defined(BIG_ENDIAN64) && !defined(ONLY64) - r1 = psfmt32[ctx->idx]; - r2 = psfmt32[ctx->idx + 1]; - ctx->idx += 2; - return ((uint64_t)r2 << 32) | r1; + r1 = psfmt32[ctx->idx]; + r2 = psfmt32[ctx->idx + 1]; + ctx->idx += 2; + return ((uint64_t)r2 << 32) | r1; #else - r = psfmt64[ctx->idx / 2]; - ctx->idx += 2; - return r; + r = psfmt64[ctx->idx / 2]; + ctx->idx += 2; + return r; #endif } /* Generate a random integer in [0..limit). 
*/ -uint64_t gen_rand64_range(sfmt_t *ctx, uint64_t limit) { - uint64_t ret, above; +uint64_t +gen_rand64_range(sfmt_t *ctx, uint64_t limit) { + uint64_t ret, above; - above = KQU(0xffffffffffffffff) - (KQU(0xffffffffffffffff) % limit); - while (1) { - ret = gen_rand64(ctx); - if (ret < above) { - ret %= limit; - break; + above = KQU(0xffffffffffffffff) - (KQU(0xffffffffffffffff) % limit); + while (1) { + ret = gen_rand64(ctx); + if (ret < above) { + ret %= limit; + break; + } } - } - return ret; + return ret; } #ifndef ONLY64 @@ -548,14 +571,15 @@ uint64_t gen_rand64_range(sfmt_t *ctx, uint64_t limit) { * memory. Mac OSX doesn't have these functions, but \b malloc of OSX * returns the pointer to the aligned memory block. */ -void fill_array32(sfmt_t *ctx, uint32_t *array, int size) { - assert(ctx->initialized); - assert(ctx->idx == N32); - assert(size % 4 == 0); - assert(size >= N32); +void +fill_array32(sfmt_t *ctx, uint32_t *array, int size) { + assert(ctx->initialized); + assert(ctx->idx == N32); + assert(size % 4 == 0); + assert(size >= N32); - gen_rand_array(ctx, (w128_t *)array, size / 4); - ctx->idx = N32; + gen_rand_array(ctx, (w128_t *)array, size / 4); + ctx->idx = N32; } #endif @@ -584,17 +608,18 @@ void fill_array32(sfmt_t *ctx, uint32_t *array, int size) { * memory. Mac OSX doesn't have these functions, but \b malloc of OSX * returns the pointer to the aligned memory block. 
*/ -void fill_array64(sfmt_t *ctx, uint64_t *array, int size) { - assert(ctx->initialized); - assert(ctx->idx == N32); - assert(size % 2 == 0); - assert(size >= N64); +void +fill_array64(sfmt_t *ctx, uint64_t *array, int size) { + assert(ctx->initialized); + assert(ctx->idx == N32); + assert(size % 2 == 0); + assert(size >= N64); - gen_rand_array(ctx, (w128_t *)array, size / 2); - ctx->idx = N32; + gen_rand_array(ctx, (w128_t *)array, size / 2); + ctx->idx = N32; #if defined(BIG_ENDIAN64) && !defined(ONLY64) - swap((w128_t *)array, size /2); + swap((w128_t *)array, size / 2); #endif } @@ -604,29 +629,31 @@ void fill_array64(sfmt_t *ctx, uint64_t *array, int size) { * * @param seed a 32-bit integer used as the seed. */ -sfmt_t *init_gen_rand(uint32_t seed) { - void *p; - sfmt_t *ctx; - int i; - uint32_t *psfmt32; +sfmt_t * +init_gen_rand(uint32_t seed) { + void *p; + sfmt_t *ctx; + int i; + uint32_t *psfmt32; - if (posix_memalign(&p, sizeof(w128_t), sizeof(sfmt_t)) != 0) { - return NULL; - } - ctx = (sfmt_t *)p; - psfmt32 = &ctx->sfmt[0].u[0]; + if (posix_memalign(&p, sizeof(w128_t), sizeof(sfmt_t)) != 0) { + return NULL; + } + ctx = (sfmt_t *)p; + psfmt32 = &ctx->sfmt[0].u[0]; - psfmt32[idxof(0)] = seed; - for (i = 1; i < N32; i++) { - psfmt32[idxof(i)] = 1812433253UL * (psfmt32[idxof(i - 1)] - ^ (psfmt32[idxof(i - 1)] >> 30)) - + i; - } - ctx->idx = N32; - period_certification(ctx); - ctx->initialized = 1; + psfmt32[idxof(0)] = seed; + for (i = 1; i < N32; i++) { + psfmt32[idxof(i)] = 1812433253UL + * (psfmt32[idxof(i - 1)] + ^ (psfmt32[idxof(i - 1)] >> 30)) + + i; + } + ctx->idx = N32; + period_certification(ctx); + ctx->initialized = 1; - return ctx; + return ctx; } /** @@ -635,85 +662,87 @@ sfmt_t *init_gen_rand(uint32_t seed) { * @param init_key the array of 32-bit integers, used as a seed. * @param key_length the length of init_key. 
*/ -sfmt_t *init_by_array(uint32_t *init_key, int key_length) { - void *p; - sfmt_t *ctx; - int i, j, count; - uint32_t r; - int lag; - int mid; - int size = N * 4; - uint32_t *psfmt32; +sfmt_t * +init_by_array(uint32_t *init_key, int key_length) { + void *p; + sfmt_t *ctx; + int i, j, count; + uint32_t r; + int lag; + int mid; + int size = N * 4; + uint32_t *psfmt32; - if (posix_memalign(&p, sizeof(w128_t), sizeof(sfmt_t)) != 0) { - return NULL; - } - ctx = (sfmt_t *)p; - psfmt32 = &ctx->sfmt[0].u[0]; + if (posix_memalign(&p, sizeof(w128_t), sizeof(sfmt_t)) != 0) { + return NULL; + } + ctx = (sfmt_t *)p; + psfmt32 = &ctx->sfmt[0].u[0]; - if (size >= 623) { - lag = 11; - } else if (size >= 68) { - lag = 7; - } else if (size >= 39) { - lag = 5; - } else { - lag = 3; - } - mid = (size - lag) / 2; + if (size >= 623) { + lag = 11; + } else if (size >= 68) { + lag = 7; + } else if (size >= 39) { + lag = 5; + } else { + lag = 3; + } + mid = (size - lag) / 2; - memset(ctx->sfmt, 0x8b, sizeof(ctx->sfmt)); - if (key_length + 1 > N32) { - count = key_length + 1; - } else { - count = N32; - } - r = func1(psfmt32[idxof(0)] ^ psfmt32[idxof(mid)] - ^ psfmt32[idxof(N32 - 1)]); - psfmt32[idxof(mid)] += r; - r += key_length; - psfmt32[idxof(mid + lag)] += r; - psfmt32[idxof(0)] = r; + memset(ctx->sfmt, 0x8b, sizeof(ctx->sfmt)); + if (key_length + 1 > N32) { + count = key_length + 1; + } else { + count = N32; + } + r = func1( + psfmt32[idxof(0)] ^ psfmt32[idxof(mid)] ^ psfmt32[idxof(N32 - 1)]); + psfmt32[idxof(mid)] += r; + r += key_length; + psfmt32[idxof(mid + lag)] += r; + psfmt32[idxof(0)] = r; - count--; - for (i = 1, j = 0; (j < count) && (j < key_length); j++) { - r = func1(psfmt32[idxof(i)] ^ psfmt32[idxof((i + mid) % N32)] - ^ psfmt32[idxof((i + N32 - 1) % N32)]); - psfmt32[idxof((i + mid) % N32)] += r; - r += init_key[j] + i; - psfmt32[idxof((i + mid + lag) % N32)] += r; - psfmt32[idxof(i)] = r; - i = (i + 1) % N32; - } - for (; j < count; j++) { - r = 
func1(psfmt32[idxof(i)] ^ psfmt32[idxof((i + mid) % N32)] - ^ psfmt32[idxof((i + N32 - 1) % N32)]); - psfmt32[idxof((i + mid) % N32)] += r; - r += i; - psfmt32[idxof((i + mid + lag) % N32)] += r; - psfmt32[idxof(i)] = r; - i = (i + 1) % N32; - } - for (j = 0; j < N32; j++) { - r = func2(psfmt32[idxof(i)] + psfmt32[idxof((i + mid) % N32)] - + psfmt32[idxof((i + N32 - 1) % N32)]); - psfmt32[idxof((i + mid) % N32)] ^= r; - r -= i; - psfmt32[idxof((i + mid + lag) % N32)] ^= r; - psfmt32[idxof(i)] = r; - i = (i + 1) % N32; - } + count--; + for (i = 1, j = 0; (j < count) && (j < key_length); j++) { + r = func1(psfmt32[idxof(i)] ^ psfmt32[idxof((i + mid) % N32)] + ^ psfmt32[idxof((i + N32 - 1) % N32)]); + psfmt32[idxof((i + mid) % N32)] += r; + r += init_key[j] + i; + psfmt32[idxof((i + mid + lag) % N32)] += r; + psfmt32[idxof(i)] = r; + i = (i + 1) % N32; + } + for (; j < count; j++) { + r = func1(psfmt32[idxof(i)] ^ psfmt32[idxof((i + mid) % N32)] + ^ psfmt32[idxof((i + N32 - 1) % N32)]); + psfmt32[idxof((i + mid) % N32)] += r; + r += i; + psfmt32[idxof((i + mid + lag) % N32)] += r; + psfmt32[idxof(i)] = r; + i = (i + 1) % N32; + } + for (j = 0; j < N32; j++) { + r = func2(psfmt32[idxof(i)] + psfmt32[idxof((i + mid) % N32)] + + psfmt32[idxof((i + N32 - 1) % N32)]); + psfmt32[idxof((i + mid) % N32)] ^= r; + r -= i; + psfmt32[idxof((i + mid + lag) % N32)] ^= r; + psfmt32[idxof(i)] = r; + i = (i + 1) % N32; + } - ctx->idx = N32; - period_certification(ctx); - ctx->initialized = 1; + ctx->idx = N32; + period_certification(ctx); + ctx->initialized = 1; - return ctx; + return ctx; } -void fini_gen_rand(sfmt_t *ctx) { - assert(ctx != NULL); +void +fini_gen_rand(sfmt_t *ctx) { + assert(ctx != NULL); - ctx->initialized = 0; - free(ctx); + ctx->initialized = 0; + free(ctx); } diff --git a/test/src/mtx.c b/test/src/mtx.c index 6cb3ecd5..05c922bf 100644 --- a/test/src/mtx.c +++ b/test/src/mtx.c @@ -1,14 +1,14 @@ #include "test/jemalloc_test.h" #if defined(_WIN32) && 
!defined(_CRT_SPINCOUNT) -#define _CRT_SPINCOUNT 4000 +# define _CRT_SPINCOUNT 4000 #endif bool mtx_init(mtx_t *mtx) { #ifdef _WIN32 - if (!InitializeCriticalSectionAndSpinCount(&mtx->lock, - _CRT_SPINCOUNT)) { + if (!InitializeCriticalSectionAndSpinCount( + &mtx->lock, _CRT_SPINCOUNT)) { return true; } #elif (defined(JEMALLOC_OS_UNFAIR_LOCK)) diff --git a/test/src/sleep.c b/test/src/sleep.c index 2234b4bc..96b9b7bf 100644 --- a/test/src/sleep.c +++ b/test/src/sleep.c @@ -6,7 +6,7 @@ */ void sleep_ns(unsigned ns) { - assert(ns <= 1000*1000*1000); + assert(ns <= 1000 * 1000 * 1000); #ifdef _WIN32 Sleep(ns / 1000 / 1000); @@ -14,7 +14,7 @@ sleep_ns(unsigned ns) { { struct timespec timeout; - if (ns < 1000*1000*1000) { + if (ns < 1000 * 1000 * 1000) { timeout.tv_sec = 0; timeout.tv_nsec = ns; } else { diff --git a/test/src/test.c b/test/src/test.c index a21356d5..6eb84338 100644 --- a/test/src/test.c +++ b/test/src/test.c @@ -2,10 +2,10 @@ /* Test status state. */ -static unsigned test_count = 0; -static test_status_t test_counts[test_status_count] = {0, 0, 0}; -static test_status_t test_status = test_status_pass; -static const char * test_name = ""; +static unsigned test_count = 0; +static test_status_t test_counts[test_status_count] = {0, 0, 0}; +static test_status_t test_status = test_status_pass; +static const char *test_name = ""; /* Reentrancy testing helpers. */ @@ -89,10 +89,14 @@ test_fail(const char *format, ...) 
{ static const char * test_status_string(test_status_t current_status) { switch (current_status) { - case test_status_pass: return "pass"; - case test_status_skip: return "skip"; - case test_status_fail: return "fail"; - default: not_reached(); + case test_status_pass: + return "pass"; + case test_status_skip: + return "skip"; + case test_status_fail: + return "fail"; + default: + not_reached(); } } @@ -173,19 +177,16 @@ p_test_impl(bool do_malloc_init, bool do_reentrant, test_t *t, va_list ap) { } } - bool colored = test_counts[test_status_fail] != 0 && - isatty(STDERR_FILENO); + bool colored = test_counts[test_status_fail] != 0 + && isatty(STDERR_FILENO); const char *color_start = colored ? "\033[1;31m" : ""; const char *color_end = colored ? "\033[0m" : ""; malloc_printf("%s--- %s: %u/%u, %s: %u/%u, %s: %u/%u ---\n%s", - color_start, - test_status_string(test_status_pass), + color_start, test_status_string(test_status_pass), test_counts[test_status_pass], test_count, - test_status_string(test_status_skip), - test_counts[test_status_skip], test_count, - test_status_string(test_status_fail), - test_counts[test_status_fail], test_count, - color_end); + test_status_string(test_status_skip), test_counts[test_status_skip], + test_count, test_status_string(test_status_fail), + test_counts[test_status_fail], test_count, color_end); return ret; } @@ -193,7 +194,7 @@ p_test_impl(bool do_malloc_init, bool do_reentrant, test_t *t, va_list ap) { test_status_t p_test(test_t *t, ...) { test_status_t ret; - va_list ap; + va_list ap; ret = test_status_pass; va_start(ap, t); @@ -206,7 +207,7 @@ p_test(test_t *t, ...) { test_status_t p_test_no_reentrancy(test_t *t, ...) { test_status_t ret; - va_list ap; + va_list ap; ret = test_status_pass; va_start(ap, t); @@ -219,7 +220,7 @@ p_test_no_reentrancy(test_t *t, ...) { test_status_t p_test_no_malloc_init(test_t *t, ...) 
{ test_status_t ret; - va_list ap; + va_list ap; ret = test_status_pass; va_start(ap, t); @@ -235,12 +236,12 @@ p_test_no_malloc_init(test_t *t, ...) { void p_test_fail(bool may_abort, const char *prefix, const char *message) { - bool colored = test_counts[test_status_fail] != 0 && - isatty(STDERR_FILENO); + bool colored = test_counts[test_status_fail] != 0 + && isatty(STDERR_FILENO); const char *color_start = colored ? "\033[1;31m" : ""; const char *color_end = colored ? "\033[0m" : ""; - malloc_cprintf(NULL, NULL, "%s%s%s\n%s", color_start, prefix, message, - color_end); + malloc_cprintf( + NULL, NULL, "%s%s%s\n%s", color_start, prefix, message, color_end); test_status = test_status_fail; if (may_abort) { abort(); diff --git a/test/src/thd.c b/test/src/thd.c index 8f91a595..634dc262 100644 --- a/test/src/thd.c +++ b/test/src/thd.c @@ -14,7 +14,7 @@ void thd_join(thd_t thd, void **ret) { if (WaitForSingleObject(thd, INFINITE) == WAIT_OBJECT_0 && ret) { DWORD exit_code; - GetExitCodeThread(thd, (LPDWORD) &exit_code); + GetExitCodeThread(thd, (LPDWORD)&exit_code); *ret = (void *)(uintptr_t)exit_code; } } @@ -44,7 +44,8 @@ thd_setname(const char *name) { bool thd_has_setname(void) { -#if defined(JEMALLOC_HAVE_PTHREAD_SETNAME_NP) || defined(JEMALLOC_HAVE_PTHREAD_SET_NAME_NP) +#if defined(JEMALLOC_HAVE_PTHREAD_SETNAME_NP) \ + || defined(JEMALLOC_HAVE_PTHREAD_SET_NAME_NP) return true; #else return false; diff --git a/test/src/timer.c b/test/src/timer.c index 94528a34..017bf5a5 100644 --- a/test/src/timer.c +++ b/test/src/timer.c @@ -25,8 +25,8 @@ timer_ratio(timedelta_t *a, timedelta_t *b, char *buf, size_t buflen) { uint64_t t0 = timer_usec(a); uint64_t t1 = timer_usec(b); uint64_t mult; - size_t i = 0; - size_t j, n; + size_t i = 0; + size_t j, n; /* * The time difference could be 0 if the two clock readings are @@ -36,11 +36,11 @@ timer_ratio(timedelta_t *a, timedelta_t *b, char *buf, size_t buflen) { * Thus, bump t1 if it is 0 to avoid dividing 0. 
*/ if (t1 == 0) { - t1 = 1; + t1 = 1; } /* Whole. */ - n = malloc_snprintf(&buf[i], buflen-i, "%"FMTu64, t0 / t1); + n = malloc_snprintf(&buf[i], buflen - i, "%" FMTu64, t0 / t1); i += n; if (i >= buflen) { return; @@ -51,15 +51,17 @@ timer_ratio(timedelta_t *a, timedelta_t *b, char *buf, size_t buflen) { } /* Decimal. */ - n = malloc_snprintf(&buf[i], buflen-i, "."); + n = malloc_snprintf(&buf[i], buflen - i, "."); i += n; /* Fraction. */ - while (i < buflen-1) { - uint64_t round = (i+1 == buflen-1 && ((t0 * mult * 10 / t1) % 10 - >= 5)) ? 1 : 0; - n = malloc_snprintf(&buf[i], buflen-i, - "%"FMTu64, (t0 * mult / t1) % 10 + round); + while (i < buflen - 1) { + uint64_t round = (i + 1 == buflen - 1 + && ((t0 * mult * 10 / t1) % 10 >= 5)) + ? 1 + : 0; + n = malloc_snprintf(&buf[i], buflen - i, "%" FMTu64, + (t0 * mult / t1) % 10 + round); i += n; mult *= 10; } diff --git a/test/stress/batch_alloc.c b/test/stress/batch_alloc.c index 6b973bb1..46ed0bf7 100644 --- a/test/stress/batch_alloc.c +++ b/test/stress/batch_alloc.c @@ -10,9 +10,9 @@ static size_t miblen = MIBLEN; #define HUGE_BATCH (1000 * 1000) #define HUGE_BATCH_ITER 100 #define LEN (100 * 1000 * 1000) -static void *batch_ptrs[LEN]; +static void *batch_ptrs[LEN]; static size_t batch_ptrs_next = 0; -static void *item_ptrs[LEN]; +static void *item_ptrs[LEN]; static size_t item_ptrs_next = 0; #define SIZE 7 @@ -22,17 +22,18 @@ struct batch_alloc_packet_s { void **ptrs; size_t num; size_t size; - int flags; + int flags; }; static void batch_alloc_wrapper(size_t batch) { - batch_alloc_packet_t batch_alloc_packet = - {batch_ptrs + batch_ptrs_next, batch, SIZE, 0}; + batch_alloc_packet_t batch_alloc_packet = { + batch_ptrs + batch_ptrs_next, batch, SIZE, 0}; size_t filled; size_t len = sizeof(size_t); assert_d_eq(mallctlbymib(mib, miblen, &filled, &len, - &batch_alloc_packet, sizeof(batch_alloc_packet)), 0, ""); + &batch_alloc_packet, sizeof(batch_alloc_packet)), + 0, ""); assert_zu_eq(filled, batch, ""); } @@ -94,9 
+95,9 @@ compare_without_free(size_t batch, size_t iter, batch_ptrs_next = 0; release_and_clear(item_ptrs, item_ptrs_next); item_ptrs_next = 0; - compare_funcs(0, iter, - "batch allocation", batch_alloc_without_free_func, - "item allocation", item_alloc_without_free_func); + compare_funcs(0, iter, "batch allocation", + batch_alloc_without_free_func, "item allocation", + item_alloc_without_free_func); release_and_clear(batch_ptrs, batch_ptrs_next); batch_ptrs_next = 0; release_and_clear(item_ptrs, item_ptrs_next); @@ -116,8 +117,7 @@ compare_with_free(size_t batch, size_t iter, } batch_ptrs_next = 0; item_ptrs_next = 0; - compare_funcs(0, iter, - "batch allocation", batch_alloc_with_free_func, + compare_funcs(0, iter, "batch allocation", batch_alloc_with_free_func, "item allocation", item_alloc_with_free_func); batch_ptrs_next = 0; item_ptrs_next = 0; @@ -187,12 +187,11 @@ TEST_BEGIN(test_huge_batch_with_free) { } TEST_END -int main(void) { - assert_d_eq(mallctlnametomib("experimental.batch_alloc", mib, &miblen), - 0, ""); - return test_no_reentrancy( - test_tiny_batch_without_free, - test_tiny_batch_with_free, - test_huge_batch_without_free, +int +main(void) { + assert_d_eq( + mallctlnametomib("experimental.batch_alloc", mib, &miblen), 0, ""); + return test_no_reentrancy(test_tiny_batch_without_free, + test_tiny_batch_with_free, test_huge_batch_without_free, test_huge_batch_with_free); } diff --git a/test/stress/cpp/microbench.cpp b/test/stress/cpp/microbench.cpp index 7422d1ca..0c4697a6 100644 --- a/test/stress/cpp/microbench.cpp +++ b/test/stress/cpp/microbench.cpp @@ -3,7 +3,7 @@ static void malloc_free(void) { - void* p = malloc(1); + void *p = malloc(1); expect_ptr_not_null((void *)p, "Unexpected malloc failure"); p = no_opt_ptr(p); free((void *)p); @@ -11,7 +11,7 @@ malloc_free(void) { static void new_delete(void) { - void* p = ::operator new(1); + void *p = ::operator new(1); expect_ptr_not_null((void *)p, "Unexpected new failure"); p = no_opt_ptr(p); 
::operator delete((void *)p); @@ -19,7 +19,7 @@ new_delete(void) { static void malloc_free_array(void) { - void* p = malloc(sizeof(int)*8); + void *p = malloc(sizeof(int) * 8); expect_ptr_not_null((void *)p, "Unexpected malloc failure"); p = no_opt_ptr(p); free((void *)p); @@ -27,7 +27,7 @@ malloc_free_array(void) { static void new_delete_array(void) { - int* p = new int[8]; + int *p = new int[8]; expect_ptr_not_null((void *)p, "Unexpected new[] failure"); p = (int *)no_opt_ptr((void *)p); delete[] (int *)p; @@ -36,7 +36,7 @@ new_delete_array(void) { #if __cpp_sized_deallocation >= 201309 static void new_sized_delete(void) { - void* p = ::operator new(1); + void *p = ::operator new(1); expect_ptr_not_null((void *)p, "Unexpected new failure"); p = no_opt_ptr(p); ::operator delete((void *)p, 1); @@ -44,45 +44,41 @@ new_sized_delete(void) { static void malloc_sdallocx(void) { - void* p = malloc(1); + void *p = malloc(1); expect_ptr_not_null((void *)p, "Unexpected malloc failure"); p = no_opt_ptr(p); - sdallocx((void *)p, 1, 0); + sdallocx((void *)p, 1, 0); } #endif TEST_BEGIN(test_free_vs_delete) { - compare_funcs(10*1000*1000, 100*1000*1000, - "malloc_free", (void *)malloc_free, - "new_delete", (void *)new_delete); + compare_funcs(10 * 1000 * 1000, 100 * 1000 * 1000, "malloc_free", + (void *)malloc_free, "new_delete", (void *)new_delete); } TEST_END TEST_BEGIN(test_free_array_vs_delete_array) { - compare_funcs(10*1000*1000, 100*1000*1000, - "malloc_free_array", (void *)malloc_free_array, - "delete_array", (void *)new_delete_array); + compare_funcs(10 * 1000 * 1000, 100 * 1000 * 1000, "malloc_free_array", + (void *)malloc_free_array, "delete_array", + (void *)new_delete_array); } TEST_END - TEST_BEGIN(test_sized_delete_vs_sdallocx) { #if __cpp_sized_deallocation >= 201309 - compare_funcs(10*1000*1000, 100*1000*1000, - "new_size_delete", (void *)new_sized_delete, - "malloc_sdallocx", (void *)malloc_sdallocx); + compare_funcs(10 * 1000 * 1000, 100 * 1000 * 1000, 
"new_size_delete", + (void *)new_sized_delete, "malloc_sdallocx", + (void *)malloc_sdallocx); #else - malloc_printf("Skipping test_sized_delete_vs_sdallocx since \ + malloc_printf( + "Skipping test_sized_delete_vs_sdallocx since \ sized deallocation is not enabled.\n"); #endif } TEST_END - int main() { - return test_no_reentrancy( - test_free_vs_delete, - test_free_array_vs_delete_array, - test_sized_delete_vs_sdallocx); + return test_no_reentrancy(test_free_vs_delete, + test_free_array_vs_delete_array, test_sized_delete_vs_sdallocx); } diff --git a/test/stress/fill_flush.c b/test/stress/fill_flush.c index 546bcc0b..c7b13404 100644 --- a/test/stress/fill_flush.c +++ b/test/stress/fill_flush.c @@ -35,9 +35,9 @@ item_alloc_dalloc_small(void) { } TEST_BEGIN(test_array_vs_item_small) { - compare_funcs(1 * 1000, 10 * 1000, - "array of small allocations", array_alloc_dalloc_small, - "small item allocation", item_alloc_dalloc_small); + compare_funcs(1 * 1000, 10 * 1000, "array of small allocations", + array_alloc_dalloc_small, "small item allocation", + item_alloc_dalloc_small); } TEST_END @@ -64,14 +64,14 @@ item_alloc_dalloc_large(void) { } TEST_BEGIN(test_array_vs_item_large) { - compare_funcs(100, 1000, - "array of large allocations", array_alloc_dalloc_large, - "large item allocation", item_alloc_dalloc_large); + compare_funcs(100, 1000, "array of large allocations", + array_alloc_dalloc_large, "large item allocation", + item_alloc_dalloc_large); } TEST_END -int main(void) { +int +main(void) { return test_no_reentrancy( - test_array_vs_item_small, - test_array_vs_item_large); + test_array_vs_item_small, test_array_vs_item_large); } diff --git a/test/stress/hookbench.c b/test/stress/hookbench.c index 97e90b0e..455e4c56 100644 --- a/test/stress/hookbench.c +++ b/test/stress/hookbench.c @@ -2,19 +2,16 @@ static void noop_alloc_hook(void *extra, hook_alloc_t type, void *result, - uintptr_t result_raw, uintptr_t args_raw[3]) { -} + uintptr_t result_raw, uintptr_t 
args_raw[3]) {} static void -noop_dalloc_hook(void *extra, hook_dalloc_t type, void *address, - uintptr_t args_raw[3]) { -} +noop_dalloc_hook( + void *extra, hook_dalloc_t type, void *address, uintptr_t args_raw[3]) {} static void noop_expand_hook(void *extra, hook_expand_t type, void *address, size_t old_usize, size_t new_usize, uintptr_t result_raw, - uintptr_t args_raw[4]) { -} + uintptr_t args_raw[4]) {} static void malloc_free_loop(int iters) { @@ -26,23 +23,23 @@ malloc_free_loop(int iters) { static void test_hooked(int iters) { - hooks_t hooks = {&noop_alloc_hook, &noop_dalloc_hook, &noop_expand_hook, - NULL}; + hooks_t hooks = { + &noop_alloc_hook, &noop_dalloc_hook, &noop_expand_hook, NULL}; - int err; - void *handles[HOOK_MAX]; + int err; + void *handles[HOOK_MAX]; size_t sz = sizeof(handles[0]); for (int i = 0; i < HOOK_MAX; i++) { - err = mallctl("experimental.hooks.install", &handles[i], - &sz, &hooks, sizeof(hooks)); + err = mallctl("experimental.hooks.install", &handles[i], &sz, + &hooks, sizeof(hooks)); assert(err == 0); timedelta_t timer; timer_start(&timer); malloc_free_loop(iters); timer_stop(&timer); - malloc_printf("With %d hook%s: %"FMTu64"us\n", i + 1, + malloc_printf("With %d hook%s: %" FMTu64 "us\n", i + 1, i + 1 == 1 ? 
"" : "s", timer_usec(&timer)); } for (int i = 0; i < HOOK_MAX; i++) { @@ -59,7 +56,7 @@ test_unhooked(int iters) { malloc_free_loop(iters); timer_stop(&timer); - malloc_printf("Without hooks: %"FMTu64"us\n", timer_usec(&timer)); + malloc_printf("Without hooks: %" FMTu64 "us\n", timer_usec(&timer)); } int diff --git a/test/stress/large_microbench.c b/test/stress/large_microbench.c index 44a60c53..785ed836 100644 --- a/test/stress/large_microbench.c +++ b/test/stress/large_microbench.c @@ -22,14 +22,12 @@ small_mallocx_free(void) { } TEST_BEGIN(test_large_vs_small) { - compare_funcs(100*1000, 1*1000*1000, "large mallocx", + compare_funcs(100 * 1000, 1 * 1000 * 1000, "large mallocx", large_mallocx_free, "small mallocx", small_mallocx_free); } TEST_END int main(void) { - return test_no_reentrancy( - test_large_vs_small); + return test_no_reentrancy(test_large_vs_small); } - diff --git a/test/stress/mallctl.c b/test/stress/mallctl.c index d29b3118..b4c0f560 100644 --- a/test/stress/mallctl.c +++ b/test/stress/mallctl.c @@ -4,8 +4,8 @@ static void mallctl_short(void) { const char *version; - size_t sz = sizeof(version); - int err = mallctl("version", &version, &sz, NULL, 0); + size_t sz = sizeof(version); + int err = mallctl("version", &version, &sz, NULL, 0); assert_d_eq(err, 0, "mallctl failure"); } @@ -13,19 +13,19 @@ size_t mib_short[1]; static void mallctlbymib_short(void) { - size_t miblen = sizeof(mib_short)/sizeof(mib_short[0]); + size_t miblen = sizeof(mib_short) / sizeof(mib_short[0]); const char *version; - size_t sz = sizeof(version); + size_t sz = sizeof(version); int err = mallctlbymib(mib_short, miblen, &version, &sz, NULL, 0); assert_d_eq(err, 0, "mallctlbymib failure"); } TEST_BEGIN(test_mallctl_vs_mallctlbymib_short) { - size_t miblen = sizeof(mib_short)/sizeof(mib_short[0]); + size_t miblen = sizeof(mib_short) / sizeof(mib_short[0]); int err = mallctlnametomib("version", mib_short, &miblen); assert_d_eq(err, 0, "mallctlnametomib failure"); - 
compare_funcs(10*1000*1000, 10*1000*1000, "mallctl_short", + compare_funcs(10 * 1000 * 1000, 10 * 1000 * 1000, "mallctl_short", mallctl_short, "mallctlbymib_short", mallctlbymib_short); } TEST_END @@ -33,9 +33,9 @@ TEST_END static void mallctl_long(void) { uint64_t nmalloc; - size_t sz = sizeof(nmalloc); - int err = mallctl("stats.arenas.0.bins.0.nmalloc", &nmalloc, &sz, NULL, - 0); + size_t sz = sizeof(nmalloc); + int err = mallctl( + "stats.arenas.0.bins.0.nmalloc", &nmalloc, &sz, NULL, 0); assert_d_eq(err, 0, "mallctl failure"); } @@ -43,10 +43,10 @@ size_t mib_long[6]; static void mallctlbymib_long(void) { - size_t miblen = sizeof(mib_long)/sizeof(mib_long[0]); + size_t miblen = sizeof(mib_long) / sizeof(mib_long[0]); uint64_t nmalloc; - size_t sz = sizeof(nmalloc); - int err = mallctlbymib(mib_long, miblen, &nmalloc, &sz, NULL, 0); + size_t sz = sizeof(nmalloc); + int err = mallctlbymib(mib_long, miblen, &nmalloc, &sz, NULL, 0); assert_d_eq(err, 0, "mallctlbymib failure"); } @@ -57,18 +57,17 @@ TEST_BEGIN(test_mallctl_vs_mallctlbymib_long) { */ test_skip_if(!config_stats); - size_t miblen = sizeof(mib_long)/sizeof(mib_long[0]); - int err = mallctlnametomib("stats.arenas.0.bins.0.nmalloc", mib_long, - &miblen); + size_t miblen = sizeof(mib_long) / sizeof(mib_long[0]); + int err = mallctlnametomib( + "stats.arenas.0.bins.0.nmalloc", mib_long, &miblen); assert_d_eq(err, 0, "mallctlnametomib failure"); - compare_funcs(10*1000*1000, 10*1000*1000, "mallctl_long", + compare_funcs(10 * 1000 * 1000, 10 * 1000 * 1000, "mallctl_long", mallctl_long, "mallctlbymib_long", mallctlbymib_long); } TEST_END int main(void) { - return test_no_reentrancy( - test_mallctl_vs_mallctlbymib_short, + return test_no_reentrancy(test_mallctl_vs_mallctlbymib_short, test_mallctl_vs_mallctlbymib_long); } diff --git a/test/stress/microbench.c b/test/stress/microbench.c index 89479b7e..3d261a92 100644 --- a/test/stress/microbench.c +++ b/test/stress/microbench.c @@ -25,7 +25,7 @@ 
mallocx_free(void) { } TEST_BEGIN(test_malloc_vs_mallocx) { - compare_funcs(10*1000*1000, 100*1000*1000, "malloc", + compare_funcs(10 * 1000 * 1000, 100 * 1000 * 1000, "malloc", malloc_free, "mallocx", mallocx_free); } TEST_END @@ -53,14 +53,14 @@ malloc_sdallocx(void) { } TEST_BEGIN(test_free_vs_dallocx) { - compare_funcs(10*1000*1000, 100*1000*1000, "free", malloc_free, + compare_funcs(10 * 1000 * 1000, 100 * 1000 * 1000, "free", malloc_free, "dallocx", malloc_dallocx); } TEST_END TEST_BEGIN(test_dallocx_vs_sdallocx) { - compare_funcs(10*1000*1000, 100*1000*1000, "dallocx", malloc_dallocx, - "sdallocx", malloc_sdallocx); + compare_funcs(10 * 1000 * 1000, 100 * 1000 * 1000, "dallocx", + malloc_dallocx, "sdallocx", malloc_sdallocx); } TEST_END @@ -94,7 +94,7 @@ malloc_sallocx_free(void) { } TEST_BEGIN(test_mus_vs_sallocx) { - compare_funcs(10*1000*1000, 100*1000*1000, "malloc_usable_size", + compare_funcs(10 * 1000 * 1000, 100 * 1000 * 1000, "malloc_usable_size", malloc_mus_free, "sallocx", malloc_sallocx_free); } TEST_END @@ -116,17 +116,14 @@ malloc_nallocx_free(void) { } TEST_BEGIN(test_sallocx_vs_nallocx) { - compare_funcs(10*1000*1000, 100*1000*1000, "sallocx", + compare_funcs(10 * 1000 * 1000, 100 * 1000 * 1000, "sallocx", malloc_sallocx_free, "nallocx", malloc_nallocx_free); } TEST_END int main(void) { - return test_no_reentrancy( - test_malloc_vs_mallocx, - test_free_vs_dallocx, - test_dallocx_vs_sdallocx, - test_mus_vs_sallocx, + return test_no_reentrancy(test_malloc_vs_mallocx, test_free_vs_dallocx, + test_dallocx_vs_sdallocx, test_mus_vs_sallocx, test_sallocx_vs_nallocx); } diff --git a/test/unit/SFMT.c b/test/unit/SFMT.c index b9f85dd9..8dbb61ed 100644 --- a/test/unit/SFMT.c +++ b/test/unit/SFMT.c @@ -40,1424 +40,1343 @@ #define COUNT_1 1000 #define COUNT_2 700 -static const uint32_t init_gen_rand_32_expected[] = { - 3440181298U, 1564997079U, 1510669302U, 2930277156U, 1452439940U, - 3796268453U, 423124208U, 2143818589U, 3827219408U, 2987036003U, - 
2674978610U, 1536842514U, 2027035537U, 2534897563U, 1686527725U, - 545368292U, 1489013321U, 1370534252U, 4231012796U, 3994803019U, - 1764869045U, 824597505U, 862581900U, 2469764249U, 812862514U, - 359318673U, 116957936U, 3367389672U, 2327178354U, 1898245200U, - 3206507879U, 2378925033U, 1040214787U, 2524778605U, 3088428700U, - 1417665896U, 964324147U, 2282797708U, 2456269299U, 313400376U, - 2245093271U, 1015729427U, 2694465011U, 3246975184U, 1992793635U, - 463679346U, 3721104591U, 3475064196U, 856141236U, 1499559719U, - 3522818941U, 3721533109U, 1954826617U, 1282044024U, 1543279136U, - 1301863085U, 2669145051U, 4221477354U, 3896016841U, 3392740262U, - 462466863U, 1037679449U, 1228140306U, 922298197U, 1205109853U, - 1872938061U, 3102547608U, 2742766808U, 1888626088U, 4028039414U, - 157593879U, 1136901695U, 4038377686U, 3572517236U, 4231706728U, - 2997311961U, 1189931652U, 3981543765U, 2826166703U, 87159245U, - 1721379072U, 3897926942U, 1790395498U, 2569178939U, 1047368729U, - 2340259131U, 3144212906U, 2301169789U, 2442885464U, 3034046771U, - 3667880593U, 3935928400U, 2372805237U, 1666397115U, 2460584504U, - 513866770U, 3810869743U, 2147400037U, 2792078025U, 2941761810U, - 3212265810U, 984692259U, 346590253U, 1804179199U, 3298543443U, - 750108141U, 2880257022U, 243310542U, 1869036465U, 1588062513U, - 2983949551U, 1931450364U, 4034505847U, 2735030199U, 1628461061U, - 2539522841U, 127965585U, 3992448871U, 913388237U, 559130076U, - 1202933193U, 4087643167U, 2590021067U, 2256240196U, 1746697293U, - 1013913783U, 1155864921U, 2715773730U, 915061862U, 1948766573U, - 2322882854U, 3761119102U, 1343405684U, 3078711943U, 3067431651U, - 3245156316U, 3588354584U, 3484623306U, 3899621563U, 4156689741U, - 3237090058U, 3880063844U, 862416318U, 4039923869U, 2303788317U, - 3073590536U, 701653667U, 2131530884U, 3169309950U, 2028486980U, - 747196777U, 3620218225U, 432016035U, 1449580595U, 2772266392U, - 444224948U, 1662832057U, 3184055582U, 3028331792U, 1861686254U, - 1104864179U, 
342430307U, 1350510923U, 3024656237U, 1028417492U, - 2870772950U, 290847558U, 3675663500U, 508431529U, 4264340390U, - 2263569913U, 1669302976U, 519511383U, 2706411211U, 3764615828U, - 3883162495U, 4051445305U, 2412729798U, 3299405164U, 3991911166U, - 2348767304U, 2664054906U, 3763609282U, 593943581U, 3757090046U, - 2075338894U, 2020550814U, 4287452920U, 4290140003U, 1422957317U, - 2512716667U, 2003485045U, 2307520103U, 2288472169U, 3940751663U, - 4204638664U, 2892583423U, 1710068300U, 3904755993U, 2363243951U, - 3038334120U, 547099465U, 771105860U, 3199983734U, 4282046461U, - 2298388363U, 934810218U, 2837827901U, 3952500708U, 2095130248U, - 3083335297U, 26885281U, 3932155283U, 1531751116U, 1425227133U, - 495654159U, 3279634176U, 3855562207U, 3957195338U, 4159985527U, - 893375062U, 1875515536U, 1327247422U, 3754140693U, 1028923197U, - 1729880440U, 805571298U, 448971099U, 2726757106U, 2749436461U, - 2485987104U, 175337042U, 3235477922U, 3882114302U, 2020970972U, - 943926109U, 2762587195U, 1904195558U, 3452650564U, 108432281U, - 3893463573U, 3977583081U, 2636504348U, 1110673525U, 3548479841U, - 4258854744U, 980047703U, 4057175418U, 3890008292U, 145653646U, - 3141868989U, 3293216228U, 1194331837U, 1254570642U, 3049934521U, - 2868313360U, 2886032750U, 1110873820U, 279553524U, 3007258565U, - 1104807822U, 3186961098U, 315764646U, 2163680838U, 3574508994U, - 3099755655U, 191957684U, 3642656737U, 3317946149U, 3522087636U, - 444526410U, 779157624U, 1088229627U, 1092460223U, 1856013765U, - 3659877367U, 368270451U, 503570716U, 3000984671U, 2742789647U, - 928097709U, 2914109539U, 308843566U, 2816161253U, 3667192079U, - 2762679057U, 3395240989U, 2928925038U, 1491465914U, 3458702834U, - 3787782576U, 2894104823U, 1296880455U, 1253636503U, 989959407U, - 2291560361U, 2776790436U, 1913178042U, 1584677829U, 689637520U, - 1898406878U, 688391508U, 3385234998U, 845493284U, 1943591856U, - 2720472050U, 222695101U, 1653320868U, 2904632120U, 4084936008U, - 1080720688U, 3938032556U, 
387896427U, 2650839632U, 99042991U, - 1720913794U, 1047186003U, 1877048040U, 2090457659U, 517087501U, - 4172014665U, 2129713163U, 2413533132U, 2760285054U, 4129272496U, - 1317737175U, 2309566414U, 2228873332U, 3889671280U, 1110864630U, - 3576797776U, 2074552772U, 832002644U, 3097122623U, 2464859298U, - 2679603822U, 1667489885U, 3237652716U, 1478413938U, 1719340335U, - 2306631119U, 639727358U, 3369698270U, 226902796U, 2099920751U, - 1892289957U, 2201594097U, 3508197013U, 3495811856U, 3900381493U, - 841660320U, 3974501451U, 3360949056U, 1676829340U, 728899254U, - 2047809627U, 2390948962U, 670165943U, 3412951831U, 4189320049U, - 1911595255U, 2055363086U, 507170575U, 418219594U, 4141495280U, - 2692088692U, 4203630654U, 3540093932U, 791986533U, 2237921051U, - 2526864324U, 2956616642U, 1394958700U, 1983768223U, 1893373266U, - 591653646U, 228432437U, 1611046598U, 3007736357U, 1040040725U, - 2726180733U, 2789804360U, 4263568405U, 829098158U, 3847722805U, - 1123578029U, 1804276347U, 997971319U, 4203797076U, 4185199713U, - 2811733626U, 2343642194U, 2985262313U, 1417930827U, 3759587724U, - 1967077982U, 1585223204U, 1097475516U, 1903944948U, 740382444U, - 1114142065U, 1541796065U, 1718384172U, 1544076191U, 1134682254U, - 3519754455U, 2866243923U, 341865437U, 645498576U, 2690735853U, - 1046963033U, 2493178460U, 1187604696U, 1619577821U, 488503634U, - 3255768161U, 2306666149U, 1630514044U, 2377698367U, 2751503746U, - 3794467088U, 1796415981U, 3657173746U, 409136296U, 1387122342U, - 1297726519U, 219544855U, 4270285558U, 437578827U, 1444698679U, - 2258519491U, 963109892U, 3982244073U, 3351535275U, 385328496U, - 1804784013U, 698059346U, 3920535147U, 708331212U, 784338163U, - 785678147U, 1238376158U, 1557298846U, 2037809321U, 271576218U, - 4145155269U, 1913481602U, 2763691931U, 588981080U, 1201098051U, - 3717640232U, 1509206239U, 662536967U, 3180523616U, 1133105435U, - 2963500837U, 2253971215U, 3153642623U, 1066925709U, 2582781958U, - 3034720222U, 1090798544U, 2942170004U, 
4036187520U, 686972531U, - 2610990302U, 2641437026U, 1837562420U, 722096247U, 1315333033U, - 2102231203U, 3402389208U, 3403698140U, 1312402831U, 2898426558U, - 814384596U, 385649582U, 1916643285U, 1924625106U, 2512905582U, - 2501170304U, 4275223366U, 2841225246U, 1467663688U, 3563567847U, - 2969208552U, 884750901U, 102992576U, 227844301U, 3681442994U, - 3502881894U, 4034693299U, 1166727018U, 1697460687U, 1737778332U, - 1787161139U, 1053003655U, 1215024478U, 2791616766U, 2525841204U, - 1629323443U, 3233815U, 2003823032U, 3083834263U, 2379264872U, - 3752392312U, 1287475550U, 3770904171U, 3004244617U, 1502117784U, - 918698423U, 2419857538U, 3864502062U, 1751322107U, 2188775056U, - 4018728324U, 983712955U, 440071928U, 3710838677U, 2001027698U, - 3994702151U, 22493119U, 3584400918U, 3446253670U, 4254789085U, - 1405447860U, 1240245579U, 1800644159U, 1661363424U, 3278326132U, - 3403623451U, 67092802U, 2609352193U, 3914150340U, 1814842761U, - 3610830847U, 591531412U, 3880232807U, 1673505890U, 2585326991U, - 1678544474U, 3148435887U, 3457217359U, 1193226330U, 2816576908U, - 154025329U, 121678860U, 1164915738U, 973873761U, 269116100U, - 52087970U, 744015362U, 498556057U, 94298882U, 1563271621U, - 2383059628U, 4197367290U, 3958472990U, 2592083636U, 2906408439U, - 1097742433U, 3924840517U, 264557272U, 2292287003U, 3203307984U, - 4047038857U, 3820609705U, 2333416067U, 1839206046U, 3600944252U, - 3412254904U, 583538222U, 2390557166U, 4140459427U, 2810357445U, - 226777499U, 2496151295U, 2207301712U, 3283683112U, 611630281U, - 1933218215U, 3315610954U, 3889441987U, 3719454256U, 3957190521U, - 1313998161U, 2365383016U, 3146941060U, 1801206260U, 796124080U, - 2076248581U, 1747472464U, 3254365145U, 595543130U, 3573909503U, - 3758250204U, 2020768540U, 2439254210U, 93368951U, 3155792250U, - 2600232980U, 3709198295U, 3894900440U, 2971850836U, 1578909644U, - 1443493395U, 2581621665U, 3086506297U, 2443465861U, 558107211U, - 1519367835U, 249149686U, 908102264U, 2588765675U, 1232743965U, - 
1001330373U, 3561331654U, 2259301289U, 1564977624U, 3835077093U, - 727244906U, 4255738067U, 1214133513U, 2570786021U, 3899704621U, - 1633861986U, 1636979509U, 1438500431U, 58463278U, 2823485629U, - 2297430187U, 2926781924U, 3371352948U, 1864009023U, 2722267973U, - 1444292075U, 437703973U, 1060414512U, 189705863U, 910018135U, - 4077357964U, 884213423U, 2644986052U, 3973488374U, 1187906116U, - 2331207875U, 780463700U, 3713351662U, 3854611290U, 412805574U, - 2978462572U, 2176222820U, 829424696U, 2790788332U, 2750819108U, - 1594611657U, 3899878394U, 3032870364U, 1702887682U, 1948167778U, - 14130042U, 192292500U, 947227076U, 90719497U, 3854230320U, - 784028434U, 2142399787U, 1563449646U, 2844400217U, 819143172U, - 2883302356U, 2328055304U, 1328532246U, 2603885363U, 3375188924U, - 933941291U, 3627039714U, 2129697284U, 2167253953U, 2506905438U, - 1412424497U, 2981395985U, 1418359660U, 2925902456U, 52752784U, - 3713667988U, 3924669405U, 648975707U, 1145520213U, 4018650664U, - 3805915440U, 2380542088U, 2013260958U, 3262572197U, 2465078101U, - 1114540067U, 3728768081U, 2396958768U, 590672271U, 904818725U, - 4263660715U, 700754408U, 1042601829U, 4094111823U, 4274838909U, - 2512692617U, 2774300207U, 2057306915U, 3470942453U, 99333088U, - 1142661026U, 2889931380U, 14316674U, 2201179167U, 415289459U, - 448265759U, 3515142743U, 3254903683U, 246633281U, 1184307224U, - 2418347830U, 2092967314U, 2682072314U, 2558750234U, 2000352263U, - 1544150531U, 399010405U, 1513946097U, 499682937U, 461167460U, - 3045570638U, 1633669705U, 851492362U, 4052801922U, 2055266765U, - 635556996U, 368266356U, 2385737383U, 3218202352U, 2603772408U, - 349178792U, 226482567U, 3102426060U, 3575998268U, 2103001871U, - 3243137071U, 225500688U, 1634718593U, 4283311431U, 4292122923U, - 3842802787U, 811735523U, 105712518U, 663434053U, 1855889273U, - 2847972595U, 1196355421U, 2552150115U, 4254510614U, 3752181265U, - 3430721819U, 3828705396U, 3436287905U, 3441964937U, 4123670631U, - 353001539U, 459496439U, 
3799690868U, 1293777660U, 2761079737U, - 498096339U, 3398433374U, 4080378380U, 2304691596U, 2995729055U, - 4134660419U, 3903444024U, 3576494993U, 203682175U, 3321164857U, - 2747963611U, 79749085U, 2992890370U, 1240278549U, 1772175713U, - 2111331972U, 2655023449U, 1683896345U, 2836027212U, 3482868021U, - 2489884874U, 756853961U, 2298874501U, 4013448667U, 4143996022U, - 2948306858U, 4132920035U, 1283299272U, 995592228U, 3450508595U, - 1027845759U, 1766942720U, 3861411826U, 1446861231U, 95974993U, - 3502263554U, 1487532194U, 601502472U, 4129619129U, 250131773U, - 2050079547U, 3198903947U, 3105589778U, 4066481316U, 3026383978U, - 2276901713U, 365637751U, 2260718426U, 1394775634U, 1791172338U, - 2690503163U, 2952737846U, 1568710462U, 732623190U, 2980358000U, - 1053631832U, 1432426951U, 3229149635U, 1854113985U, 3719733532U, - 3204031934U, 735775531U, 107468620U, 3734611984U, 631009402U, - 3083622457U, 4109580626U, 159373458U, 1301970201U, 4132389302U, - 1293255004U, 847182752U, 4170022737U, 96712900U, 2641406755U, - 1381727755U, 405608287U, 4287919625U, 1703554290U, 3589580244U, - 2911403488U, 2166565U, 2647306451U, 2330535117U, 1200815358U, - 1165916754U, 245060911U, 4040679071U, 3684908771U, 2452834126U, - 2486872773U, 2318678365U, 2940627908U, 1837837240U, 3447897409U, - 4270484676U, 1495388728U, 3754288477U, 4204167884U, 1386977705U, - 2692224733U, 3076249689U, 4109568048U, 4170955115U, 4167531356U, - 4020189950U, 4261855038U, 3036907575U, 3410399885U, 3076395737U, - 1046178638U, 144496770U, 230725846U, 3349637149U, 17065717U, - 2809932048U, 2054581785U, 3608424964U, 3259628808U, 134897388U, - 3743067463U, 257685904U, 3795656590U, 1562468719U, 3589103904U, - 3120404710U, 254684547U, 2653661580U, 3663904795U, 2631942758U, - 1063234347U, 2609732900U, 2332080715U, 3521125233U, 1180599599U, - 1935868586U, 4110970440U, 296706371U, 2128666368U, 1319875791U, - 1570900197U, 3096025483U, 1799882517U, 1928302007U, 1163707758U, - 1244491489U, 3533770203U, 567496053U, 
2757924305U, 2781639343U, - 2818420107U, 560404889U, 2619609724U, 4176035430U, 2511289753U, - 2521842019U, 3910553502U, 2926149387U, 3302078172U, 4237118867U, - 330725126U, 367400677U, 888239854U, 545570454U, 4259590525U, - 134343617U, 1102169784U, 1647463719U, 3260979784U, 1518840883U, - 3631537963U, 3342671457U, 1301549147U, 2083739356U, 146593792U, - 3217959080U, 652755743U, 2032187193U, 3898758414U, 1021358093U, - 4037409230U, 2176407931U, 3427391950U, 2883553603U, 985613827U, - 3105265092U, 3423168427U, 3387507672U, 467170288U, 2141266163U, - 3723870208U, 916410914U, 1293987799U, 2652584950U, 769160137U, - 3205292896U, 1561287359U, 1684510084U, 3136055621U, 3765171391U, - 639683232U, 2639569327U, 1218546948U, 4263586685U, 3058215773U, - 2352279820U, 401870217U, 2625822463U, 1529125296U, 2981801895U, - 1191285226U, 4027725437U, 3432700217U, 4098835661U, 971182783U, - 2443861173U, 3881457123U, 3874386651U, 457276199U, 2638294160U, - 4002809368U, 421169044U, 1112642589U, 3076213779U, 3387033971U, - 2499610950U, 3057240914U, 1662679783U, 461224431U, 1168395933U -}; -static const uint32_t init_by_array_32_expected[] = { - 2920711183U, 3885745737U, 3501893680U, 856470934U, 1421864068U, - 277361036U, 1518638004U, 2328404353U, 3355513634U, 64329189U, - 1624587673U, 3508467182U, 2481792141U, 3706480799U, 1925859037U, - 2913275699U, 882658412U, 384641219U, 422202002U, 1873384891U, - 2006084383U, 3924929912U, 1636718106U, 3108838742U, 1245465724U, - 4195470535U, 779207191U, 1577721373U, 1390469554U, 2928648150U, - 121399709U, 3170839019U, 4044347501U, 953953814U, 3821710850U, - 3085591323U, 3666535579U, 3577837737U, 2012008410U, 3565417471U, - 4044408017U, 433600965U, 1637785608U, 1798509764U, 860770589U, - 3081466273U, 3982393409U, 2451928325U, 3437124742U, 4093828739U, - 3357389386U, 2154596123U, 496568176U, 2650035164U, 2472361850U, - 3438299U, 2150366101U, 1577256676U, 3802546413U, 1787774626U, - 4078331588U, 3706103141U, 170391138U, 3806085154U, 1680970100U, - 
1961637521U, 3316029766U, 890610272U, 1453751581U, 1430283664U, - 3051057411U, 3597003186U, 542563954U, 3796490244U, 1690016688U, - 3448752238U, 440702173U, 347290497U, 1121336647U, 2540588620U, - 280881896U, 2495136428U, 213707396U, 15104824U, 2946180358U, - 659000016U, 566379385U, 2614030979U, 2855760170U, 334526548U, - 2315569495U, 2729518615U, 564745877U, 1263517638U, 3157185798U, - 1604852056U, 1011639885U, 2950579535U, 2524219188U, 312951012U, - 1528896652U, 1327861054U, 2846910138U, 3966855905U, 2536721582U, - 855353911U, 1685434729U, 3303978929U, 1624872055U, 4020329649U, - 3164802143U, 1642802700U, 1957727869U, 1792352426U, 3334618929U, - 2631577923U, 3027156164U, 842334259U, 3353446843U, 1226432104U, - 1742801369U, 3552852535U, 3471698828U, 1653910186U, 3380330939U, - 2313782701U, 3351007196U, 2129839995U, 1800682418U, 4085884420U, - 1625156629U, 3669701987U, 615211810U, 3294791649U, 4131143784U, - 2590843588U, 3207422808U, 3275066464U, 561592872U, 3957205738U, - 3396578098U, 48410678U, 3505556445U, 1005764855U, 3920606528U, - 2936980473U, 2378918600U, 2404449845U, 1649515163U, 701203563U, - 3705256349U, 83714199U, 3586854132U, 922978446U, 2863406304U, - 3523398907U, 2606864832U, 2385399361U, 3171757816U, 4262841009U, - 3645837721U, 1169579486U, 3666433897U, 3174689479U, 1457866976U, - 3803895110U, 3346639145U, 1907224409U, 1978473712U, 1036712794U, - 980754888U, 1302782359U, 1765252468U, 459245755U, 3728923860U, - 1512894209U, 2046491914U, 207860527U, 514188684U, 2288713615U, - 1597354672U, 3349636117U, 2357291114U, 3995796221U, 945364213U, - 1893326518U, 3770814016U, 1691552714U, 2397527410U, 967486361U, - 776416472U, 4197661421U, 951150819U, 1852770983U, 4044624181U, - 1399439738U, 4194455275U, 2284037669U, 1550734958U, 3321078108U, - 1865235926U, 2912129961U, 2664980877U, 1357572033U, 2600196436U, - 2486728200U, 2372668724U, 1567316966U, 2374111491U, 1839843570U, - 20815612U, 3727008608U, 3871996229U, 824061249U, 1932503978U, - 3404541726U, 
758428924U, 2609331364U, 1223966026U, 1299179808U, - 648499352U, 2180134401U, 880821170U, 3781130950U, 113491270U, - 1032413764U, 4185884695U, 2490396037U, 1201932817U, 4060951446U, - 4165586898U, 1629813212U, 2887821158U, 415045333U, 628926856U, - 2193466079U, 3391843445U, 2227540681U, 1907099846U, 2848448395U, - 1717828221U, 1372704537U, 1707549841U, 2294058813U, 2101214437U, - 2052479531U, 1695809164U, 3176587306U, 2632770465U, 81634404U, - 1603220563U, 644238487U, 302857763U, 897352968U, 2613146653U, - 1391730149U, 4245717312U, 4191828749U, 1948492526U, 2618174230U, - 3992984522U, 2178852787U, 3596044509U, 3445573503U, 2026614616U, - 915763564U, 3415689334U, 2532153403U, 3879661562U, 2215027417U, - 3111154986U, 2929478371U, 668346391U, 1152241381U, 2632029711U, - 3004150659U, 2135025926U, 948690501U, 2799119116U, 4228829406U, - 1981197489U, 4209064138U, 684318751U, 3459397845U, 201790843U, - 4022541136U, 3043635877U, 492509624U, 3263466772U, 1509148086U, - 921459029U, 3198857146U, 705479721U, 3835966910U, 3603356465U, - 576159741U, 1742849431U, 594214882U, 2055294343U, 3634861861U, - 449571793U, 3246390646U, 3868232151U, 1479156585U, 2900125656U, - 2464815318U, 3960178104U, 1784261920U, 18311476U, 3627135050U, - 644609697U, 424968996U, 919890700U, 2986824110U, 816423214U, - 4003562844U, 1392714305U, 1757384428U, 2569030598U, 995949559U, - 3875659880U, 2933807823U, 2752536860U, 2993858466U, 4030558899U, - 2770783427U, 2775406005U, 2777781742U, 1931292655U, 472147933U, - 3865853827U, 2726470545U, 2668412860U, 2887008249U, 408979190U, - 3578063323U, 3242082049U, 1778193530U, 27981909U, 2362826515U, - 389875677U, 1043878156U, 581653903U, 3830568952U, 389535942U, - 3713523185U, 2768373359U, 2526101582U, 1998618197U, 1160859704U, - 3951172488U, 1098005003U, 906275699U, 3446228002U, 2220677963U, - 2059306445U, 132199571U, 476838790U, 1868039399U, 3097344807U, - 857300945U, 396345050U, 2835919916U, 1782168828U, 1419519470U, - 4288137521U, 819087232U, 596301494U, 
872823172U, 1526888217U, - 805161465U, 1116186205U, 2829002754U, 2352620120U, 620121516U, - 354159268U, 3601949785U, 209568138U, 1352371732U, 2145977349U, - 4236871834U, 1539414078U, 3558126206U, 3224857093U, 4164166682U, - 3817553440U, 3301780278U, 2682696837U, 3734994768U, 1370950260U, - 1477421202U, 2521315749U, 1330148125U, 1261554731U, 2769143688U, - 3554756293U, 4235882678U, 3254686059U, 3530579953U, 1215452615U, - 3574970923U, 4057131421U, 589224178U, 1000098193U, 171190718U, - 2521852045U, 2351447494U, 2284441580U, 2646685513U, 3486933563U, - 3789864960U, 1190528160U, 1702536782U, 1534105589U, 4262946827U, - 2726686826U, 3584544841U, 2348270128U, 2145092281U, 2502718509U, - 1027832411U, 3571171153U, 1287361161U, 4011474411U, 3241215351U, - 2419700818U, 971242709U, 1361975763U, 1096842482U, 3271045537U, - 81165449U, 612438025U, 3912966678U, 1356929810U, 733545735U, - 537003843U, 1282953084U, 884458241U, 588930090U, 3930269801U, - 2961472450U, 1219535534U, 3632251943U, 268183903U, 1441240533U, - 3653903360U, 3854473319U, 2259087390U, 2548293048U, 2022641195U, - 2105543911U, 1764085217U, 3246183186U, 482438805U, 888317895U, - 2628314765U, 2466219854U, 717546004U, 2322237039U, 416725234U, - 1544049923U, 1797944973U, 3398652364U, 3111909456U, 485742908U, - 2277491072U, 1056355088U, 3181001278U, 129695079U, 2693624550U, - 1764438564U, 3797785470U, 195503713U, 3266519725U, 2053389444U, - 1961527818U, 3400226523U, 3777903038U, 2597274307U, 4235851091U, - 4094406648U, 2171410785U, 1781151386U, 1378577117U, 654643266U, - 3424024173U, 3385813322U, 679385799U, 479380913U, 681715441U, - 3096225905U, 276813409U, 3854398070U, 2721105350U, 831263315U, - 3276280337U, 2628301522U, 3984868494U, 1466099834U, 2104922114U, - 1412672743U, 820330404U, 3491501010U, 942735832U, 710652807U, - 3972652090U, 679881088U, 40577009U, 3705286397U, 2815423480U, - 3566262429U, 663396513U, 3777887429U, 4016670678U, 404539370U, - 1142712925U, 1140173408U, 2913248352U, 2872321286U, 263751841U, - 
3175196073U, 3162557581U, 2878996619U, 75498548U, 3836833140U, - 3284664959U, 1157523805U, 112847376U, 207855609U, 1337979698U, - 1222578451U, 157107174U, 901174378U, 3883717063U, 1618632639U, - 1767889440U, 4264698824U, 1582999313U, 884471997U, 2508825098U, - 3756370771U, 2457213553U, 3565776881U, 3709583214U, 915609601U, - 460833524U, 1091049576U, 85522880U, 2553251U, 132102809U, - 2429882442U, 2562084610U, 1386507633U, 4112471229U, 21965213U, - 1981516006U, 2418435617U, 3054872091U, 4251511224U, 2025783543U, - 1916911512U, 2454491136U, 3938440891U, 3825869115U, 1121698605U, - 3463052265U, 802340101U, 1912886800U, 4031997367U, 3550640406U, - 1596096923U, 610150600U, 431464457U, 2541325046U, 486478003U, - 739704936U, 2862696430U, 3037903166U, 1129749694U, 2611481261U, - 1228993498U, 510075548U, 3424962587U, 2458689681U, 818934833U, - 4233309125U, 1608196251U, 3419476016U, 1858543939U, 2682166524U, - 3317854285U, 631986188U, 3008214764U, 613826412U, 3567358221U, - 3512343882U, 1552467474U, 3316162670U, 1275841024U, 4142173454U, - 565267881U, 768644821U, 198310105U, 2396688616U, 1837659011U, - 203429334U, 854539004U, 4235811518U, 3338304926U, 3730418692U, - 3852254981U, 3032046452U, 2329811860U, 2303590566U, 2696092212U, - 3894665932U, 145835667U, 249563655U, 1932210840U, 2431696407U, - 3312636759U, 214962629U, 2092026914U, 3020145527U, 4073039873U, - 2739105705U, 1308336752U, 855104522U, 2391715321U, 67448785U, - 547989482U, 854411802U, 3608633740U, 431731530U, 537375589U, - 3888005760U, 696099141U, 397343236U, 1864511780U, 44029739U, - 1729526891U, 1993398655U, 2010173426U, 2591546756U, 275223291U, - 1503900299U, 4217765081U, 2185635252U, 1122436015U, 3550155364U, - 681707194U, 3260479338U, 933579397U, 2983029282U, 2505504587U, - 2667410393U, 2962684490U, 4139721708U, 2658172284U, 2452602383U, - 2607631612U, 1344296217U, 3075398709U, 2949785295U, 1049956168U, - 3917185129U, 2155660174U, 3280524475U, 1503827867U, 674380765U, - 1918468193U, 3843983676U, 634358221U, 
2538335643U, 1873351298U, - 3368723763U, 2129144130U, 3203528633U, 3087174986U, 2691698871U, - 2516284287U, 24437745U, 1118381474U, 2816314867U, 2448576035U, - 4281989654U, 217287825U, 165872888U, 2628995722U, 3533525116U, - 2721669106U, 872340568U, 3429930655U, 3309047304U, 3916704967U, - 3270160355U, 1348884255U, 1634797670U, 881214967U, 4259633554U, - 174613027U, 1103974314U, 1625224232U, 2678368291U, 1133866707U, - 3853082619U, 4073196549U, 1189620777U, 637238656U, 930241537U, - 4042750792U, 3842136042U, 2417007212U, 2524907510U, 1243036827U, - 1282059441U, 3764588774U, 1394459615U, 2323620015U, 1166152231U, - 3307479609U, 3849322257U, 3507445699U, 4247696636U, 758393720U, - 967665141U, 1095244571U, 1319812152U, 407678762U, 2640605208U, - 2170766134U, 3663594275U, 4039329364U, 2512175520U, 725523154U, - 2249807004U, 3312617979U, 2414634172U, 1278482215U, 349206484U, - 1573063308U, 1196429124U, 3873264116U, 2400067801U, 268795167U, - 226175489U, 2961367263U, 1968719665U, 42656370U, 1010790699U, - 561600615U, 2422453992U, 3082197735U, 1636700484U, 3977715296U, - 3125350482U, 3478021514U, 2227819446U, 1540868045U, 3061908980U, - 1087362407U, 3625200291U, 361937537U, 580441897U, 1520043666U, - 2270875402U, 1009161260U, 2502355842U, 4278769785U, 473902412U, - 1057239083U, 1905829039U, 1483781177U, 2080011417U, 1207494246U, - 1806991954U, 2194674403U, 3455972205U, 807207678U, 3655655687U, - 674112918U, 195425752U, 3917890095U, 1874364234U, 1837892715U, - 3663478166U, 1548892014U, 2570748714U, 2049929836U, 2167029704U, - 697543767U, 3499545023U, 3342496315U, 1725251190U, 3561387469U, - 2905606616U, 1580182447U, 3934525927U, 4103172792U, 1365672522U, - 1534795737U, 3308667416U, 2841911405U, 3943182730U, 4072020313U, - 3494770452U, 3332626671U, 55327267U, 478030603U, 411080625U, - 3419529010U, 1604767823U, 3513468014U, 570668510U, 913790824U, - 2283967995U, 695159462U, 3825542932U, 4150698144U, 1829758699U, - 202895590U, 1609122645U, 1267651008U, 2910315509U, 
2511475445U, - 2477423819U, 3932081579U, 900879979U, 2145588390U, 2670007504U, - 580819444U, 1864996828U, 2526325979U, 1019124258U, 815508628U, - 2765933989U, 1277301341U, 3006021786U, 855540956U, 288025710U, - 1919594237U, 2331223864U, 177452412U, 2475870369U, 2689291749U, - 865194284U, 253432152U, 2628531804U, 2861208555U, 2361597573U, - 1653952120U, 1039661024U, 2159959078U, 3709040440U, 3564718533U, - 2596878672U, 2041442161U, 31164696U, 2662962485U, 3665637339U, - 1678115244U, 2699839832U, 3651968520U, 3521595541U, 458433303U, - 2423096824U, 21831741U, 380011703U, 2498168716U, 861806087U, - 1673574843U, 4188794405U, 2520563651U, 2632279153U, 2170465525U, - 4171949898U, 3886039621U, 1661344005U, 3424285243U, 992588372U, - 2500984144U, 2993248497U, 3590193895U, 1535327365U, 515645636U, - 131633450U, 3729760261U, 1613045101U, 3254194278U, 15889678U, - 1493590689U, 244148718U, 2991472662U, 1401629333U, 777349878U, - 2501401703U, 4285518317U, 3794656178U, 955526526U, 3442142820U, - 3970298374U, 736025417U, 2737370764U, 1271509744U, 440570731U, - 136141826U, 1596189518U, 923399175U, 257541519U, 3505774281U, - 2194358432U, 2518162991U, 1379893637U, 2667767062U, 3748146247U, - 1821712620U, 3923161384U, 1947811444U, 2392527197U, 4127419685U, - 1423694998U, 4156576871U, 1382885582U, 3420127279U, 3617499534U, - 2994377493U, 4038063986U, 1918458672U, 2983166794U, 4200449033U, - 353294540U, 1609232588U, 243926648U, 2332803291U, 507996832U, - 2392838793U, 4075145196U, 2060984340U, 4287475136U, 88232602U, - 2491531140U, 4159725633U, 2272075455U, 759298618U, 201384554U, - 838356250U, 1416268324U, 674476934U, 90795364U, 141672229U, - 3660399588U, 4196417251U, 3249270244U, 3774530247U, 59587265U, - 3683164208U, 19392575U, 1463123697U, 1882205379U, 293780489U, - 2553160622U, 2933904694U, 675638239U, 2851336944U, 1435238743U, - 2448730183U, 804436302U, 2119845972U, 322560608U, 4097732704U, - 2987802540U, 641492617U, 2575442710U, 4217822703U, 3271835300U, - 2836418300U, 
3739921620U, 2138378768U, 2879771855U, 4294903423U, - 3121097946U, 2603440486U, 2560820391U, 1012930944U, 2313499967U, - 584489368U, 3431165766U, 897384869U, 2062537737U, 2847889234U, - 3742362450U, 2951174585U, 4204621084U, 1109373893U, 3668075775U, - 2750138839U, 3518055702U, 733072558U, 4169325400U, 788493625U -}; -static const uint64_t init_gen_rand_64_expected[] = { - KQU(16924766246869039260), KQU( 8201438687333352714), - KQU( 2265290287015001750), KQU(18397264611805473832), - KQU( 3375255223302384358), KQU( 6345559975416828796), - KQU(18229739242790328073), KQU( 7596792742098800905), - KQU( 255338647169685981), KQU( 2052747240048610300), - KQU(18328151576097299343), KQU(12472905421133796567), - KQU(11315245349717600863), KQU(16594110197775871209), - KQU(15708751964632456450), KQU(10452031272054632535), - KQU(11097646720811454386), KQU( 4556090668445745441), - KQU(17116187693090663106), KQU(14931526836144510645), - KQU( 9190752218020552591), KQU( 9625800285771901401), - KQU(13995141077659972832), KQU( 5194209094927829625), - KQU( 4156788379151063303), KQU( 8523452593770139494), - KQU(14082382103049296727), KQU( 2462601863986088483), - KQU( 3030583461592840678), KQU( 5221622077872827681), - KQU( 3084210671228981236), KQU(13956758381389953823), - KQU(13503889856213423831), KQU(15696904024189836170), - KQU( 4612584152877036206), KQU( 6231135538447867881), - KQU(10172457294158869468), KQU( 6452258628466708150), - KQU(14044432824917330221), KQU( 370168364480044279), - KQU(10102144686427193359), KQU( 667870489994776076), - KQU( 2732271956925885858), KQU(18027788905977284151), - KQU(15009842788582923859), KQU( 7136357960180199542), - KQU(15901736243475578127), KQU(16951293785352615701), - KQU(10551492125243691632), KQU(17668869969146434804), - KQU(13646002971174390445), KQU( 9804471050759613248), - KQU( 5511670439655935493), KQU(18103342091070400926), - KQU(17224512747665137533), KQU(15534627482992618168), - KQU( 1423813266186582647), KQU(15821176807932930024), - 
KQU( 30323369733607156), KQU(11599382494723479403), - KQU( 653856076586810062), KQU( 3176437395144899659), - KQU(14028076268147963917), KQU(16156398271809666195), - KQU( 3166955484848201676), KQU( 5746805620136919390), - KQU(17297845208891256593), KQU(11691653183226428483), - KQU(17900026146506981577), KQU(15387382115755971042), - KQU(16923567681040845943), KQU( 8039057517199388606), - KQU(11748409241468629263), KQU( 794358245539076095), - KQU(13438501964693401242), KQU(14036803236515618962), - KQU( 5252311215205424721), KQU(17806589612915509081), - KQU( 6802767092397596006), KQU(14212120431184557140), - KQU( 1072951366761385712), KQU(13098491780722836296), - KQU( 9466676828710797353), KQU(12673056849042830081), - KQU(12763726623645357580), KQU(16468961652999309493), - KQU(15305979875636438926), KQU(17444713151223449734), - KQU( 5692214267627883674), KQU(13049589139196151505), - KQU( 880115207831670745), KQU( 1776529075789695498), - KQU(16695225897801466485), KQU(10666901778795346845), - KQU( 6164389346722833869), KQU( 2863817793264300475), - KQU( 9464049921886304754), KQU( 3993566636740015468), - KQU( 9983749692528514136), KQU(16375286075057755211), - KQU(16042643417005440820), KQU(11445419662923489877), - KQU( 7999038846885158836), KQU( 6721913661721511535), - KQU( 5363052654139357320), KQU( 1817788761173584205), - KQU(13290974386445856444), KQU( 4650350818937984680), - KQU( 8219183528102484836), KQU( 1569862923500819899), - KQU( 4189359732136641860), KQU(14202822961683148583), - KQU( 4457498315309429058), KQU(13089067387019074834), - KQU(11075517153328927293), KQU(10277016248336668389), - KQU( 7070509725324401122), KQU(17808892017780289380), - KQU(13143367339909287349), KQU( 1377743745360085151), - KQU( 5749341807421286485), KQU(14832814616770931325), - KQU( 7688820635324359492), KQU(10960474011539770045), - KQU( 81970066653179790), KQU(12619476072607878022), - KQU( 4419566616271201744), KQU(15147917311750568503), - KQU( 5549739182852706345), KQU( 
7308198397975204770), - KQU(13580425496671289278), KQU(17070764785210130301), - KQU( 8202832846285604405), KQU( 6873046287640887249), - KQU( 6927424434308206114), KQU( 6139014645937224874), - KQU(10290373645978487639), KQU(15904261291701523804), - KQU( 9628743442057826883), KQU(18383429096255546714), - KQU( 4977413265753686967), KQU( 7714317492425012869), - KQU( 9025232586309926193), KQU(14627338359776709107), - KQU(14759849896467790763), KQU(10931129435864423252), - KQU( 4588456988775014359), KQU(10699388531797056724), - KQU( 468652268869238792), KQU( 5755943035328078086), - KQU( 2102437379988580216), KQU( 9986312786506674028), - KQU( 2654207180040945604), KQU( 8726634790559960062), - KQU( 100497234871808137), KQU( 2800137176951425819), - KQU( 6076627612918553487), KQU( 5780186919186152796), - KQU( 8179183595769929098), KQU( 6009426283716221169), - KQU( 2796662551397449358), KQU( 1756961367041986764), - KQU( 6972897917355606205), KQU(14524774345368968243), - KQU( 2773529684745706940), KQU( 4853632376213075959), - KQU( 4198177923731358102), KQU( 8271224913084139776), - KQU( 2741753121611092226), KQU(16782366145996731181), - KQU(15426125238972640790), KQU(13595497100671260342), - KQU( 3173531022836259898), KQU( 6573264560319511662), - KQU(18041111951511157441), KQU( 2351433581833135952), - KQU( 3113255578908173487), KQU( 1739371330877858784), - KQU(16046126562789165480), KQU( 8072101652214192925), - KQU(15267091584090664910), KQU( 9309579200403648940), - KQU( 5218892439752408722), KQU(14492477246004337115), - KQU(17431037586679770619), KQU( 7385248135963250480), - KQU( 9580144956565560660), KQU( 4919546228040008720), - KQU(15261542469145035584), KQU(18233297270822253102), - KQU( 5453248417992302857), KQU( 9309519155931460285), - KQU(10342813012345291756), KQU(15676085186784762381), - KQU(15912092950691300645), KQU( 9371053121499003195), - KQU( 9897186478226866746), KQU(14061858287188196327), - KQU( 122575971620788119), KQU(12146750969116317754), - KQU( 
4438317272813245201), KQU( 8332576791009527119), - KQU(13907785691786542057), KQU(10374194887283287467), - KQU( 2098798755649059566), KQU( 3416235197748288894), - KQU( 8688269957320773484), KQU( 7503964602397371571), - KQU(16724977015147478236), KQU( 9461512855439858184), - KQU(13259049744534534727), KQU( 3583094952542899294), - KQU( 8764245731305528292), KQU(13240823595462088985), - KQU(13716141617617910448), KQU(18114969519935960955), - KQU( 2297553615798302206), KQU( 4585521442944663362), - KQU(17776858680630198686), KQU( 4685873229192163363), - KQU( 152558080671135627), KQU(15424900540842670088), - KQU(13229630297130024108), KQU(17530268788245718717), - KQU(16675633913065714144), KQU( 3158912717897568068), - KQU(15399132185380087288), KQU( 7401418744515677872), - KQU(13135412922344398535), KQU( 6385314346100509511), - KQU(13962867001134161139), KQU(10272780155442671999), - KQU(12894856086597769142), KQU(13340877795287554994), - KQU(12913630602094607396), KQU(12543167911119793857), - KQU(17343570372251873096), KQU(10959487764494150545), - KQU( 6966737953093821128), KQU(13780699135496988601), - KQU( 4405070719380142046), KQU(14923788365607284982), - KQU( 2869487678905148380), KQU( 6416272754197188403), - KQU(15017380475943612591), KQU( 1995636220918429487), - KQU( 3402016804620122716), KQU(15800188663407057080), - KQU(11362369990390932882), KQU(15262183501637986147), - KQU(10239175385387371494), KQU( 9352042420365748334), - KQU( 1682457034285119875), KQU( 1724710651376289644), - KQU( 2038157098893817966), KQU( 9897825558324608773), - KQU( 1477666236519164736), KQU(16835397314511233640), - KQU(10370866327005346508), KQU(10157504370660621982), - KQU(12113904045335882069), KQU(13326444439742783008), - KQU(11302769043000765804), KQU(13594979923955228484), - KQU(11779351762613475968), KQU( 3786101619539298383), - KQU( 8021122969180846063), KQU(15745904401162500495), - KQU(10762168465993897267), KQU(13552058957896319026), - KQU(11200228655252462013), KQU( 
5035370357337441226), - KQU( 7593918984545500013), KQU( 5418554918361528700), - KQU( 4858270799405446371), KQU( 9974659566876282544), - KQU(18227595922273957859), KQU( 2772778443635656220), - KQU(14285143053182085385), KQU( 9939700992429600469), - KQU(12756185904545598068), KQU( 2020783375367345262), - KQU( 57026775058331227), KQU( 950827867930065454), - KQU( 6602279670145371217), KQU( 2291171535443566929), - KQU( 5832380724425010313), KQU( 1220343904715982285), - KQU(17045542598598037633), KQU(15460481779702820971), - KQU(13948388779949365130), KQU(13975040175430829518), - KQU(17477538238425541763), KQU(11104663041851745725), - KQU(15860992957141157587), KQU(14529434633012950138), - KQU( 2504838019075394203), KQU( 7512113882611121886), - KQU( 4859973559980886617), KQU( 1258601555703250219), - KQU(15594548157514316394), KQU( 4516730171963773048), - KQU(11380103193905031983), KQU( 6809282239982353344), - KQU(18045256930420065002), KQU( 2453702683108791859), - KQU( 977214582986981460), KQU( 2006410402232713466), - KQU( 6192236267216378358), KQU( 3429468402195675253), - KQU(18146933153017348921), KQU(17369978576367231139), - KQU( 1246940717230386603), KQU(11335758870083327110), - KQU(14166488801730353682), KQU( 9008573127269635732), - KQU(10776025389820643815), KQU(15087605441903942962), - KQU( 1359542462712147922), KQU(13898874411226454206), - KQU(17911176066536804411), KQU( 9435590428600085274), - KQU( 294488509967864007), KQU( 8890111397567922046), - KQU( 7987823476034328778), KQU(13263827582440967651), - KQU( 7503774813106751573), KQU(14974747296185646837), - KQU( 8504765037032103375), KQU(17340303357444536213), - KQU( 7704610912964485743), KQU( 8107533670327205061), - KQU( 9062969835083315985), KQU(16968963142126734184), - KQU(12958041214190810180), KQU( 2720170147759570200), - KQU( 2986358963942189566), KQU(14884226322219356580), - KQU( 286224325144368520), KQU(11313800433154279797), - KQU(18366849528439673248), KQU(17899725929482368789), - KQU( 
3730004284609106799), KQU( 1654474302052767205), - KQU( 5006698007047077032), KQU( 8196893913601182838), - KQU(15214541774425211640), KQU(17391346045606626073), - KQU( 8369003584076969089), KQU( 3939046733368550293), - KQU(10178639720308707785), KQU( 2180248669304388697), - KQU( 62894391300126322), KQU( 9205708961736223191), - KQU( 6837431058165360438), KQU( 3150743890848308214), - KQU(17849330658111464583), KQU(12214815643135450865), - KQU(13410713840519603402), KQU( 3200778126692046802), - KQU(13354780043041779313), KQU( 800850022756886036), - KQU(15660052933953067433), KQU( 6572823544154375676), - KQU(11030281857015819266), KQU(12682241941471433835), - KQU(11654136407300274693), KQU( 4517795492388641109), - KQU( 9757017371504524244), KQU(17833043400781889277), - KQU(12685085201747792227), KQU(10408057728835019573), - KQU( 98370418513455221), KQU( 6732663555696848598), - KQU(13248530959948529780), KQU( 3530441401230622826), - KQU(18188251992895660615), KQU( 1847918354186383756), - KQU( 1127392190402660921), KQU(11293734643143819463), - KQU( 3015506344578682982), KQU(13852645444071153329), - KQU( 2121359659091349142), KQU( 1294604376116677694), - KQU( 5616576231286352318), KQU( 7112502442954235625), - KQU(11676228199551561689), KQU(12925182803007305359), - KQU( 7852375518160493082), KQU( 1136513130539296154), - KQU( 5636923900916593195), KQU( 3221077517612607747), - KQU(17784790465798152513), KQU( 3554210049056995938), - KQU(17476839685878225874), KQU( 3206836372585575732), - KQU( 2765333945644823430), KQU(10080070903718799528), - KQU( 5412370818878286353), KQU( 9689685887726257728), - KQU( 8236117509123533998), KQU( 1951139137165040214), - KQU( 4492205209227980349), KQU(16541291230861602967), - KQU( 1424371548301437940), KQU( 9117562079669206794), - KQU(14374681563251691625), KQU(13873164030199921303), - KQU( 6680317946770936731), KQU(15586334026918276214), - KQU(10896213950976109802), KQU( 9506261949596413689), - KQU( 9903949574308040616), KQU( 
6038397344557204470), - KQU( 174601465422373648), KQU(15946141191338238030), - KQU(17142225620992044937), KQU( 7552030283784477064), - KQU( 2947372384532947997), KQU( 510797021688197711), - KQU( 4962499439249363461), KQU( 23770320158385357), - KQU( 959774499105138124), KQU( 1468396011518788276), - KQU( 2015698006852312308), KQU( 4149400718489980136), - KQU( 5992916099522371188), KQU(10819182935265531076), - KQU(16189787999192351131), KQU( 342833961790261950), - KQU(12470830319550495336), KQU(18128495041912812501), - KQU( 1193600899723524337), KQU( 9056793666590079770), - KQU( 2154021227041669041), KQU( 4963570213951235735), - KQU( 4865075960209211409), KQU( 2097724599039942963), - KQU( 2024080278583179845), KQU(11527054549196576736), - KQU(10650256084182390252), KQU( 4808408648695766755), - KQU( 1642839215013788844), KQU(10607187948250398390), - KQU( 7076868166085913508), KQU( 730522571106887032), - KQU(12500579240208524895), KQU( 4484390097311355324), - KQU(15145801330700623870), KQU( 8055827661392944028), - KQU( 5865092976832712268), KQU(15159212508053625143), - KQU( 3560964582876483341), KQU( 4070052741344438280), - KQU( 6032585709886855634), KQU(15643262320904604873), - KQU( 2565119772293371111), KQU( 318314293065348260), - KQU(15047458749141511872), KQU( 7772788389811528730), - KQU( 7081187494343801976), KQU( 6465136009467253947), - KQU(10425940692543362069), KQU( 554608190318339115), - KQU(14796699860302125214), KQU( 1638153134431111443), - KQU(10336967447052276248), KQU( 8412308070396592958), - KQU( 4004557277152051226), KQU( 8143598997278774834), - KQU(16413323996508783221), KQU(13139418758033994949), - KQU( 9772709138335006667), KQU( 2818167159287157659), - KQU(17091740573832523669), KQU(14629199013130751608), - KQU(18268322711500338185), KQU( 8290963415675493063), - KQU( 8830864907452542588), KQU( 1614839084637494849), - KQU(14855358500870422231), KQU( 3472996748392519937), - KQU(15317151166268877716), KQU( 5825895018698400362), - 
KQU(16730208429367544129), KQU(10481156578141202800), - KQU( 4746166512382823750), KQU(12720876014472464998), - KQU( 8825177124486735972), KQU(13733447296837467838), - KQU( 6412293741681359625), KQU( 8313213138756135033), - KQU(11421481194803712517), KQU( 7997007691544174032), - KQU( 6812963847917605930), KQU( 9683091901227558641), - KQU(14703594165860324713), KQU( 1775476144519618309), - KQU( 2724283288516469519), KQU( 717642555185856868), - KQU( 8736402192215092346), KQU(11878800336431381021), - KQU( 4348816066017061293), KQU( 6115112756583631307), - KQU( 9176597239667142976), KQU(12615622714894259204), - KQU(10283406711301385987), KQU( 5111762509485379420), - KQU( 3118290051198688449), KQU( 7345123071632232145), - KQU( 9176423451688682359), KQU( 4843865456157868971), - KQU(12008036363752566088), KQU(12058837181919397720), - KQU( 2145073958457347366), KQU( 1526504881672818067), - KQU( 3488830105567134848), KQU(13208362960674805143), - KQU( 4077549672899572192), KQU( 7770995684693818365), - KQU( 1398532341546313593), KQU(12711859908703927840), - KQU( 1417561172594446813), KQU(17045191024194170604), - KQU( 4101933177604931713), KQU(14708428834203480320), - KQU(17447509264469407724), KQU(14314821973983434255), - KQU(17990472271061617265), KQU( 5087756685841673942), - KQU(12797820586893859939), KQU( 1778128952671092879), - KQU( 3535918530508665898), KQU( 9035729701042481301), - KQU(14808661568277079962), KQU(14587345077537747914), - KQU(11920080002323122708), KQU( 6426515805197278753), - KQU( 3295612216725984831), KQU(11040722532100876120), - KQU(12305952936387598754), KQU(16097391899742004253), - KQU( 4908537335606182208), KQU(12446674552196795504), - KQU(16010497855816895177), KQU( 9194378874788615551), - KQU( 3382957529567613384), KQU( 5154647600754974077), - KQU( 9801822865328396141), KQU( 9023662173919288143), - KQU(17623115353825147868), KQU( 8238115767443015816), - KQU(15811444159859002560), KQU( 9085612528904059661), - KQU( 6888601089398614254), KQU( 
258252992894160189), - KQU( 6704363880792428622), KQU( 6114966032147235763), - KQU(11075393882690261875), KQU( 8797664238933620407), - KQU( 5901892006476726920), KQU( 5309780159285518958), - KQU(14940808387240817367), KQU(14642032021449656698), - KQU( 9808256672068504139), KQU( 3670135111380607658), - KQU(11211211097845960152), KQU( 1474304506716695808), - KQU(15843166204506876239), KQU( 7661051252471780561), - KQU(10170905502249418476), KQU( 7801416045582028589), - KQU( 2763981484737053050), KQU( 9491377905499253054), - KQU(16201395896336915095), KQU( 9256513756442782198), - KQU( 5411283157972456034), KQU( 5059433122288321676), - KQU( 4327408006721123357), KQU( 9278544078834433377), - KQU( 7601527110882281612), KQU(11848295896975505251), - KQU(12096998801094735560), KQU(14773480339823506413), - KQU(15586227433895802149), KQU(12786541257830242872), - KQU( 6904692985140503067), KQU( 5309011515263103959), - KQU(12105257191179371066), KQU(14654380212442225037), - KQU( 2556774974190695009), KQU( 4461297399927600261), - KQU(14888225660915118646), KQU(14915459341148291824), - KQU( 2738802166252327631), KQU( 6047155789239131512), - KQU(12920545353217010338), KQU(10697617257007840205), - KQU( 2751585253158203504), KQU(13252729159780047496), - KQU(14700326134672815469), KQU(14082527904374600529), - KQU(16852962273496542070), KQU(17446675504235853907), - KQU(15019600398527572311), KQU(12312781346344081551), - KQU(14524667935039810450), KQU( 5634005663377195738), - KQU(11375574739525000569), KQU( 2423665396433260040), - KQU( 5222836914796015410), KQU( 4397666386492647387), - KQU( 4619294441691707638), KQU( 665088602354770716), - KQU(13246495665281593610), KQU( 6564144270549729409), - KQU(10223216188145661688), KQU( 3961556907299230585), - KQU(11543262515492439914), KQU(16118031437285993790), - KQU( 7143417964520166465), KQU(13295053515909486772), - KQU( 40434666004899675), KQU(17127804194038347164), - KQU( 8599165966560586269), KQU( 8214016749011284903), - 
KQU(13725130352140465239), KQU( 5467254474431726291), - KQU( 7748584297438219877), KQU(16933551114829772472), - KQU( 2169618439506799400), KQU( 2169787627665113463), - KQU(17314493571267943764), KQU(18053575102911354912), - KQU(11928303275378476973), KQU(11593850925061715550), - KQU(17782269923473589362), KQU( 3280235307704747039), - KQU( 6145343578598685149), KQU(17080117031114086090), - KQU(18066839902983594755), KQU( 6517508430331020706), - KQU( 8092908893950411541), KQU(12558378233386153732), - KQU( 4476532167973132976), KQU(16081642430367025016), - KQU( 4233154094369139361), KQU( 8693630486693161027), - KQU(11244959343027742285), KQU(12273503967768513508), - KQU(14108978636385284876), KQU( 7242414665378826984), - KQU( 6561316938846562432), KQU( 8601038474994665795), - KQU(17532942353612365904), KQU(17940076637020912186), - KQU( 7340260368823171304), KQU( 7061807613916067905), - KQU(10561734935039519326), KQU(17990796503724650862), - KQU( 6208732943911827159), KQU( 359077562804090617), - KQU(14177751537784403113), KQU(10659599444915362902), - KQU(15081727220615085833), KQU(13417573895659757486), - KQU(15513842342017811524), KQU(11814141516204288231), - KQU( 1827312513875101814), KQU( 2804611699894603103), - KQU(17116500469975602763), KQU(12270191815211952087), - KQU(12256358467786024988), KQU(18435021722453971267), - KQU( 671330264390865618), KQU( 476504300460286050), - KQU(16465470901027093441), KQU( 4047724406247136402), - KQU( 1322305451411883346), KQU( 1388308688834322280), - KQU( 7303989085269758176), KQU( 9323792664765233642), - KQU( 4542762575316368936), KQU(17342696132794337618), - KQU( 4588025054768498379), KQU(13415475057390330804), - KQU(17880279491733405570), KQU(10610553400618620353), - KQU( 3180842072658960139), KQU(13002966655454270120), - KQU( 1665301181064982826), KQU( 7083673946791258979), - KQU( 190522247122496820), KQU(17388280237250677740), - KQU( 8430770379923642945), KQU(12987180971921668584), - KQU( 2311086108365390642), KQU( 
2870984383579822345), - KQU(14014682609164653318), KQU(14467187293062251484), - KQU( 192186361147413298), KQU(15171951713531796524), - KQU( 9900305495015948728), KQU(17958004775615466344), - KQU(14346380954498606514), KQU(18040047357617407096), - KQU( 5035237584833424532), KQU(15089555460613972287), - KQU( 4131411873749729831), KQU( 1329013581168250330), - KQU(10095353333051193949), KQU(10749518561022462716), - KQU( 9050611429810755847), KQU(15022028840236655649), - KQU( 8775554279239748298), KQU(13105754025489230502), - KQU(15471300118574167585), KQU( 89864764002355628), - KQU( 8776416323420466637), KQU( 5280258630612040891), - KQU( 2719174488591862912), KQU( 7599309137399661994), - KQU(15012887256778039979), KQU(14062981725630928925), - KQU(12038536286991689603), KQU( 7089756544681775245), - KQU(10376661532744718039), KQU( 1265198725901533130), - KQU(13807996727081142408), KQU( 2935019626765036403), - KQU( 7651672460680700141), KQU( 3644093016200370795), - KQU( 2840982578090080674), KQU(17956262740157449201), - KQU(18267979450492880548), KQU(11799503659796848070), - KQU( 9942537025669672388), KQU(11886606816406990297), - KQU( 5488594946437447576), KQU( 7226714353282744302), - KQU( 3784851653123877043), KQU( 878018453244803041), - KQU(12110022586268616085), KQU( 734072179404675123), - KQU(11869573627998248542), KQU( 469150421297783998), - KQU( 260151124912803804), KQU(11639179410120968649), - KQU( 9318165193840846253), KQU(12795671722734758075), - KQU(15318410297267253933), KQU( 691524703570062620), - KQU( 5837129010576994601), KQU(15045963859726941052), - KQU( 5850056944932238169), KQU(12017434144750943807), - KQU( 7447139064928956574), KQU( 3101711812658245019), - KQU(16052940704474982954), KQU(18195745945986994042), - KQU( 8932252132785575659), KQU(13390817488106794834), - KQU(11582771836502517453), KQU( 4964411326683611686), - KQU( 2195093981702694011), KQU(14145229538389675669), - KQU(16459605532062271798), KQU( 866316924816482864), - KQU( 
4593041209937286377), KQU( 8415491391910972138), - KQU( 4171236715600528969), KQU(16637569303336782889), - KQU( 2002011073439212680), KQU(17695124661097601411), - KQU( 4627687053598611702), KQU( 7895831936020190403), - KQU( 8455951300917267802), KQU( 2923861649108534854), - KQU( 8344557563927786255), KQU( 6408671940373352556), - KQU(12210227354536675772), KQU(14294804157294222295), - KQU(10103022425071085127), KQU(10092959489504123771), - KQU( 6554774405376736268), KQU(12629917718410641774), - KQU( 6260933257596067126), KQU( 2460827021439369673), - KQU( 2541962996717103668), KQU( 597377203127351475), - KQU( 5316984203117315309), KQU( 4811211393563241961), - KQU(13119698597255811641), KQU( 8048691512862388981), - KQU(10216818971194073842), KQU( 4612229970165291764), - KQU(10000980798419974770), KQU( 6877640812402540687), - KQU( 1488727563290436992), KQU( 2227774069895697318), - KQU(11237754507523316593), KQU(13478948605382290972), - KQU( 1963583846976858124), KQU( 5512309205269276457), - KQU( 3972770164717652347), KQU( 3841751276198975037), - KQU(10283343042181903117), KQU( 8564001259792872199), - KQU(16472187244722489221), KQU( 8953493499268945921), - KQU( 3518747340357279580), KQU( 4003157546223963073), - KQU( 3270305958289814590), KQU( 3966704458129482496), - KQU( 8122141865926661939), KQU(14627734748099506653), - KQU(13064426990862560568), KQU( 2414079187889870829), - KQU( 5378461209354225306), KQU(10841985740128255566), - KQU( 538582442885401738), KQU( 7535089183482905946), - KQU(16117559957598879095), KQU( 8477890721414539741), - KQU( 1459127491209533386), KQU(17035126360733620462), - KQU( 8517668552872379126), KQU(10292151468337355014), - KQU(17081267732745344157), KQU(13751455337946087178), - KQU(14026945459523832966), KQU( 6653278775061723516), - KQU(10619085543856390441), KQU( 2196343631481122885), - KQU(10045966074702826136), KQU(10082317330452718282), - KQU( 5920859259504831242), KQU( 9951879073426540617), - KQU( 7074696649151414158), 
KQU(15808193543879464318), - KQU( 7385247772746953374), KQU( 3192003544283864292), - KQU(18153684490917593847), KQU(12423498260668568905), - KQU(10957758099756378169), KQU(11488762179911016040), - KQU( 2099931186465333782), KQU(11180979581250294432), - KQU( 8098916250668367933), KQU( 3529200436790763465), - KQU(12988418908674681745), KQU( 6147567275954808580), - KQU( 3207503344604030989), KQU(10761592604898615360), - KQU( 229854861031893504), KQU( 8809853962667144291), - KQU(13957364469005693860), KQU( 7634287665224495886), - KQU(12353487366976556874), KQU( 1134423796317152034), - KQU( 2088992471334107068), KQU( 7393372127190799698), - KQU( 1845367839871058391), KQU( 207922563987322884), - KQU(11960870813159944976), KQU(12182120053317317363), - KQU(17307358132571709283), KQU(13871081155552824936), - KQU(18304446751741566262), KQU( 7178705220184302849), - KQU(10929605677758824425), KQU(16446976977835806844), - KQU(13723874412159769044), KQU( 6942854352100915216), - KQU( 1726308474365729390), KQU( 2150078766445323155), - KQU(15345558947919656626), KQU(12145453828874527201), - KQU( 2054448620739726849), KQU( 2740102003352628137), - KQU(11294462163577610655), KQU( 756164283387413743), - KQU(17841144758438810880), KQU(10802406021185415861), - KQU( 8716455530476737846), KQU( 6321788834517649606), - KQU(14681322910577468426), KQU(17330043563884336387), - KQU(12701802180050071614), KQU(14695105111079727151), - KQU( 5112098511654172830), KQU( 4957505496794139973), - KQU( 8270979451952045982), KQU(12307685939199120969), - KQU(12425799408953443032), KQU( 8376410143634796588), - KQU(16621778679680060464), KQU( 3580497854566660073), - KQU( 1122515747803382416), KQU( 857664980960597599), - KQU( 6343640119895925918), KQU(12878473260854462891), - KQU(10036813920765722626), KQU(14451335468363173812), - KQU( 5476809692401102807), KQU(16442255173514366342), - KQU(13060203194757167104), KQU(14354124071243177715), - KQU(15961249405696125227), KQU(13703893649690872584), - KQU( 
363907326340340064), KQU( 6247455540491754842), - KQU(12242249332757832361), KQU( 156065475679796717), - KQU( 9351116235749732355), KQU( 4590350628677701405), - KQU( 1671195940982350389), KQU(13501398458898451905), - KQU( 6526341991225002255), KQU( 1689782913778157592), - KQU( 7439222350869010334), KQU(13975150263226478308), - KQU(11411961169932682710), KQU(17204271834833847277), - KQU( 541534742544435367), KQU( 6591191931218949684), - KQU( 2645454775478232486), KQU( 4322857481256485321), - KQU( 8477416487553065110), KQU(12902505428548435048), - KQU( 971445777981341415), KQU(14995104682744976712), - KQU( 4243341648807158063), KQU( 8695061252721927661), - KQU( 5028202003270177222), KQU( 2289257340915567840), - KQU(13870416345121866007), KQU(13994481698072092233), - KQU( 6912785400753196481), KQU( 2278309315841980139), - KQU( 4329765449648304839), KQU( 5963108095785485298), - KQU( 4880024847478722478), KQU(16015608779890240947), - KQU( 1866679034261393544), KQU( 914821179919731519), - KQU( 9643404035648760131), KQU( 2418114953615593915), - KQU( 944756836073702374), KQU(15186388048737296834), - KQU( 7723355336128442206), KQU( 7500747479679599691), - KQU(18013961306453293634), KQU( 2315274808095756456), - KQU(13655308255424029566), KQU(17203800273561677098), - KQU( 1382158694422087756), KQU( 5090390250309588976), - KQU( 517170818384213989), KQU( 1612709252627729621), - KQU( 1330118955572449606), KQU( 300922478056709885), - KQU(18115693291289091987), KQU(13491407109725238321), - KQU(15293714633593827320), KQU( 5151539373053314504), - KQU( 5951523243743139207), KQU(14459112015249527975), - KQU( 5456113959000700739), KQU( 3877918438464873016), - KQU(12534071654260163555), KQU(15871678376893555041), - KQU(11005484805712025549), KQU(16353066973143374252), - KQU( 4358331472063256685), KQU( 8268349332210859288), - KQU(12485161590939658075), KQU(13955993592854471343), - KQU( 5911446886848367039), KQU(14925834086813706974), - KQU( 6590362597857994805), KQU( 
1280544923533661875), - KQU( 1637756018947988164), KQU( 4734090064512686329), - KQU(16693705263131485912), KQU( 6834882340494360958), - KQU( 8120732176159658505), KQU( 2244371958905329346), - KQU(10447499707729734021), KQU( 7318742361446942194), - KQU( 8032857516355555296), KQU(14023605983059313116), - KQU( 1032336061815461376), KQU( 9840995337876562612), - KQU( 9869256223029203587), KQU(12227975697177267636), - KQU(12728115115844186033), KQU( 7752058479783205470), - KQU( 729733219713393087), KQU(12954017801239007622) -}; -static const uint64_t init_by_array_64_expected[] = { - KQU( 2100341266307895239), KQU( 8344256300489757943), - KQU(15687933285484243894), KQU( 8268620370277076319), - KQU(12371852309826545459), KQU( 8800491541730110238), - KQU(18113268950100835773), KQU( 2886823658884438119), - KQU( 3293667307248180724), KQU( 9307928143300172731), - KQU( 7688082017574293629), KQU( 900986224735166665), - KQU( 9977972710722265039), KQU( 6008205004994830552), - KQU( 546909104521689292), KQU( 7428471521869107594), - KQU(14777563419314721179), KQU(16116143076567350053), - KQU( 5322685342003142329), KQU( 4200427048445863473), - KQU( 4693092150132559146), KQU(13671425863759338582), - KQU( 6747117460737639916), KQU( 4732666080236551150), - KQU( 5912839950611941263), KQU( 3903717554504704909), - KQU( 2615667650256786818), KQU(10844129913887006352), - KQU(13786467861810997820), KQU(14267853002994021570), - KQU(13767807302847237439), KQU(16407963253707224617), - KQU( 4802498363698583497), KQU( 2523802839317209764), - KQU( 3822579397797475589), KQU( 8950320572212130610), - KQU( 3745623504978342534), KQU(16092609066068482806), - KQU( 9817016950274642398), KQU(10591660660323829098), - KQU(11751606650792815920), KQU( 5122873818577122211), - KQU(17209553764913936624), KQU( 6249057709284380343), - KQU(15088791264695071830), KQU(15344673071709851930), - KQU( 4345751415293646084), KQU( 2542865750703067928), - KQU(13520525127852368784), KQU(18294188662880997241), - KQU( 
3871781938044881523), KQU( 2873487268122812184), - KQU(15099676759482679005), KQU(15442599127239350490), - KQU( 6311893274367710888), KQU( 3286118760484672933), - KQU( 4146067961333542189), KQU(13303942567897208770), - KQU( 8196013722255630418), KQU( 4437815439340979989), - KQU(15433791533450605135), KQU( 4254828956815687049), - KQU( 1310903207708286015), KQU(10529182764462398549), - KQU(14900231311660638810), KQU( 9727017277104609793), - KQU( 1821308310948199033), KQU(11628861435066772084), - KQU( 9469019138491546924), KQU( 3145812670532604988), - KQU( 9938468915045491919), KQU( 1562447430672662142), - KQU(13963995266697989134), KQU( 3356884357625028695), - KQU( 4499850304584309747), KQU( 8456825817023658122), - KQU(10859039922814285279), KQU( 8099512337972526555), - KQU( 348006375109672149), KQU(11919893998241688603), - KQU( 1104199577402948826), KQU(16689191854356060289), - KQU(10992552041730168078), KQU( 7243733172705465836), - KQU( 5668075606180319560), KQU(18182847037333286970), - KQU( 4290215357664631322), KQU( 4061414220791828613), - KQU(13006291061652989604), KQU( 7140491178917128798), - KQU(12703446217663283481), KQU( 5500220597564558267), - KQU(10330551509971296358), KQU(15958554768648714492), - KQU( 5174555954515360045), KQU( 1731318837687577735), - KQU( 3557700801048354857), KQU(13764012341928616198), - KQU(13115166194379119043), KQU( 7989321021560255519), - KQU( 2103584280905877040), KQU( 9230788662155228488), - KQU(16396629323325547654), KQU( 657926409811318051), - KQU(15046700264391400727), KQU( 5120132858771880830), - KQU( 7934160097989028561), KQU( 6963121488531976245), - KQU(17412329602621742089), KQU(15144843053931774092), - KQU(17204176651763054532), KQU(13166595387554065870), - KQU( 8590377810513960213), KQU( 5834365135373991938), - KQU( 7640913007182226243), KQU( 3479394703859418425), - KQU(16402784452644521040), KQU( 4993979809687083980), - KQU(13254522168097688865), KQU(15643659095244365219), - KQU( 5881437660538424982), 
KQU(11174892200618987379), - KQU( 254409966159711077), KQU(17158413043140549909), - KQU( 3638048789290376272), KQU( 1376816930299489190), - KQU( 4622462095217761923), KQU(15086407973010263515), - KQU(13253971772784692238), KQU( 5270549043541649236), - KQU(11182714186805411604), KQU(12283846437495577140), - KQU( 5297647149908953219), KQU(10047451738316836654), - KQU( 4938228100367874746), KQU(12328523025304077923), - KQU( 3601049438595312361), KQU( 9313624118352733770), - KQU(13322966086117661798), KQU(16660005705644029394), - KQU(11337677526988872373), KQU(13869299102574417795), - KQU(15642043183045645437), KQU( 3021755569085880019), - KQU( 4979741767761188161), KQU(13679979092079279587), - KQU( 3344685842861071743), KQU(13947960059899588104), - KQU( 305806934293368007), KQU( 5749173929201650029), - KQU(11123724852118844098), KQU(15128987688788879802), - KQU(15251651211024665009), KQU( 7689925933816577776), - KQU(16732804392695859449), KQU(17087345401014078468), - KQU(14315108589159048871), KQU( 4820700266619778917), - KQU(16709637539357958441), KQU( 4936227875177351374), - KQU( 2137907697912987247), KQU(11628565601408395420), - KQU( 2333250549241556786), KQU( 5711200379577778637), - KQU( 5170680131529031729), KQU(12620392043061335164), - KQU( 95363390101096078), KQU( 5487981914081709462), - KQU( 1763109823981838620), KQU( 3395861271473224396), - KQU( 1300496844282213595), KQU( 6894316212820232902), - KQU(10673859651135576674), KQU( 5911839658857903252), - KQU(17407110743387299102), KQU( 8257427154623140385), - KQU(11389003026741800267), KQU( 4070043211095013717), - KQU(11663806997145259025), KQU(15265598950648798210), - KQU( 630585789434030934), KQU( 3524446529213587334), - KQU( 7186424168495184211), KQU(10806585451386379021), - KQU(11120017753500499273), KQU( 1586837651387701301), - KQU(17530454400954415544), KQU( 9991670045077880430), - KQU( 7550997268990730180), KQU( 8640249196597379304), - KQU( 3522203892786893823), KQU(10401116549878854788), - 
KQU(13690285544733124852), KQU( 8295785675455774586), - KQU(15535716172155117603), KQU( 3112108583723722511), - KQU(17633179955339271113), KQU(18154208056063759375), - KQU( 1866409236285815666), KQU(13326075895396412882), - KQU( 8756261842948020025), KQU( 6281852999868439131), - KQU(15087653361275292858), KQU(10333923911152949397), - KQU( 5265567645757408500), KQU(12728041843210352184), - KQU( 6347959327507828759), KQU( 154112802625564758), - KQU(18235228308679780218), KQU( 3253805274673352418), - KQU( 4849171610689031197), KQU(17948529398340432518), - KQU(13803510475637409167), KQU(13506570190409883095), - KQU(15870801273282960805), KQU( 8451286481299170773), - KQU( 9562190620034457541), KQU( 8518905387449138364), - KQU(12681306401363385655), KQU( 3788073690559762558), - KQU( 5256820289573487769), KQU( 2752021372314875467), - KQU( 6354035166862520716), KQU( 4328956378309739069), - KQU( 449087441228269600), KQU( 5533508742653090868), - KQU( 1260389420404746988), KQU(18175394473289055097), - KQU( 1535467109660399420), KQU( 8818894282874061442), - KQU(12140873243824811213), KQU(15031386653823014946), - KQU( 1286028221456149232), KQU( 6329608889367858784), - KQU( 9419654354945132725), KQU( 6094576547061672379), - KQU(17706217251847450255), KQU( 1733495073065878126), - KQU(16918923754607552663), KQU( 8881949849954945044), - KQU(12938977706896313891), KQU(14043628638299793407), - KQU(18393874581723718233), KQU( 6886318534846892044), - KQU(14577870878038334081), KQU(13541558383439414119), - KQU(13570472158807588273), KQU(18300760537910283361), - KQU( 818368572800609205), KQU( 1417000585112573219), - KQU(12337533143867683655), KQU(12433180994702314480), - KQU( 778190005829189083), KQU(13667356216206524711), - KQU( 9866149895295225230), KQU(11043240490417111999), - KQU( 1123933826541378598), KQU( 6469631933605123610), - KQU(14508554074431980040), KQU(13918931242962026714), - KQU( 2870785929342348285), KQU(14786362626740736974), - KQU(13176680060902695786), KQU( 
9591778613541679456), - KQU( 9097662885117436706), KQU( 749262234240924947), - KQU( 1944844067793307093), KQU( 4339214904577487742), - KQU( 8009584152961946551), KQU(16073159501225501777), - KQU( 3335870590499306217), KQU(17088312653151202847), - KQU( 3108893142681931848), KQU(16636841767202792021), - KQU(10423316431118400637), KQU( 8008357368674443506), - KQU(11340015231914677875), KQU(17687896501594936090), - KQU(15173627921763199958), KQU( 542569482243721959), - KQU(15071714982769812975), KQU( 4466624872151386956), - KQU( 1901780715602332461), KQU( 9822227742154351098), - KQU( 1479332892928648780), KQU( 6981611948382474400), - KQU( 7620824924456077376), KQU(14095973329429406782), - KQU( 7902744005696185404), KQU(15830577219375036920), - KQU(10287076667317764416), KQU(12334872764071724025), - KQU( 4419302088133544331), KQU(14455842851266090520), - KQU(12488077416504654222), KQU( 7953892017701886766), - KQU( 6331484925529519007), KQU( 4902145853785030022), - KQU(17010159216096443073), KQU(11945354668653886087), - KQU(15112022728645230829), KQU(17363484484522986742), - KQU( 4423497825896692887), KQU( 8155489510809067471), - KQU( 258966605622576285), KQU( 5462958075742020534), - KQU( 6763710214913276228), KQU( 2368935183451109054), - KQU(14209506165246453811), KQU( 2646257040978514881), - KQU( 3776001911922207672), KQU( 1419304601390147631), - KQU(14987366598022458284), KQU( 3977770701065815721), - KQU( 730820417451838898), KQU( 3982991703612885327), - KQU( 2803544519671388477), KQU(17067667221114424649), - KQU( 2922555119737867166), KQU( 1989477584121460932), - KQU(15020387605892337354), KQU( 9293277796427533547), - KQU(10722181424063557247), KQU(16704542332047511651), - KQU( 5008286236142089514), KQU(16174732308747382540), - KQU(17597019485798338402), KQU(13081745199110622093), - KQU( 8850305883842258115), KQU(12723629125624589005), - KQU( 8140566453402805978), KQU(15356684607680935061), - KQU(14222190387342648650), KQU(11134610460665975178), - KQU( 
1259799058620984266), KQU(13281656268025610041), - KQU( 298262561068153992), KQU(12277871700239212922), - KQU(13911297774719779438), KQU(16556727962761474934), - KQU(17903010316654728010), KQU( 9682617699648434744), - KQU(14757681836838592850), KQU( 1327242446558524473), - KQU(11126645098780572792), KQU( 1883602329313221774), - KQU( 2543897783922776873), KQU(15029168513767772842), - KQU(12710270651039129878), KQU(16118202956069604504), - KQU(15010759372168680524), KQU( 2296827082251923948), - KQU(10793729742623518101), KQU(13829764151845413046), - KQU(17769301223184451213), KQU( 3118268169210783372), - KQU(17626204544105123127), KQU( 7416718488974352644), - KQU(10450751996212925994), KQU( 9352529519128770586), - KQU( 259347569641110140), KQU( 8048588892269692697), - KQU( 1774414152306494058), KQU(10669548347214355622), - KQU(13061992253816795081), KQU(18432677803063861659), - KQU( 8879191055593984333), KQU(12433753195199268041), - KQU(14919392415439730602), KQU( 6612848378595332963), - KQU( 6320986812036143628), KQU(10465592420226092859), - KQU( 4196009278962570808), KQU( 3747816564473572224), - KQU(17941203486133732898), KQU( 2350310037040505198), - KQU( 5811779859134370113), KQU(10492109599506195126), - KQU( 7699650690179541274), KQU( 1954338494306022961), - KQU(14095816969027231152), KQU( 5841346919964852061), - KQU(14945969510148214735), KQU( 3680200305887550992), - KQU( 6218047466131695792), KQU( 8242165745175775096), - KQU(11021371934053307357), KQU( 1265099502753169797), - KQU( 4644347436111321718), KQU( 3609296916782832859), - KQU( 8109807992218521571), KQU(18387884215648662020), - KQU(14656324896296392902), KQU(17386819091238216751), - KQU(17788300878582317152), KQU( 7919446259742399591), - KQU( 4466613134576358004), KQU(12928181023667938509), - KQU(13147446154454932030), KQU(16552129038252734620), - KQU( 8395299403738822450), KQU(11313817655275361164), - KQU( 434258809499511718), KQU( 2074882104954788676), - KQU( 7929892178759395518), KQU( 
9006461629105745388), - KQU( 5176475650000323086), KQU(11128357033468341069), - KQU(12026158851559118955), KQU(14699716249471156500), - KQU( 448982497120206757), KQU( 4156475356685519900), - KQU( 6063816103417215727), KQU(10073289387954971479), - KQU( 8174466846138590962), KQU( 2675777452363449006), - KQU( 9090685420572474281), KQU( 6659652652765562060), - KQU(12923120304018106621), KQU(11117480560334526775), - KQU( 937910473424587511), KQU( 1838692113502346645), - KQU(11133914074648726180), KQU( 7922600945143884053), - KQU(13435287702700959550), KQU( 5287964921251123332), - KQU(11354875374575318947), KQU(17955724760748238133), - KQU(13728617396297106512), KQU( 4107449660118101255), - KQU( 1210269794886589623), KQU(11408687205733456282), - KQU( 4538354710392677887), KQU(13566803319341319267), - KQU(17870798107734050771), KQU( 3354318982568089135), - KQU( 9034450839405133651), KQU(13087431795753424314), - KQU( 950333102820688239), KQU( 1968360654535604116), - KQU(16840551645563314995), KQU( 8867501803892924995), - KQU(11395388644490626845), KQU( 1529815836300732204), - KQU(13330848522996608842), KQU( 1813432878817504265), - KQU( 2336867432693429560), KQU(15192805445973385902), - KQU( 2528593071076407877), KQU( 128459777936689248), - KQU( 9976345382867214866), KQU( 6208885766767996043), - KQU(14982349522273141706), KQU( 3099654362410737822), - KQU(13776700761947297661), KQU( 8806185470684925550), - KQU( 8151717890410585321), KQU( 640860591588072925), - KQU(14592096303937307465), KQU( 9056472419613564846), - KQU(14861544647742266352), KQU(12703771500398470216), - KQU( 3142372800384138465), KQU( 6201105606917248196), - KQU(18337516409359270184), KQU(15042268695665115339), - KQU(15188246541383283846), KQU(12800028693090114519), - KQU( 5992859621101493472), KQU(18278043971816803521), - KQU( 9002773075219424560), KQU( 7325707116943598353), - KQU( 7930571931248040822), KQU( 5645275869617023448), - KQU( 7266107455295958487), KQU( 4363664528273524411), - 
KQU(14313875763787479809), KQU(17059695613553486802), - KQU( 9247761425889940932), KQU(13704726459237593128), - KQU( 2701312427328909832), KQU(17235532008287243115), - KQU(14093147761491729538), KQU( 6247352273768386516), - KQU( 8268710048153268415), KQU( 7985295214477182083), - KQU(15624495190888896807), KQU( 3772753430045262788), - KQU( 9133991620474991698), KQU( 5665791943316256028), - KQU( 7551996832462193473), KQU(13163729206798953877), - KQU( 9263532074153846374), KQU( 1015460703698618353), - KQU(17929874696989519390), KQU(18257884721466153847), - KQU(16271867543011222991), KQU( 3905971519021791941), - KQU(16814488397137052085), KQU( 1321197685504621613), - KQU( 2870359191894002181), KQU(14317282970323395450), - KQU(13663920845511074366), KQU( 2052463995796539594), - KQU(14126345686431444337), KQU( 1727572121947022534), - KQU(17793552254485594241), KQU( 6738857418849205750), - KQU( 1282987123157442952), KQU(16655480021581159251), - KQU( 6784587032080183866), KQU(14726758805359965162), - KQU( 7577995933961987349), KQU(12539609320311114036), - KQU(10789773033385439494), KQU( 8517001497411158227), - KQU(10075543932136339710), KQU(14838152340938811081), - KQU( 9560840631794044194), KQU(17445736541454117475), - KQU(10633026464336393186), KQU(15705729708242246293), - KQU( 1117517596891411098), KQU( 4305657943415886942), - KQU( 4948856840533979263), KQU(16071681989041789593), - KQU(13723031429272486527), KQU( 7639567622306509462), - KQU(12670424537483090390), KQU( 9715223453097197134), - KQU( 5457173389992686394), KQU( 289857129276135145), - KQU(17048610270521972512), KQU( 692768013309835485), - KQU(14823232360546632057), KQU(18218002361317895936), - KQU( 3281724260212650204), KQU(16453957266549513795), - KQU( 8592711109774511881), KQU( 929825123473369579), - KQU(15966784769764367791), KQU( 9627344291450607588), - KQU(10849555504977813287), KQU( 9234566913936339275), - KQU( 6413807690366911210), KQU(10862389016184219267), - KQU(13842504799335374048), KQU( 
1531994113376881174), - KQU( 2081314867544364459), KQU(16430628791616959932), - KQU( 8314714038654394368), KQU( 9155473892098431813), - KQU(12577843786670475704), KQU( 4399161106452401017), - KQU( 1668083091682623186), KQU( 1741383777203714216), - KQU( 2162597285417794374), KQU(15841980159165218736), - KQU( 1971354603551467079), KQU( 1206714764913205968), - KQU( 4790860439591272330), KQU(14699375615594055799), - KQU( 8374423871657449988), KQU(10950685736472937738), - KQU( 697344331343267176), KQU(10084998763118059810), - KQU(12897369539795983124), KQU(12351260292144383605), - KQU( 1268810970176811234), KQU( 7406287800414582768), - KQU( 516169557043807831), KQU( 5077568278710520380), - KQU( 3828791738309039304), KQU( 7721974069946943610), - KQU( 3534670260981096460), KQU( 4865792189600584891), - KQU(16892578493734337298), KQU( 9161499464278042590), - KQU(11976149624067055931), KQU(13219479887277343990), - KQU(14161556738111500680), KQU(14670715255011223056), - KQU( 4671205678403576558), KQU(12633022931454259781), - KQU(14821376219869187646), KQU( 751181776484317028), - KQU( 2192211308839047070), KQU(11787306362361245189), - KQU(10672375120744095707), KQU( 4601972328345244467), - KQU(15457217788831125879), KQU( 8464345256775460809), - KQU(10191938789487159478), KQU( 6184348739615197613), - KQU(11425436778806882100), KQU( 2739227089124319793), - KQU( 461464518456000551), KQU( 4689850170029177442), - KQU( 6120307814374078625), KQU(11153579230681708671), - KQU( 7891721473905347926), KQU(10281646937824872400), - KQU( 3026099648191332248), KQU( 8666750296953273818), - KQU(14978499698844363232), KQU(13303395102890132065), - KQU( 8182358205292864080), KQU(10560547713972971291), - KQU(11981635489418959093), KQU( 3134621354935288409), - KQU(11580681977404383968), KQU(14205530317404088650), - KQU( 5997789011854923157), KQU(13659151593432238041), - KQU(11664332114338865086), KQU( 7490351383220929386), - KQU( 7189290499881530378), KQU(15039262734271020220), - KQU( 
2057217285976980055), KQU( 555570804905355739), - KQU(11235311968348555110), KQU(13824557146269603217), - KQU(16906788840653099693), KQU( 7222878245455661677), - KQU( 5245139444332423756), KQU( 4723748462805674292), - KQU(12216509815698568612), KQU(17402362976648951187), - KQU(17389614836810366768), KQU( 4880936484146667711), - KQU( 9085007839292639880), KQU(13837353458498535449), - KQU(11914419854360366677), KQU(16595890135313864103), - KQU( 6313969847197627222), KQU(18296909792163910431), - KQU(10041780113382084042), KQU( 2499478551172884794), - KQU(11057894246241189489), KQU( 9742243032389068555), - KQU(12838934582673196228), KQU(13437023235248490367), - KQU(13372420669446163240), KQU( 6752564244716909224), - KQU( 7157333073400313737), KQU(12230281516370654308), - KQU( 1182884552219419117), KQU( 2955125381312499218), - KQU(10308827097079443249), KQU( 1337648572986534958), - KQU(16378788590020343939), KQU( 108619126514420935), - KQU( 3990981009621629188), KQU( 5460953070230946410), - KQU( 9703328329366531883), KQU(13166631489188077236), - KQU( 1104768831213675170), KQU( 3447930458553877908), - KQU( 8067172487769945676), KQU( 5445802098190775347), - KQU( 3244840981648973873), KQU(17314668322981950060), - KQU( 5006812527827763807), KQU(18158695070225526260), - KQU( 2824536478852417853), KQU(13974775809127519886), - KQU( 9814362769074067392), KQU(17276205156374862128), - KQU(11361680725379306967), KQU( 3422581970382012542), - KQU(11003189603753241266), KQU(11194292945277862261), - KQU( 6839623313908521348), KQU(11935326462707324634), - KQU( 1611456788685878444), KQU(13112620989475558907), - KQU( 517659108904450427), KQU(13558114318574407624), - KQU(15699089742731633077), KQU( 4988979278862685458), - KQU( 8111373583056521297), KQU( 3891258746615399627), - KQU( 8137298251469718086), KQU(12748663295624701649), - KQU( 4389835683495292062), KQU( 5775217872128831729), - KQU( 9462091896405534927), KQU( 8498124108820263989), - KQU( 8059131278842839525), 
KQU(10503167994254090892), - KQU(11613153541070396656), KQU(18069248738504647790), - KQU( 570657419109768508), KQU( 3950574167771159665), - KQU( 5514655599604313077), KQU( 2908460854428484165), - KQU(10777722615935663114), KQU(12007363304839279486), - KQU( 9800646187569484767), KQU( 8795423564889864287), - KQU(14257396680131028419), KQU( 6405465117315096498), - KQU( 7939411072208774878), KQU(17577572378528990006), - KQU(14785873806715994850), KQU(16770572680854747390), - KQU(18127549474419396481), KQU(11637013449455757750), - KQU(14371851933996761086), KQU( 3601181063650110280), - KQU( 4126442845019316144), KQU(10198287239244320669), - KQU(18000169628555379659), KQU(18392482400739978269), - KQU( 6219919037686919957), KQU( 3610085377719446052), - KQU( 2513925039981776336), KQU(16679413537926716955), - KQU(12903302131714909434), KQU( 5581145789762985009), - KQU(12325955044293303233), KQU(17216111180742141204), - KQU( 6321919595276545740), KQU( 3507521147216174501), - KQU( 9659194593319481840), KQU(11473976005975358326), - KQU(14742730101435987026), KQU( 492845897709954780), - KQU(16976371186162599676), KQU(17712703422837648655), - KQU( 9881254778587061697), KQU( 8413223156302299551), - KQU( 1563841828254089168), KQU( 9996032758786671975), - KQU( 138877700583772667), KQU(13003043368574995989), - KQU( 4390573668650456587), KQU( 8610287390568126755), - KQU(15126904974266642199), KQU( 6703637238986057662), - KQU( 2873075592956810157), KQU( 6035080933946049418), - KQU(13382846581202353014), KQU( 7303971031814642463), - KQU(18418024405307444267), KQU( 5847096731675404647), - KQU( 4035880699639842500), KQU(11525348625112218478), - KQU( 3041162365459574102), KQU( 2604734487727986558), - KQU(15526341771636983145), KQU(14556052310697370254), - KQU(12997787077930808155), KQU( 9601806501755554499), - KQU(11349677952521423389), KQU(14956777807644899350), - KQU(16559736957742852721), KQU(12360828274778140726), - KQU( 6685373272009662513), KQU(16932258748055324130), - 
KQU(15918051131954158508), KQU( 1692312913140790144), - KQU( 546653826801637367), KQU( 5341587076045986652), - KQU(14975057236342585662), KQU(12374976357340622412), - KQU(10328833995181940552), KQU(12831807101710443149), - KQU(10548514914382545716), KQU( 2217806727199715993), - KQU(12627067369242845138), KQU( 4598965364035438158), - KQU( 150923352751318171), KQU(14274109544442257283), - KQU( 4696661475093863031), KQU( 1505764114384654516), - KQU(10699185831891495147), KQU( 2392353847713620519), - KQU( 3652870166711788383), KQU( 8640653276221911108), - KQU( 3894077592275889704), KQU( 4918592872135964845), - KQU(16379121273281400789), KQU(12058465483591683656), - KQU(11250106829302924945), KQU( 1147537556296983005), - KQU( 6376342756004613268), KQU(14967128191709280506), - KQU(18007449949790627628), KQU( 9497178279316537841), - KQU( 7920174844809394893), KQU(10037752595255719907), - KQU(15875342784985217697), KQU(15311615921712850696), - KQU( 9552902652110992950), KQU(14054979450099721140), - KQU( 5998709773566417349), KQU(18027910339276320187), - KQU( 8223099053868585554), KQU( 7842270354824999767), - KQU( 4896315688770080292), KQU(12969320296569787895), - KQU( 2674321489185759961), KQU( 4053615936864718439), - KQU(11349775270588617578), KQU( 4743019256284553975), - KQU( 5602100217469723769), KQU(14398995691411527813), - KQU( 7412170493796825470), KQU( 836262406131744846), - KQU( 8231086633845153022), KQU( 5161377920438552287), - KQU( 8828731196169924949), KQU(16211142246465502680), - KQU( 3307990879253687818), KQU( 5193405406899782022), - KQU( 8510842117467566693), KQU( 6070955181022405365), - KQU(14482950231361409799), KQU(12585159371331138077), - KQU( 3511537678933588148), KQU( 2041849474531116417), - KQU(10944936685095345792), KQU(18303116923079107729), - KQU( 2720566371239725320), KQU( 4958672473562397622), - KQU( 3032326668253243412), KQU(13689418691726908338), - KQU( 1895205511728843996), KQU( 8146303515271990527), - KQU(16507343500056113480), KQU( 
473996939105902919), - KQU( 9897686885246881481), KQU(14606433762712790575), - KQU( 6732796251605566368), KQU( 1399778120855368916), - KQU( 935023885182833777), KQU(16066282816186753477), - KQU( 7291270991820612055), KQU(17530230393129853844), - KQU(10223493623477451366), KQU(15841725630495676683), - KQU(17379567246435515824), KQU( 8588251429375561971), - KQU(18339511210887206423), KQU(17349587430725976100), - KQU(12244876521394838088), KQU( 6382187714147161259), - KQU(12335807181848950831), KQU(16948885622305460665), - KQU(13755097796371520506), KQU(14806740373324947801), - KQU( 4828699633859287703), KQU( 8209879281452301604), - KQU(12435716669553736437), KQU(13970976859588452131), - KQU( 6233960842566773148), KQU(12507096267900505759), - KQU( 1198713114381279421), KQU(14989862731124149015), - KQU(15932189508707978949), KQU( 2526406641432708722), - KQU( 29187427817271982), KQU( 1499802773054556353), - KQU(10816638187021897173), KQU( 5436139270839738132), - KQU( 6659882287036010082), KQU( 2154048955317173697), - KQU(10887317019333757642), KQU(16281091802634424955), - KQU(10754549879915384901), KQU(10760611745769249815), - KQU( 2161505946972504002), KQU( 5243132808986265107), - KQU(10129852179873415416), KQU( 710339480008649081), - KQU( 7802129453068808528), KQU(17967213567178907213), - KQU(15730859124668605599), KQU(13058356168962376502), - KQU( 3701224985413645909), KQU(14464065869149109264), - KQU( 9959272418844311646), KQU(10157426099515958752), - KQU(14013736814538268528), KQU(17797456992065653951), - KQU(17418878140257344806), KQU(15457429073540561521), - KQU( 2184426881360949378), KQU( 2062193041154712416), - KQU( 8553463347406931661), KQU( 4913057625202871854), - KQU( 2668943682126618425), KQU(17064444737891172288), - KQU( 4997115903913298637), KQU(12019402608892327416), - KQU(17603584559765897352), KQU(11367529582073647975), - KQU( 8211476043518436050), KQU( 8676849804070323674), - KQU(18431829230394475730), KQU(10490177861361247904), - KQU( 
9508720602025651349), KQU( 7409627448555722700), - KQU( 5804047018862729008), KQU(11943858176893142594), - KQU(11908095418933847092), KQU( 5415449345715887652), - KQU( 1554022699166156407), KQU( 9073322106406017161), - KQU( 7080630967969047082), KQU(18049736940860732943), - KQU(12748714242594196794), KQU( 1226992415735156741), - KQU(17900981019609531193), KQU(11720739744008710999), - KQU( 3006400683394775434), KQU(11347974011751996028), - KQU( 3316999628257954608), KQU( 8384484563557639101), - KQU(18117794685961729767), KQU( 1900145025596618194), - KQU(17459527840632892676), KQU( 5634784101865710994), - KQU( 7918619300292897158), KQU( 3146577625026301350), - KQU( 9955212856499068767), KQU( 1873995843681746975), - KQU( 1561487759967972194), KQU( 8322718804375878474), - KQU(11300284215327028366), KQU( 4667391032508998982), - KQU( 9820104494306625580), KQU(17922397968599970610), - KQU( 1784690461886786712), KQU(14940365084341346821), - KQU( 5348719575594186181), KQU(10720419084507855261), - KQU(14210394354145143274), KQU( 2426468692164000131), - KQU(16271062114607059202), KQU(14851904092357070247), - KQU( 6524493015693121897), KQU( 9825473835127138531), - KQU(14222500616268569578), KQU(15521484052007487468), - KQU(14462579404124614699), KQU(11012375590820665520), - KQU(11625327350536084927), KQU(14452017765243785417), - KQU( 9989342263518766305), KQU( 3640105471101803790), - KQU( 4749866455897513242), KQU(13963064946736312044), - KQU(10007416591973223791), KQU(18314132234717431115), - KQU( 3286596588617483450), KQU( 7726163455370818765), - KQU( 7575454721115379328), KQU( 5308331576437663422), - KQU(18288821894903530934), KQU( 8028405805410554106), - KQU(15744019832103296628), KQU( 149765559630932100), - KQU( 6137705557200071977), KQU(14513416315434803615), - KQU(11665702820128984473), KQU( 218926670505601386), - KQU( 6868675028717769519), KQU(15282016569441512302), - KQU( 5707000497782960236), KQU( 6671120586555079567), - KQU( 2194098052618985448), 
KQU(16849577895477330978), - KQU(12957148471017466283), KQU( 1997805535404859393), - KQU( 1180721060263860490), KQU(13206391310193756958), - KQU(12980208674461861797), KQU( 3825967775058875366), - KQU(17543433670782042631), KQU( 1518339070120322730), - KQU(16344584340890991669), KQU( 2611327165318529819), - KQU(11265022723283422529), KQU( 4001552800373196817), - KQU(14509595890079346161), KQU( 3528717165416234562), - KQU(18153222571501914072), KQU( 9387182977209744425), - KQU(10064342315985580021), KQU(11373678413215253977), - KQU( 2308457853228798099), KQU( 9729042942839545302), - KQU( 7833785471140127746), KQU( 6351049900319844436), - KQU(14454610627133496067), KQU(12533175683634819111), - KQU(15570163926716513029), KQU(13356980519185762498) -}; +static const uint32_t init_gen_rand_32_expected[] = {3440181298U, 1564997079U, + 1510669302U, 2930277156U, 1452439940U, 3796268453U, 423124208U, 2143818589U, + 3827219408U, 2987036003U, 2674978610U, 1536842514U, 2027035537U, + 2534897563U, 1686527725U, 545368292U, 1489013321U, 1370534252U, 4231012796U, + 3994803019U, 1764869045U, 824597505U, 862581900U, 2469764249U, 812862514U, + 359318673U, 116957936U, 3367389672U, 2327178354U, 1898245200U, 3206507879U, + 2378925033U, 1040214787U, 2524778605U, 3088428700U, 1417665896U, 964324147U, + 2282797708U, 2456269299U, 313400376U, 2245093271U, 1015729427U, 2694465011U, + 3246975184U, 1992793635U, 463679346U, 3721104591U, 3475064196U, 856141236U, + 1499559719U, 3522818941U, 3721533109U, 1954826617U, 1282044024U, + 1543279136U, 1301863085U, 2669145051U, 4221477354U, 3896016841U, + 3392740262U, 462466863U, 1037679449U, 1228140306U, 922298197U, 1205109853U, + 1872938061U, 3102547608U, 2742766808U, 1888626088U, 4028039414U, 157593879U, + 1136901695U, 4038377686U, 3572517236U, 4231706728U, 2997311961U, + 1189931652U, 3981543765U, 2826166703U, 87159245U, 1721379072U, 3897926942U, + 1790395498U, 2569178939U, 1047368729U, 2340259131U, 3144212906U, + 2301169789U, 2442885464U, 3034046771U, 
3667880593U, 3935928400U, + 2372805237U, 1666397115U, 2460584504U, 513866770U, 3810869743U, 2147400037U, + 2792078025U, 2941761810U, 3212265810U, 984692259U, 346590253U, 1804179199U, + 3298543443U, 750108141U, 2880257022U, 243310542U, 1869036465U, 1588062513U, + 2983949551U, 1931450364U, 4034505847U, 2735030199U, 1628461061U, + 2539522841U, 127965585U, 3992448871U, 913388237U, 559130076U, 1202933193U, + 4087643167U, 2590021067U, 2256240196U, 1746697293U, 1013913783U, + 1155864921U, 2715773730U, 915061862U, 1948766573U, 2322882854U, 3761119102U, + 1343405684U, 3078711943U, 3067431651U, 3245156316U, 3588354584U, + 3484623306U, 3899621563U, 4156689741U, 3237090058U, 3880063844U, 862416318U, + 4039923869U, 2303788317U, 3073590536U, 701653667U, 2131530884U, 3169309950U, + 2028486980U, 747196777U, 3620218225U, 432016035U, 1449580595U, 2772266392U, + 444224948U, 1662832057U, 3184055582U, 3028331792U, 1861686254U, 1104864179U, + 342430307U, 1350510923U, 3024656237U, 1028417492U, 2870772950U, 290847558U, + 3675663500U, 508431529U, 4264340390U, 2263569913U, 1669302976U, 519511383U, + 2706411211U, 3764615828U, 3883162495U, 4051445305U, 2412729798U, + 3299405164U, 3991911166U, 2348767304U, 2664054906U, 3763609282U, 593943581U, + 3757090046U, 2075338894U, 2020550814U, 4287452920U, 4290140003U, + 1422957317U, 2512716667U, 2003485045U, 2307520103U, 2288472169U, + 3940751663U, 4204638664U, 2892583423U, 1710068300U, 3904755993U, + 2363243951U, 3038334120U, 547099465U, 771105860U, 3199983734U, 4282046461U, + 2298388363U, 934810218U, 2837827901U, 3952500708U, 2095130248U, 3083335297U, + 26885281U, 3932155283U, 1531751116U, 1425227133U, 495654159U, 3279634176U, + 3855562207U, 3957195338U, 4159985527U, 893375062U, 1875515536U, 1327247422U, + 3754140693U, 1028923197U, 1729880440U, 805571298U, 448971099U, 2726757106U, + 2749436461U, 2485987104U, 175337042U, 3235477922U, 3882114302U, 2020970972U, + 943926109U, 2762587195U, 1904195558U, 3452650564U, 108432281U, 3893463573U, + 3977583081U, 
2636504348U, 1110673525U, 3548479841U, 4258854744U, 980047703U, + 4057175418U, 3890008292U, 145653646U, 3141868989U, 3293216228U, 1194331837U, + 1254570642U, 3049934521U, 2868313360U, 2886032750U, 1110873820U, 279553524U, + 3007258565U, 1104807822U, 3186961098U, 315764646U, 2163680838U, 3574508994U, + 3099755655U, 191957684U, 3642656737U, 3317946149U, 3522087636U, 444526410U, + 779157624U, 1088229627U, 1092460223U, 1856013765U, 3659877367U, 368270451U, + 503570716U, 3000984671U, 2742789647U, 928097709U, 2914109539U, 308843566U, + 2816161253U, 3667192079U, 2762679057U, 3395240989U, 2928925038U, + 1491465914U, 3458702834U, 3787782576U, 2894104823U, 1296880455U, + 1253636503U, 989959407U, 2291560361U, 2776790436U, 1913178042U, 1584677829U, + 689637520U, 1898406878U, 688391508U, 3385234998U, 845493284U, 1943591856U, + 2720472050U, 222695101U, 1653320868U, 2904632120U, 4084936008U, 1080720688U, + 3938032556U, 387896427U, 2650839632U, 99042991U, 1720913794U, 1047186003U, + 1877048040U, 2090457659U, 517087501U, 4172014665U, 2129713163U, 2413533132U, + 2760285054U, 4129272496U, 1317737175U, 2309566414U, 2228873332U, + 3889671280U, 1110864630U, 3576797776U, 2074552772U, 832002644U, 3097122623U, + 2464859298U, 2679603822U, 1667489885U, 3237652716U, 1478413938U, + 1719340335U, 2306631119U, 639727358U, 3369698270U, 226902796U, 2099920751U, + 1892289957U, 2201594097U, 3508197013U, 3495811856U, 3900381493U, 841660320U, + 3974501451U, 3360949056U, 1676829340U, 728899254U, 2047809627U, 2390948962U, + 670165943U, 3412951831U, 4189320049U, 1911595255U, 2055363086U, 507170575U, + 418219594U, 4141495280U, 2692088692U, 4203630654U, 3540093932U, 791986533U, + 2237921051U, 2526864324U, 2956616642U, 1394958700U, 1983768223U, + 1893373266U, 591653646U, 228432437U, 1611046598U, 3007736357U, 1040040725U, + 2726180733U, 2789804360U, 4263568405U, 829098158U, 3847722805U, 1123578029U, + 1804276347U, 997971319U, 4203797076U, 4185199713U, 2811733626U, 2343642194U, + 2985262313U, 1417930827U, 
3759587724U, 1967077982U, 1585223204U, + 1097475516U, 1903944948U, 740382444U, 1114142065U, 1541796065U, 1718384172U, + 1544076191U, 1134682254U, 3519754455U, 2866243923U, 341865437U, 645498576U, + 2690735853U, 1046963033U, 2493178460U, 1187604696U, 1619577821U, 488503634U, + 3255768161U, 2306666149U, 1630514044U, 2377698367U, 2751503746U, + 3794467088U, 1796415981U, 3657173746U, 409136296U, 1387122342U, 1297726519U, + 219544855U, 4270285558U, 437578827U, 1444698679U, 2258519491U, 963109892U, + 3982244073U, 3351535275U, 385328496U, 1804784013U, 698059346U, 3920535147U, + 708331212U, 784338163U, 785678147U, 1238376158U, 1557298846U, 2037809321U, + 271576218U, 4145155269U, 1913481602U, 2763691931U, 588981080U, 1201098051U, + 3717640232U, 1509206239U, 662536967U, 3180523616U, 1133105435U, 2963500837U, + 2253971215U, 3153642623U, 1066925709U, 2582781958U, 3034720222U, + 1090798544U, 2942170004U, 4036187520U, 686972531U, 2610990302U, 2641437026U, + 1837562420U, 722096247U, 1315333033U, 2102231203U, 3402389208U, 3403698140U, + 1312402831U, 2898426558U, 814384596U, 385649582U, 1916643285U, 1924625106U, + 2512905582U, 2501170304U, 4275223366U, 2841225246U, 1467663688U, + 3563567847U, 2969208552U, 884750901U, 102992576U, 227844301U, 3681442994U, + 3502881894U, 4034693299U, 1166727018U, 1697460687U, 1737778332U, + 1787161139U, 1053003655U, 1215024478U, 2791616766U, 2525841204U, + 1629323443U, 3233815U, 2003823032U, 3083834263U, 2379264872U, 3752392312U, + 1287475550U, 3770904171U, 3004244617U, 1502117784U, 918698423U, 2419857538U, + 3864502062U, 1751322107U, 2188775056U, 4018728324U, 983712955U, 440071928U, + 3710838677U, 2001027698U, 3994702151U, 22493119U, 3584400918U, 3446253670U, + 4254789085U, 1405447860U, 1240245579U, 1800644159U, 1661363424U, + 3278326132U, 3403623451U, 67092802U, 2609352193U, 3914150340U, 1814842761U, + 3610830847U, 591531412U, 3880232807U, 1673505890U, 2585326991U, 1678544474U, + 3148435887U, 3457217359U, 1193226330U, 2816576908U, 154025329U, 
121678860U, + 1164915738U, 973873761U, 269116100U, 52087970U, 744015362U, 498556057U, + 94298882U, 1563271621U, 2383059628U, 4197367290U, 3958472990U, 2592083636U, + 2906408439U, 1097742433U, 3924840517U, 264557272U, 2292287003U, 3203307984U, + 4047038857U, 3820609705U, 2333416067U, 1839206046U, 3600944252U, + 3412254904U, 583538222U, 2390557166U, 4140459427U, 2810357445U, 226777499U, + 2496151295U, 2207301712U, 3283683112U, 611630281U, 1933218215U, 3315610954U, + 3889441987U, 3719454256U, 3957190521U, 1313998161U, 2365383016U, + 3146941060U, 1801206260U, 796124080U, 2076248581U, 1747472464U, 3254365145U, + 595543130U, 3573909503U, 3758250204U, 2020768540U, 2439254210U, 93368951U, + 3155792250U, 2600232980U, 3709198295U, 3894900440U, 2971850836U, + 1578909644U, 1443493395U, 2581621665U, 3086506297U, 2443465861U, 558107211U, + 1519367835U, 249149686U, 908102264U, 2588765675U, 1232743965U, 1001330373U, + 3561331654U, 2259301289U, 1564977624U, 3835077093U, 727244906U, 4255738067U, + 1214133513U, 2570786021U, 3899704621U, 1633861986U, 1636979509U, + 1438500431U, 58463278U, 2823485629U, 2297430187U, 2926781924U, 3371352948U, + 1864009023U, 2722267973U, 1444292075U, 437703973U, 1060414512U, 189705863U, + 910018135U, 4077357964U, 884213423U, 2644986052U, 3973488374U, 1187906116U, + 2331207875U, 780463700U, 3713351662U, 3854611290U, 412805574U, 2978462572U, + 2176222820U, 829424696U, 2790788332U, 2750819108U, 1594611657U, 3899878394U, + 3032870364U, 1702887682U, 1948167778U, 14130042U, 192292500U, 947227076U, + 90719497U, 3854230320U, 784028434U, 2142399787U, 1563449646U, 2844400217U, + 819143172U, 2883302356U, 2328055304U, 1328532246U, 2603885363U, 3375188924U, + 933941291U, 3627039714U, 2129697284U, 2167253953U, 2506905438U, 1412424497U, + 2981395985U, 1418359660U, 2925902456U, 52752784U, 3713667988U, 3924669405U, + 648975707U, 1145520213U, 4018650664U, 3805915440U, 2380542088U, 2013260958U, + 3262572197U, 2465078101U, 1114540067U, 3728768081U, 2396958768U, 590672271U, + 
904818725U, 4263660715U, 700754408U, 1042601829U, 4094111823U, 4274838909U, + 2512692617U, 2774300207U, 2057306915U, 3470942453U, 99333088U, 1142661026U, + 2889931380U, 14316674U, 2201179167U, 415289459U, 448265759U, 3515142743U, + 3254903683U, 246633281U, 1184307224U, 2418347830U, 2092967314U, 2682072314U, + 2558750234U, 2000352263U, 1544150531U, 399010405U, 1513946097U, 499682937U, + 461167460U, 3045570638U, 1633669705U, 851492362U, 4052801922U, 2055266765U, + 635556996U, 368266356U, 2385737383U, 3218202352U, 2603772408U, 349178792U, + 226482567U, 3102426060U, 3575998268U, 2103001871U, 3243137071U, 225500688U, + 1634718593U, 4283311431U, 4292122923U, 3842802787U, 811735523U, 105712518U, + 663434053U, 1855889273U, 2847972595U, 1196355421U, 2552150115U, 4254510614U, + 3752181265U, 3430721819U, 3828705396U, 3436287905U, 3441964937U, + 4123670631U, 353001539U, 459496439U, 3799690868U, 1293777660U, 2761079737U, + 498096339U, 3398433374U, 4080378380U, 2304691596U, 2995729055U, 4134660419U, + 3903444024U, 3576494993U, 203682175U, 3321164857U, 2747963611U, 79749085U, + 2992890370U, 1240278549U, 1772175713U, 2111331972U, 2655023449U, + 1683896345U, 2836027212U, 3482868021U, 2489884874U, 756853961U, 2298874501U, + 4013448667U, 4143996022U, 2948306858U, 4132920035U, 1283299272U, 995592228U, + 3450508595U, 1027845759U, 1766942720U, 3861411826U, 1446861231U, 95974993U, + 3502263554U, 1487532194U, 601502472U, 4129619129U, 250131773U, 2050079547U, + 3198903947U, 3105589778U, 4066481316U, 3026383978U, 2276901713U, 365637751U, + 2260718426U, 1394775634U, 1791172338U, 2690503163U, 2952737846U, + 1568710462U, 732623190U, 2980358000U, 1053631832U, 1432426951U, 3229149635U, + 1854113985U, 3719733532U, 3204031934U, 735775531U, 107468620U, 3734611984U, + 631009402U, 3083622457U, 4109580626U, 159373458U, 1301970201U, 4132389302U, + 1293255004U, 847182752U, 4170022737U, 96712900U, 2641406755U, 1381727755U, + 405608287U, 4287919625U, 1703554290U, 3589580244U, 2911403488U, 2166565U, + 
2647306451U, 2330535117U, 1200815358U, 1165916754U, 245060911U, 4040679071U, + 3684908771U, 2452834126U, 2486872773U, 2318678365U, 2940627908U, + 1837837240U, 3447897409U, 4270484676U, 1495388728U, 3754288477U, + 4204167884U, 1386977705U, 2692224733U, 3076249689U, 4109568048U, + 4170955115U, 4167531356U, 4020189950U, 4261855038U, 3036907575U, + 3410399885U, 3076395737U, 1046178638U, 144496770U, 230725846U, 3349637149U, + 17065717U, 2809932048U, 2054581785U, 3608424964U, 3259628808U, 134897388U, + 3743067463U, 257685904U, 3795656590U, 1562468719U, 3589103904U, 3120404710U, + 254684547U, 2653661580U, 3663904795U, 2631942758U, 1063234347U, 2609732900U, + 2332080715U, 3521125233U, 1180599599U, 1935868586U, 4110970440U, 296706371U, + 2128666368U, 1319875791U, 1570900197U, 3096025483U, 1799882517U, + 1928302007U, 1163707758U, 1244491489U, 3533770203U, 567496053U, 2757924305U, + 2781639343U, 2818420107U, 560404889U, 2619609724U, 4176035430U, 2511289753U, + 2521842019U, 3910553502U, 2926149387U, 3302078172U, 4237118867U, 330725126U, + 367400677U, 888239854U, 545570454U, 4259590525U, 134343617U, 1102169784U, + 1647463719U, 3260979784U, 1518840883U, 3631537963U, 3342671457U, + 1301549147U, 2083739356U, 146593792U, 3217959080U, 652755743U, 2032187193U, + 3898758414U, 1021358093U, 4037409230U, 2176407931U, 3427391950U, + 2883553603U, 985613827U, 3105265092U, 3423168427U, 3387507672U, 467170288U, + 2141266163U, 3723870208U, 916410914U, 1293987799U, 2652584950U, 769160137U, + 3205292896U, 1561287359U, 1684510084U, 3136055621U, 3765171391U, 639683232U, + 2639569327U, 1218546948U, 4263586685U, 3058215773U, 2352279820U, 401870217U, + 2625822463U, 1529125296U, 2981801895U, 1191285226U, 4027725437U, + 3432700217U, 4098835661U, 971182783U, 2443861173U, 3881457123U, 3874386651U, + 457276199U, 2638294160U, 4002809368U, 421169044U, 1112642589U, 3076213779U, + 3387033971U, 2499610950U, 3057240914U, 1662679783U, 461224431U, + 1168395933U}; +static const uint32_t init_by_array_32_expected[] 
= {2920711183U, 3885745737U, + 3501893680U, 856470934U, 1421864068U, 277361036U, 1518638004U, 2328404353U, + 3355513634U, 64329189U, 1624587673U, 3508467182U, 2481792141U, 3706480799U, + 1925859037U, 2913275699U, 882658412U, 384641219U, 422202002U, 1873384891U, + 2006084383U, 3924929912U, 1636718106U, 3108838742U, 1245465724U, + 4195470535U, 779207191U, 1577721373U, 1390469554U, 2928648150U, 121399709U, + 3170839019U, 4044347501U, 953953814U, 3821710850U, 3085591323U, 3666535579U, + 3577837737U, 2012008410U, 3565417471U, 4044408017U, 433600965U, 1637785608U, + 1798509764U, 860770589U, 3081466273U, 3982393409U, 2451928325U, 3437124742U, + 4093828739U, 3357389386U, 2154596123U, 496568176U, 2650035164U, 2472361850U, + 3438299U, 2150366101U, 1577256676U, 3802546413U, 1787774626U, 4078331588U, + 3706103141U, 170391138U, 3806085154U, 1680970100U, 1961637521U, 3316029766U, + 890610272U, 1453751581U, 1430283664U, 3051057411U, 3597003186U, 542563954U, + 3796490244U, 1690016688U, 3448752238U, 440702173U, 347290497U, 1121336647U, + 2540588620U, 280881896U, 2495136428U, 213707396U, 15104824U, 2946180358U, + 659000016U, 566379385U, 2614030979U, 2855760170U, 334526548U, 2315569495U, + 2729518615U, 564745877U, 1263517638U, 3157185798U, 1604852056U, 1011639885U, + 2950579535U, 2524219188U, 312951012U, 1528896652U, 1327861054U, 2846910138U, + 3966855905U, 2536721582U, 855353911U, 1685434729U, 3303978929U, 1624872055U, + 4020329649U, 3164802143U, 1642802700U, 1957727869U, 1792352426U, + 3334618929U, 2631577923U, 3027156164U, 842334259U, 3353446843U, 1226432104U, + 1742801369U, 3552852535U, 3471698828U, 1653910186U, 3380330939U, + 2313782701U, 3351007196U, 2129839995U, 1800682418U, 4085884420U, + 1625156629U, 3669701987U, 615211810U, 3294791649U, 4131143784U, 2590843588U, + 3207422808U, 3275066464U, 561592872U, 3957205738U, 3396578098U, 48410678U, + 3505556445U, 1005764855U, 3920606528U, 2936980473U, 2378918600U, + 2404449845U, 1649515163U, 701203563U, 3705256349U, 83714199U, 
3586854132U, + 922978446U, 2863406304U, 3523398907U, 2606864832U, 2385399361U, 3171757816U, + 4262841009U, 3645837721U, 1169579486U, 3666433897U, 3174689479U, + 1457866976U, 3803895110U, 3346639145U, 1907224409U, 1978473712U, + 1036712794U, 980754888U, 1302782359U, 1765252468U, 459245755U, 3728923860U, + 1512894209U, 2046491914U, 207860527U, 514188684U, 2288713615U, 1597354672U, + 3349636117U, 2357291114U, 3995796221U, 945364213U, 1893326518U, 3770814016U, + 1691552714U, 2397527410U, 967486361U, 776416472U, 4197661421U, 951150819U, + 1852770983U, 4044624181U, 1399439738U, 4194455275U, 2284037669U, + 1550734958U, 3321078108U, 1865235926U, 2912129961U, 2664980877U, + 1357572033U, 2600196436U, 2486728200U, 2372668724U, 1567316966U, + 2374111491U, 1839843570U, 20815612U, 3727008608U, 3871996229U, 824061249U, + 1932503978U, 3404541726U, 758428924U, 2609331364U, 1223966026U, 1299179808U, + 648499352U, 2180134401U, 880821170U, 3781130950U, 113491270U, 1032413764U, + 4185884695U, 2490396037U, 1201932817U, 4060951446U, 4165586898U, + 1629813212U, 2887821158U, 415045333U, 628926856U, 2193466079U, 3391843445U, + 2227540681U, 1907099846U, 2848448395U, 1717828221U, 1372704537U, + 1707549841U, 2294058813U, 2101214437U, 2052479531U, 1695809164U, + 3176587306U, 2632770465U, 81634404U, 1603220563U, 644238487U, 302857763U, + 897352968U, 2613146653U, 1391730149U, 4245717312U, 4191828749U, 1948492526U, + 2618174230U, 3992984522U, 2178852787U, 3596044509U, 3445573503U, + 2026614616U, 915763564U, 3415689334U, 2532153403U, 3879661562U, 2215027417U, + 3111154986U, 2929478371U, 668346391U, 1152241381U, 2632029711U, 3004150659U, + 2135025926U, 948690501U, 2799119116U, 4228829406U, 1981197489U, 4209064138U, + 684318751U, 3459397845U, 201790843U, 4022541136U, 3043635877U, 492509624U, + 3263466772U, 1509148086U, 921459029U, 3198857146U, 705479721U, 3835966910U, + 3603356465U, 576159741U, 1742849431U, 594214882U, 2055294343U, 3634861861U, + 449571793U, 3246390646U, 3868232151U, 1479156585U, 
2900125656U, 2464815318U, + 3960178104U, 1784261920U, 18311476U, 3627135050U, 644609697U, 424968996U, + 919890700U, 2986824110U, 816423214U, 4003562844U, 1392714305U, 1757384428U, + 2569030598U, 995949559U, 3875659880U, 2933807823U, 2752536860U, 2993858466U, + 4030558899U, 2770783427U, 2775406005U, 2777781742U, 1931292655U, 472147933U, + 3865853827U, 2726470545U, 2668412860U, 2887008249U, 408979190U, 3578063323U, + 3242082049U, 1778193530U, 27981909U, 2362826515U, 389875677U, 1043878156U, + 581653903U, 3830568952U, 389535942U, 3713523185U, 2768373359U, 2526101582U, + 1998618197U, 1160859704U, 3951172488U, 1098005003U, 906275699U, 3446228002U, + 2220677963U, 2059306445U, 132199571U, 476838790U, 1868039399U, 3097344807U, + 857300945U, 396345050U, 2835919916U, 1782168828U, 1419519470U, 4288137521U, + 819087232U, 596301494U, 872823172U, 1526888217U, 805161465U, 1116186205U, + 2829002754U, 2352620120U, 620121516U, 354159268U, 3601949785U, 209568138U, + 1352371732U, 2145977349U, 4236871834U, 1539414078U, 3558126206U, + 3224857093U, 4164166682U, 3817553440U, 3301780278U, 2682696837U, + 3734994768U, 1370950260U, 1477421202U, 2521315749U, 1330148125U, + 1261554731U, 2769143688U, 3554756293U, 4235882678U, 3254686059U, + 3530579953U, 1215452615U, 3574970923U, 4057131421U, 589224178U, 1000098193U, + 171190718U, 2521852045U, 2351447494U, 2284441580U, 2646685513U, 3486933563U, + 3789864960U, 1190528160U, 1702536782U, 1534105589U, 4262946827U, + 2726686826U, 3584544841U, 2348270128U, 2145092281U, 2502718509U, + 1027832411U, 3571171153U, 1287361161U, 4011474411U, 3241215351U, + 2419700818U, 971242709U, 1361975763U, 1096842482U, 3271045537U, 81165449U, + 612438025U, 3912966678U, 1356929810U, 733545735U, 537003843U, 1282953084U, + 884458241U, 588930090U, 3930269801U, 2961472450U, 1219535534U, 3632251943U, + 268183903U, 1441240533U, 3653903360U, 3854473319U, 2259087390U, 2548293048U, + 2022641195U, 2105543911U, 1764085217U, 3246183186U, 482438805U, 888317895U, + 2628314765U, 
2466219854U, 717546004U, 2322237039U, 416725234U, 1544049923U, + 1797944973U, 3398652364U, 3111909456U, 485742908U, 2277491072U, 1056355088U, + 3181001278U, 129695079U, 2693624550U, 1764438564U, 3797785470U, 195503713U, + 3266519725U, 2053389444U, 1961527818U, 3400226523U, 3777903038U, + 2597274307U, 4235851091U, 4094406648U, 2171410785U, 1781151386U, + 1378577117U, 654643266U, 3424024173U, 3385813322U, 679385799U, 479380913U, + 681715441U, 3096225905U, 276813409U, 3854398070U, 2721105350U, 831263315U, + 3276280337U, 2628301522U, 3984868494U, 1466099834U, 2104922114U, + 1412672743U, 820330404U, 3491501010U, 942735832U, 710652807U, 3972652090U, + 679881088U, 40577009U, 3705286397U, 2815423480U, 3566262429U, 663396513U, + 3777887429U, 4016670678U, 404539370U, 1142712925U, 1140173408U, 2913248352U, + 2872321286U, 263751841U, 3175196073U, 3162557581U, 2878996619U, 75498548U, + 3836833140U, 3284664959U, 1157523805U, 112847376U, 207855609U, 1337979698U, + 1222578451U, 157107174U, 901174378U, 3883717063U, 1618632639U, 1767889440U, + 4264698824U, 1582999313U, 884471997U, 2508825098U, 3756370771U, 2457213553U, + 3565776881U, 3709583214U, 915609601U, 460833524U, 1091049576U, 85522880U, + 2553251U, 132102809U, 2429882442U, 2562084610U, 1386507633U, 4112471229U, + 21965213U, 1981516006U, 2418435617U, 3054872091U, 4251511224U, 2025783543U, + 1916911512U, 2454491136U, 3938440891U, 3825869115U, 1121698605U, + 3463052265U, 802340101U, 1912886800U, 4031997367U, 3550640406U, 1596096923U, + 610150600U, 431464457U, 2541325046U, 486478003U, 739704936U, 2862696430U, + 3037903166U, 1129749694U, 2611481261U, 1228993498U, 510075548U, 3424962587U, + 2458689681U, 818934833U, 4233309125U, 1608196251U, 3419476016U, 1858543939U, + 2682166524U, 3317854285U, 631986188U, 3008214764U, 613826412U, 3567358221U, + 3512343882U, 1552467474U, 3316162670U, 1275841024U, 4142173454U, 565267881U, + 768644821U, 198310105U, 2396688616U, 1837659011U, 203429334U, 854539004U, + 4235811518U, 3338304926U, 
3730418692U, 3852254981U, 3032046452U, + 2329811860U, 2303590566U, 2696092212U, 3894665932U, 145835667U, 249563655U, + 1932210840U, 2431696407U, 3312636759U, 214962629U, 2092026914U, 3020145527U, + 4073039873U, 2739105705U, 1308336752U, 855104522U, 2391715321U, 67448785U, + 547989482U, 854411802U, 3608633740U, 431731530U, 537375589U, 3888005760U, + 696099141U, 397343236U, 1864511780U, 44029739U, 1729526891U, 1993398655U, + 2010173426U, 2591546756U, 275223291U, 1503900299U, 4217765081U, 2185635252U, + 1122436015U, 3550155364U, 681707194U, 3260479338U, 933579397U, 2983029282U, + 2505504587U, 2667410393U, 2962684490U, 4139721708U, 2658172284U, + 2452602383U, 2607631612U, 1344296217U, 3075398709U, 2949785295U, + 1049956168U, 3917185129U, 2155660174U, 3280524475U, 1503827867U, 674380765U, + 1918468193U, 3843983676U, 634358221U, 2538335643U, 1873351298U, 3368723763U, + 2129144130U, 3203528633U, 3087174986U, 2691698871U, 2516284287U, 24437745U, + 1118381474U, 2816314867U, 2448576035U, 4281989654U, 217287825U, 165872888U, + 2628995722U, 3533525116U, 2721669106U, 872340568U, 3429930655U, 3309047304U, + 3916704967U, 3270160355U, 1348884255U, 1634797670U, 881214967U, 4259633554U, + 174613027U, 1103974314U, 1625224232U, 2678368291U, 1133866707U, 3853082619U, + 4073196549U, 1189620777U, 637238656U, 930241537U, 4042750792U, 3842136042U, + 2417007212U, 2524907510U, 1243036827U, 1282059441U, 3764588774U, + 1394459615U, 2323620015U, 1166152231U, 3307479609U, 3849322257U, + 3507445699U, 4247696636U, 758393720U, 967665141U, 1095244571U, 1319812152U, + 407678762U, 2640605208U, 2170766134U, 3663594275U, 4039329364U, 2512175520U, + 725523154U, 2249807004U, 3312617979U, 2414634172U, 1278482215U, 349206484U, + 1573063308U, 1196429124U, 3873264116U, 2400067801U, 268795167U, 226175489U, + 2961367263U, 1968719665U, 42656370U, 1010790699U, 561600615U, 2422453992U, + 3082197735U, 1636700484U, 3977715296U, 3125350482U, 3478021514U, + 2227819446U, 1540868045U, 3061908980U, 1087362407U, 
3625200291U, 361937537U, + 580441897U, 1520043666U, 2270875402U, 1009161260U, 2502355842U, 4278769785U, + 473902412U, 1057239083U, 1905829039U, 1483781177U, 2080011417U, 1207494246U, + 1806991954U, 2194674403U, 3455972205U, 807207678U, 3655655687U, 674112918U, + 195425752U, 3917890095U, 1874364234U, 1837892715U, 3663478166U, 1548892014U, + 2570748714U, 2049929836U, 2167029704U, 697543767U, 3499545023U, 3342496315U, + 1725251190U, 3561387469U, 2905606616U, 1580182447U, 3934525927U, + 4103172792U, 1365672522U, 1534795737U, 3308667416U, 2841911405U, + 3943182730U, 4072020313U, 3494770452U, 3332626671U, 55327267U, 478030603U, + 411080625U, 3419529010U, 1604767823U, 3513468014U, 570668510U, 913790824U, + 2283967995U, 695159462U, 3825542932U, 4150698144U, 1829758699U, 202895590U, + 1609122645U, 1267651008U, 2910315509U, 2511475445U, 2477423819U, + 3932081579U, 900879979U, 2145588390U, 2670007504U, 580819444U, 1864996828U, + 2526325979U, 1019124258U, 815508628U, 2765933989U, 1277301341U, 3006021786U, + 855540956U, 288025710U, 1919594237U, 2331223864U, 177452412U, 2475870369U, + 2689291749U, 865194284U, 253432152U, 2628531804U, 2861208555U, 2361597573U, + 1653952120U, 1039661024U, 2159959078U, 3709040440U, 3564718533U, + 2596878672U, 2041442161U, 31164696U, 2662962485U, 3665637339U, 1678115244U, + 2699839832U, 3651968520U, 3521595541U, 458433303U, 2423096824U, 21831741U, + 380011703U, 2498168716U, 861806087U, 1673574843U, 4188794405U, 2520563651U, + 2632279153U, 2170465525U, 4171949898U, 3886039621U, 1661344005U, + 3424285243U, 992588372U, 2500984144U, 2993248497U, 3590193895U, 1535327365U, + 515645636U, 131633450U, 3729760261U, 1613045101U, 3254194278U, 15889678U, + 1493590689U, 244148718U, 2991472662U, 1401629333U, 777349878U, 2501401703U, + 4285518317U, 3794656178U, 955526526U, 3442142820U, 3970298374U, 736025417U, + 2737370764U, 1271509744U, 440570731U, 136141826U, 1596189518U, 923399175U, + 257541519U, 3505774281U, 2194358432U, 2518162991U, 1379893637U, 2667767062U, + 
3748146247U, 1821712620U, 3923161384U, 1947811444U, 2392527197U, + 4127419685U, 1423694998U, 4156576871U, 1382885582U, 3420127279U, + 3617499534U, 2994377493U, 4038063986U, 1918458672U, 2983166794U, + 4200449033U, 353294540U, 1609232588U, 243926648U, 2332803291U, 507996832U, + 2392838793U, 4075145196U, 2060984340U, 4287475136U, 88232602U, 2491531140U, + 4159725633U, 2272075455U, 759298618U, 201384554U, 838356250U, 1416268324U, + 674476934U, 90795364U, 141672229U, 3660399588U, 4196417251U, 3249270244U, + 3774530247U, 59587265U, 3683164208U, 19392575U, 1463123697U, 1882205379U, + 293780489U, 2553160622U, 2933904694U, 675638239U, 2851336944U, 1435238743U, + 2448730183U, 804436302U, 2119845972U, 322560608U, 4097732704U, 2987802540U, + 641492617U, 2575442710U, 4217822703U, 3271835300U, 2836418300U, 3739921620U, + 2138378768U, 2879771855U, 4294903423U, 3121097946U, 2603440486U, + 2560820391U, 1012930944U, 2313499967U, 584489368U, 3431165766U, 897384869U, + 2062537737U, 2847889234U, 3742362450U, 2951174585U, 4204621084U, + 1109373893U, 3668075775U, 2750138839U, 3518055702U, 733072558U, 4169325400U, + 788493625U}; +static const uint64_t init_gen_rand_64_expected[] = {KQU(16924766246869039260), + KQU(8201438687333352714), KQU(2265290287015001750), + KQU(18397264611805473832), KQU(3375255223302384358), + KQU(6345559975416828796), KQU(18229739242790328073), + KQU(7596792742098800905), KQU(255338647169685981), KQU(2052747240048610300), + KQU(18328151576097299343), KQU(12472905421133796567), + KQU(11315245349717600863), KQU(16594110197775871209), + KQU(15708751964632456450), KQU(10452031272054632535), + KQU(11097646720811454386), KQU(4556090668445745441), + KQU(17116187693090663106), KQU(14931526836144510645), + KQU(9190752218020552591), KQU(9625800285771901401), + KQU(13995141077659972832), KQU(5194209094927829625), + KQU(4156788379151063303), KQU(8523452593770139494), + KQU(14082382103049296727), KQU(2462601863986088483), + KQU(3030583461592840678), KQU(5221622077872827681), 
+ KQU(3084210671228981236), KQU(13956758381389953823), + KQU(13503889856213423831), KQU(15696904024189836170), + KQU(4612584152877036206), KQU(6231135538447867881), + KQU(10172457294158869468), KQU(6452258628466708150), + KQU(14044432824917330221), KQU(370168364480044279), + KQU(10102144686427193359), KQU(667870489994776076), + KQU(2732271956925885858), KQU(18027788905977284151), + KQU(15009842788582923859), KQU(7136357960180199542), + KQU(15901736243475578127), KQU(16951293785352615701), + KQU(10551492125243691632), KQU(17668869969146434804), + KQU(13646002971174390445), KQU(9804471050759613248), + KQU(5511670439655935493), KQU(18103342091070400926), + KQU(17224512747665137533), KQU(15534627482992618168), + KQU(1423813266186582647), KQU(15821176807932930024), KQU(30323369733607156), + KQU(11599382494723479403), KQU(653856076586810062), + KQU(3176437395144899659), KQU(14028076268147963917), + KQU(16156398271809666195), KQU(3166955484848201676), + KQU(5746805620136919390), KQU(17297845208891256593), + KQU(11691653183226428483), KQU(17900026146506981577), + KQU(15387382115755971042), KQU(16923567681040845943), + KQU(8039057517199388606), KQU(11748409241468629263), + KQU(794358245539076095), KQU(13438501964693401242), + KQU(14036803236515618962), KQU(5252311215205424721), + KQU(17806589612915509081), KQU(6802767092397596006), + KQU(14212120431184557140), KQU(1072951366761385712), + KQU(13098491780722836296), KQU(9466676828710797353), + KQU(12673056849042830081), KQU(12763726623645357580), + KQU(16468961652999309493), KQU(15305979875636438926), + KQU(17444713151223449734), KQU(5692214267627883674), + KQU(13049589139196151505), KQU(880115207831670745), + KQU(1776529075789695498), KQU(16695225897801466485), + KQU(10666901778795346845), KQU(6164389346722833869), + KQU(2863817793264300475), KQU(9464049921886304754), + KQU(3993566636740015468), KQU(9983749692528514136), + KQU(16375286075057755211), KQU(16042643417005440820), + KQU(11445419662923489877), 
KQU(7999038846885158836), + KQU(6721913661721511535), KQU(5363052654139357320), + KQU(1817788761173584205), KQU(13290974386445856444), + KQU(4650350818937984680), KQU(8219183528102484836), + KQU(1569862923500819899), KQU(4189359732136641860), + KQU(14202822961683148583), KQU(4457498315309429058), + KQU(13089067387019074834), KQU(11075517153328927293), + KQU(10277016248336668389), KQU(7070509725324401122), + KQU(17808892017780289380), KQU(13143367339909287349), + KQU(1377743745360085151), KQU(5749341807421286485), + KQU(14832814616770931325), KQU(7688820635324359492), + KQU(10960474011539770045), KQU(81970066653179790), + KQU(12619476072607878022), KQU(4419566616271201744), + KQU(15147917311750568503), KQU(5549739182852706345), + KQU(7308198397975204770), KQU(13580425496671289278), + KQU(17070764785210130301), KQU(8202832846285604405), + KQU(6873046287640887249), KQU(6927424434308206114), + KQU(6139014645937224874), KQU(10290373645978487639), + KQU(15904261291701523804), KQU(9628743442057826883), + KQU(18383429096255546714), KQU(4977413265753686967), + KQU(7714317492425012869), KQU(9025232586309926193), + KQU(14627338359776709107), KQU(14759849896467790763), + KQU(10931129435864423252), KQU(4588456988775014359), + KQU(10699388531797056724), KQU(468652268869238792), + KQU(5755943035328078086), KQU(2102437379988580216), + KQU(9986312786506674028), KQU(2654207180040945604), + KQU(8726634790559960062), KQU(100497234871808137), KQU(2800137176951425819), + KQU(6076627612918553487), KQU(5780186919186152796), + KQU(8179183595769929098), KQU(6009426283716221169), + KQU(2796662551397449358), KQU(1756961367041986764), + KQU(6972897917355606205), KQU(14524774345368968243), + KQU(2773529684745706940), KQU(4853632376213075959), + KQU(4198177923731358102), KQU(8271224913084139776), + KQU(2741753121611092226), KQU(16782366145996731181), + KQU(15426125238972640790), KQU(13595497100671260342), + KQU(3173531022836259898), KQU(6573264560319511662), + KQU(18041111951511157441), 
KQU(2351433581833135952), + KQU(3113255578908173487), KQU(1739371330877858784), + KQU(16046126562789165480), KQU(8072101652214192925), + KQU(15267091584090664910), KQU(9309579200403648940), + KQU(5218892439752408722), KQU(14492477246004337115), + KQU(17431037586679770619), KQU(7385248135963250480), + KQU(9580144956565560660), KQU(4919546228040008720), + KQU(15261542469145035584), KQU(18233297270822253102), + KQU(5453248417992302857), KQU(9309519155931460285), + KQU(10342813012345291756), KQU(15676085186784762381), + KQU(15912092950691300645), KQU(9371053121499003195), + KQU(9897186478226866746), KQU(14061858287188196327), + KQU(122575971620788119), KQU(12146750969116317754), + KQU(4438317272813245201), KQU(8332576791009527119), + KQU(13907785691786542057), KQU(10374194887283287467), + KQU(2098798755649059566), KQU(3416235197748288894), + KQU(8688269957320773484), KQU(7503964602397371571), + KQU(16724977015147478236), KQU(9461512855439858184), + KQU(13259049744534534727), KQU(3583094952542899294), + KQU(8764245731305528292), KQU(13240823595462088985), + KQU(13716141617617910448), KQU(18114969519935960955), + KQU(2297553615798302206), KQU(4585521442944663362), + KQU(17776858680630198686), KQU(4685873229192163363), + KQU(152558080671135627), KQU(15424900540842670088), + KQU(13229630297130024108), KQU(17530268788245718717), + KQU(16675633913065714144), KQU(3158912717897568068), + KQU(15399132185380087288), KQU(7401418744515677872), + KQU(13135412922344398535), KQU(6385314346100509511), + KQU(13962867001134161139), KQU(10272780155442671999), + KQU(12894856086597769142), KQU(13340877795287554994), + KQU(12913630602094607396), KQU(12543167911119793857), + KQU(17343570372251873096), KQU(10959487764494150545), + KQU(6966737953093821128), KQU(13780699135496988601), + KQU(4405070719380142046), KQU(14923788365607284982), + KQU(2869487678905148380), KQU(6416272754197188403), + KQU(15017380475943612591), KQU(1995636220918429487), + KQU(3402016804620122716), 
KQU(15800188663407057080), + KQU(11362369990390932882), KQU(15262183501637986147), + KQU(10239175385387371494), KQU(9352042420365748334), + KQU(1682457034285119875), KQU(1724710651376289644), + KQU(2038157098893817966), KQU(9897825558324608773), + KQU(1477666236519164736), KQU(16835397314511233640), + KQU(10370866327005346508), KQU(10157504370660621982), + KQU(12113904045335882069), KQU(13326444439742783008), + KQU(11302769043000765804), KQU(13594979923955228484), + KQU(11779351762613475968), KQU(3786101619539298383), + KQU(8021122969180846063), KQU(15745904401162500495), + KQU(10762168465993897267), KQU(13552058957896319026), + KQU(11200228655252462013), KQU(5035370357337441226), + KQU(7593918984545500013), KQU(5418554918361528700), + KQU(4858270799405446371), KQU(9974659566876282544), + KQU(18227595922273957859), KQU(2772778443635656220), + KQU(14285143053182085385), KQU(9939700992429600469), + KQU(12756185904545598068), KQU(2020783375367345262), KQU(57026775058331227), + KQU(950827867930065454), KQU(6602279670145371217), KQU(2291171535443566929), + KQU(5832380724425010313), KQU(1220343904715982285), + KQU(17045542598598037633), KQU(15460481779702820971), + KQU(13948388779949365130), KQU(13975040175430829518), + KQU(17477538238425541763), KQU(11104663041851745725), + KQU(15860992957141157587), KQU(14529434633012950138), + KQU(2504838019075394203), KQU(7512113882611121886), + KQU(4859973559980886617), KQU(1258601555703250219), + KQU(15594548157514316394), KQU(4516730171963773048), + KQU(11380103193905031983), KQU(6809282239982353344), + KQU(18045256930420065002), KQU(2453702683108791859), + KQU(977214582986981460), KQU(2006410402232713466), KQU(6192236267216378358), + KQU(3429468402195675253), KQU(18146933153017348921), + KQU(17369978576367231139), KQU(1246940717230386603), + KQU(11335758870083327110), KQU(14166488801730353682), + KQU(9008573127269635732), KQU(10776025389820643815), + KQU(15087605441903942962), KQU(1359542462712147922), + 
KQU(13898874411226454206), KQU(17911176066536804411), + KQU(9435590428600085274), KQU(294488509967864007), KQU(8890111397567922046), + KQU(7987823476034328778), KQU(13263827582440967651), + KQU(7503774813106751573), KQU(14974747296185646837), + KQU(8504765037032103375), KQU(17340303357444536213), + KQU(7704610912964485743), KQU(8107533670327205061), + KQU(9062969835083315985), KQU(16968963142126734184), + KQU(12958041214190810180), KQU(2720170147759570200), + KQU(2986358963942189566), KQU(14884226322219356580), + KQU(286224325144368520), KQU(11313800433154279797), + KQU(18366849528439673248), KQU(17899725929482368789), + KQU(3730004284609106799), KQU(1654474302052767205), + KQU(5006698007047077032), KQU(8196893913601182838), + KQU(15214541774425211640), KQU(17391346045606626073), + KQU(8369003584076969089), KQU(3939046733368550293), + KQU(10178639720308707785), KQU(2180248669304388697), KQU(62894391300126322), + KQU(9205708961736223191), KQU(6837431058165360438), + KQU(3150743890848308214), KQU(17849330658111464583), + KQU(12214815643135450865), KQU(13410713840519603402), + KQU(3200778126692046802), KQU(13354780043041779313), + KQU(800850022756886036), KQU(15660052933953067433), + KQU(6572823544154375676), KQU(11030281857015819266), + KQU(12682241941471433835), KQU(11654136407300274693), + KQU(4517795492388641109), KQU(9757017371504524244), + KQU(17833043400781889277), KQU(12685085201747792227), + KQU(10408057728835019573), KQU(98370418513455221), KQU(6732663555696848598), + KQU(13248530959948529780), KQU(3530441401230622826), + KQU(18188251992895660615), KQU(1847918354186383756), + KQU(1127392190402660921), KQU(11293734643143819463), + KQU(3015506344578682982), KQU(13852645444071153329), + KQU(2121359659091349142), KQU(1294604376116677694), + KQU(5616576231286352318), KQU(7112502442954235625), + KQU(11676228199551561689), KQU(12925182803007305359), + KQU(7852375518160493082), KQU(1136513130539296154), + KQU(5636923900916593195), KQU(3221077517612607747), + 
KQU(17784790465798152513), KQU(3554210049056995938), + KQU(17476839685878225874), KQU(3206836372585575732), + KQU(2765333945644823430), KQU(10080070903718799528), + KQU(5412370818878286353), KQU(9689685887726257728), + KQU(8236117509123533998), KQU(1951139137165040214), + KQU(4492205209227980349), KQU(16541291230861602967), + KQU(1424371548301437940), KQU(9117562079669206794), + KQU(14374681563251691625), KQU(13873164030199921303), + KQU(6680317946770936731), KQU(15586334026918276214), + KQU(10896213950976109802), KQU(9506261949596413689), + KQU(9903949574308040616), KQU(6038397344557204470), KQU(174601465422373648), + KQU(15946141191338238030), KQU(17142225620992044937), + KQU(7552030283784477064), KQU(2947372384532947997), KQU(510797021688197711), + KQU(4962499439249363461), KQU(23770320158385357), KQU(959774499105138124), + KQU(1468396011518788276), KQU(2015698006852312308), + KQU(4149400718489980136), KQU(5992916099522371188), + KQU(10819182935265531076), KQU(16189787999192351131), + KQU(342833961790261950), KQU(12470830319550495336), + KQU(18128495041912812501), KQU(1193600899723524337), + KQU(9056793666590079770), KQU(2154021227041669041), + KQU(4963570213951235735), KQU(4865075960209211409), + KQU(2097724599039942963), KQU(2024080278583179845), + KQU(11527054549196576736), KQU(10650256084182390252), + KQU(4808408648695766755), KQU(1642839215013788844), + KQU(10607187948250398390), KQU(7076868166085913508), + KQU(730522571106887032), KQU(12500579240208524895), + KQU(4484390097311355324), KQU(15145801330700623870), + KQU(8055827661392944028), KQU(5865092976832712268), + KQU(15159212508053625143), KQU(3560964582876483341), + KQU(4070052741344438280), KQU(6032585709886855634), + KQU(15643262320904604873), KQU(2565119772293371111), + KQU(318314293065348260), KQU(15047458749141511872), + KQU(7772788389811528730), KQU(7081187494343801976), + KQU(6465136009467253947), KQU(10425940692543362069), + KQU(554608190318339115), KQU(14796699860302125214), + 
KQU(1638153134431111443), KQU(10336967447052276248), + KQU(8412308070396592958), KQU(4004557277152051226), + KQU(8143598997278774834), KQU(16413323996508783221), + KQU(13139418758033994949), KQU(9772709138335006667), + KQU(2818167159287157659), KQU(17091740573832523669), + KQU(14629199013130751608), KQU(18268322711500338185), + KQU(8290963415675493063), KQU(8830864907452542588), + KQU(1614839084637494849), KQU(14855358500870422231), + KQU(3472996748392519937), KQU(15317151166268877716), + KQU(5825895018698400362), KQU(16730208429367544129), + KQU(10481156578141202800), KQU(4746166512382823750), + KQU(12720876014472464998), KQU(8825177124486735972), + KQU(13733447296837467838), KQU(6412293741681359625), + KQU(8313213138756135033), KQU(11421481194803712517), + KQU(7997007691544174032), KQU(6812963847917605930), + KQU(9683091901227558641), KQU(14703594165860324713), + KQU(1775476144519618309), KQU(2724283288516469519), KQU(717642555185856868), + KQU(8736402192215092346), KQU(11878800336431381021), + KQU(4348816066017061293), KQU(6115112756583631307), + KQU(9176597239667142976), KQU(12615622714894259204), + KQU(10283406711301385987), KQU(5111762509485379420), + KQU(3118290051198688449), KQU(7345123071632232145), + KQU(9176423451688682359), KQU(4843865456157868971), + KQU(12008036363752566088), KQU(12058837181919397720), + KQU(2145073958457347366), KQU(1526504881672818067), + KQU(3488830105567134848), KQU(13208362960674805143), + KQU(4077549672899572192), KQU(7770995684693818365), + KQU(1398532341546313593), KQU(12711859908703927840), + KQU(1417561172594446813), KQU(17045191024194170604), + KQU(4101933177604931713), KQU(14708428834203480320), + KQU(17447509264469407724), KQU(14314821973983434255), + KQU(17990472271061617265), KQU(5087756685841673942), + KQU(12797820586893859939), KQU(1778128952671092879), + KQU(3535918530508665898), KQU(9035729701042481301), + KQU(14808661568277079962), KQU(14587345077537747914), + KQU(11920080002323122708), KQU(6426515805197278753), + 
KQU(3295612216725984831), KQU(11040722532100876120), + KQU(12305952936387598754), KQU(16097391899742004253), + KQU(4908537335606182208), KQU(12446674552196795504), + KQU(16010497855816895177), KQU(9194378874788615551), + KQU(3382957529567613384), KQU(5154647600754974077), + KQU(9801822865328396141), KQU(9023662173919288143), + KQU(17623115353825147868), KQU(8238115767443015816), + KQU(15811444159859002560), KQU(9085612528904059661), + KQU(6888601089398614254), KQU(258252992894160189), KQU(6704363880792428622), + KQU(6114966032147235763), KQU(11075393882690261875), + KQU(8797664238933620407), KQU(5901892006476726920), + KQU(5309780159285518958), KQU(14940808387240817367), + KQU(14642032021449656698), KQU(9808256672068504139), + KQU(3670135111380607658), KQU(11211211097845960152), + KQU(1474304506716695808), KQU(15843166204506876239), + KQU(7661051252471780561), KQU(10170905502249418476), + KQU(7801416045582028589), KQU(2763981484737053050), + KQU(9491377905499253054), KQU(16201395896336915095), + KQU(9256513756442782198), KQU(5411283157972456034), + KQU(5059433122288321676), KQU(4327408006721123357), + KQU(9278544078834433377), KQU(7601527110882281612), + KQU(11848295896975505251), KQU(12096998801094735560), + KQU(14773480339823506413), KQU(15586227433895802149), + KQU(12786541257830242872), KQU(6904692985140503067), + KQU(5309011515263103959), KQU(12105257191179371066), + KQU(14654380212442225037), KQU(2556774974190695009), + KQU(4461297399927600261), KQU(14888225660915118646), + KQU(14915459341148291824), KQU(2738802166252327631), + KQU(6047155789239131512), KQU(12920545353217010338), + KQU(10697617257007840205), KQU(2751585253158203504), + KQU(13252729159780047496), KQU(14700326134672815469), + KQU(14082527904374600529), KQU(16852962273496542070), + KQU(17446675504235853907), KQU(15019600398527572311), + KQU(12312781346344081551), KQU(14524667935039810450), + KQU(5634005663377195738), KQU(11375574739525000569), + KQU(2423665396433260040), 
KQU(5222836914796015410), + KQU(4397666386492647387), KQU(4619294441691707638), KQU(665088602354770716), + KQU(13246495665281593610), KQU(6564144270549729409), + KQU(10223216188145661688), KQU(3961556907299230585), + KQU(11543262515492439914), KQU(16118031437285993790), + KQU(7143417964520166465), KQU(13295053515909486772), KQU(40434666004899675), + KQU(17127804194038347164), KQU(8599165966560586269), + KQU(8214016749011284903), KQU(13725130352140465239), + KQU(5467254474431726291), KQU(7748584297438219877), + KQU(16933551114829772472), KQU(2169618439506799400), + KQU(2169787627665113463), KQU(17314493571267943764), + KQU(18053575102911354912), KQU(11928303275378476973), + KQU(11593850925061715550), KQU(17782269923473589362), + KQU(3280235307704747039), KQU(6145343578598685149), + KQU(17080117031114086090), KQU(18066839902983594755), + KQU(6517508430331020706), KQU(8092908893950411541), + KQU(12558378233386153732), KQU(4476532167973132976), + KQU(16081642430367025016), KQU(4233154094369139361), + KQU(8693630486693161027), KQU(11244959343027742285), + KQU(12273503967768513508), KQU(14108978636385284876), + KQU(7242414665378826984), KQU(6561316938846562432), + KQU(8601038474994665795), KQU(17532942353612365904), + KQU(17940076637020912186), KQU(7340260368823171304), + KQU(7061807613916067905), KQU(10561734935039519326), + KQU(17990796503724650862), KQU(6208732943911827159), + KQU(359077562804090617), KQU(14177751537784403113), + KQU(10659599444915362902), KQU(15081727220615085833), + KQU(13417573895659757486), KQU(15513842342017811524), + KQU(11814141516204288231), KQU(1827312513875101814), + KQU(2804611699894603103), KQU(17116500469975602763), + KQU(12270191815211952087), KQU(12256358467786024988), + KQU(18435021722453971267), KQU(671330264390865618), KQU(476504300460286050), + KQU(16465470901027093441), KQU(4047724406247136402), + KQU(1322305451411883346), KQU(1388308688834322280), + KQU(7303989085269758176), KQU(9323792664765233642), + KQU(4542762575316368936), 
KQU(17342696132794337618), + KQU(4588025054768498379), KQU(13415475057390330804), + KQU(17880279491733405570), KQU(10610553400618620353), + KQU(3180842072658960139), KQU(13002966655454270120), + KQU(1665301181064982826), KQU(7083673946791258979), KQU(190522247122496820), + KQU(17388280237250677740), KQU(8430770379923642945), + KQU(12987180971921668584), KQU(2311086108365390642), + KQU(2870984383579822345), KQU(14014682609164653318), + KQU(14467187293062251484), KQU(192186361147413298), + KQU(15171951713531796524), KQU(9900305495015948728), + KQU(17958004775615466344), KQU(14346380954498606514), + KQU(18040047357617407096), KQU(5035237584833424532), + KQU(15089555460613972287), KQU(4131411873749729831), + KQU(1329013581168250330), KQU(10095353333051193949), + KQU(10749518561022462716), KQU(9050611429810755847), + KQU(15022028840236655649), KQU(8775554279239748298), + KQU(13105754025489230502), KQU(15471300118574167585), + KQU(89864764002355628), KQU(8776416323420466637), KQU(5280258630612040891), + KQU(2719174488591862912), KQU(7599309137399661994), + KQU(15012887256778039979), KQU(14062981725630928925), + KQU(12038536286991689603), KQU(7089756544681775245), + KQU(10376661532744718039), KQU(1265198725901533130), + KQU(13807996727081142408), KQU(2935019626765036403), + KQU(7651672460680700141), KQU(3644093016200370795), + KQU(2840982578090080674), KQU(17956262740157449201), + KQU(18267979450492880548), KQU(11799503659796848070), + KQU(9942537025669672388), KQU(11886606816406990297), + KQU(5488594946437447576), KQU(7226714353282744302), + KQU(3784851653123877043), KQU(878018453244803041), + KQU(12110022586268616085), KQU(734072179404675123), + KQU(11869573627998248542), KQU(469150421297783998), KQU(260151124912803804), + KQU(11639179410120968649), KQU(9318165193840846253), + KQU(12795671722734758075), KQU(15318410297267253933), + KQU(691524703570062620), KQU(5837129010576994601), + KQU(15045963859726941052), KQU(5850056944932238169), + KQU(12017434144750943807), 
KQU(7447139064928956574), + KQU(3101711812658245019), KQU(16052940704474982954), + KQU(18195745945986994042), KQU(8932252132785575659), + KQU(13390817488106794834), KQU(11582771836502517453), + KQU(4964411326683611686), KQU(2195093981702694011), + KQU(14145229538389675669), KQU(16459605532062271798), + KQU(866316924816482864), KQU(4593041209937286377), KQU(8415491391910972138), + KQU(4171236715600528969), KQU(16637569303336782889), + KQU(2002011073439212680), KQU(17695124661097601411), + KQU(4627687053598611702), KQU(7895831936020190403), + KQU(8455951300917267802), KQU(2923861649108534854), + KQU(8344557563927786255), KQU(6408671940373352556), + KQU(12210227354536675772), KQU(14294804157294222295), + KQU(10103022425071085127), KQU(10092959489504123771), + KQU(6554774405376736268), KQU(12629917718410641774), + KQU(6260933257596067126), KQU(2460827021439369673), + KQU(2541962996717103668), KQU(597377203127351475), KQU(5316984203117315309), + KQU(4811211393563241961), KQU(13119698597255811641), + KQU(8048691512862388981), KQU(10216818971194073842), + KQU(4612229970165291764), KQU(10000980798419974770), + KQU(6877640812402540687), KQU(1488727563290436992), + KQU(2227774069895697318), KQU(11237754507523316593), + KQU(13478948605382290972), KQU(1963583846976858124), + KQU(5512309205269276457), KQU(3972770164717652347), + KQU(3841751276198975037), KQU(10283343042181903117), + KQU(8564001259792872199), KQU(16472187244722489221), + KQU(8953493499268945921), KQU(3518747340357279580), + KQU(4003157546223963073), KQU(3270305958289814590), + KQU(3966704458129482496), KQU(8122141865926661939), + KQU(14627734748099506653), KQU(13064426990862560568), + KQU(2414079187889870829), KQU(5378461209354225306), + KQU(10841985740128255566), KQU(538582442885401738), + KQU(7535089183482905946), KQU(16117559957598879095), + KQU(8477890721414539741), KQU(1459127491209533386), + KQU(17035126360733620462), KQU(8517668552872379126), + KQU(10292151468337355014), KQU(17081267732745344157), + 
KQU(13751455337946087178), KQU(14026945459523832966), + KQU(6653278775061723516), KQU(10619085543856390441), + KQU(2196343631481122885), KQU(10045966074702826136), + KQU(10082317330452718282), KQU(5920859259504831242), + KQU(9951879073426540617), KQU(7074696649151414158), + KQU(15808193543879464318), KQU(7385247772746953374), + KQU(3192003544283864292), KQU(18153684490917593847), + KQU(12423498260668568905), KQU(10957758099756378169), + KQU(11488762179911016040), KQU(2099931186465333782), + KQU(11180979581250294432), KQU(8098916250668367933), + KQU(3529200436790763465), KQU(12988418908674681745), + KQU(6147567275954808580), KQU(3207503344604030989), + KQU(10761592604898615360), KQU(229854861031893504), + KQU(8809853962667144291), KQU(13957364469005693860), + KQU(7634287665224495886), KQU(12353487366976556874), + KQU(1134423796317152034), KQU(2088992471334107068), + KQU(7393372127190799698), KQU(1845367839871058391), KQU(207922563987322884), + KQU(11960870813159944976), KQU(12182120053317317363), + KQU(17307358132571709283), KQU(13871081155552824936), + KQU(18304446751741566262), KQU(7178705220184302849), + KQU(10929605677758824425), KQU(16446976977835806844), + KQU(13723874412159769044), KQU(6942854352100915216), + KQU(1726308474365729390), KQU(2150078766445323155), + KQU(15345558947919656626), KQU(12145453828874527201), + KQU(2054448620739726849), KQU(2740102003352628137), + KQU(11294462163577610655), KQU(756164283387413743), + KQU(17841144758438810880), KQU(10802406021185415861), + KQU(8716455530476737846), KQU(6321788834517649606), + KQU(14681322910577468426), KQU(17330043563884336387), + KQU(12701802180050071614), KQU(14695105111079727151), + KQU(5112098511654172830), KQU(4957505496794139973), + KQU(8270979451952045982), KQU(12307685939199120969), + KQU(12425799408953443032), KQU(8376410143634796588), + KQU(16621778679680060464), KQU(3580497854566660073), + KQU(1122515747803382416), KQU(857664980960597599), KQU(6343640119895925918), + KQU(12878473260854462891), 
KQU(10036813920765722626), + KQU(14451335468363173812), KQU(5476809692401102807), + KQU(16442255173514366342), KQU(13060203194757167104), + KQU(14354124071243177715), KQU(15961249405696125227), + KQU(13703893649690872584), KQU(363907326340340064), + KQU(6247455540491754842), KQU(12242249332757832361), + KQU(156065475679796717), KQU(9351116235749732355), KQU(4590350628677701405), + KQU(1671195940982350389), KQU(13501398458898451905), + KQU(6526341991225002255), KQU(1689782913778157592), + KQU(7439222350869010334), KQU(13975150263226478308), + KQU(11411961169932682710), KQU(17204271834833847277), + KQU(541534742544435367), KQU(6591191931218949684), KQU(2645454775478232486), + KQU(4322857481256485321), KQU(8477416487553065110), + KQU(12902505428548435048), KQU(971445777981341415), + KQU(14995104682744976712), KQU(4243341648807158063), + KQU(8695061252721927661), KQU(5028202003270177222), + KQU(2289257340915567840), KQU(13870416345121866007), + KQU(13994481698072092233), KQU(6912785400753196481), + KQU(2278309315841980139), KQU(4329765449648304839), + KQU(5963108095785485298), KQU(4880024847478722478), + KQU(16015608779890240947), KQU(1866679034261393544), + KQU(914821179919731519), KQU(9643404035648760131), KQU(2418114953615593915), + KQU(944756836073702374), KQU(15186388048737296834), + KQU(7723355336128442206), KQU(7500747479679599691), + KQU(18013961306453293634), KQU(2315274808095756456), + KQU(13655308255424029566), KQU(17203800273561677098), + KQU(1382158694422087756), KQU(5090390250309588976), KQU(517170818384213989), + KQU(1612709252627729621), KQU(1330118955572449606), KQU(300922478056709885), + KQU(18115693291289091987), KQU(13491407109725238321), + KQU(15293714633593827320), KQU(5151539373053314504), + KQU(5951523243743139207), KQU(14459112015249527975), + KQU(5456113959000700739), KQU(3877918438464873016), + KQU(12534071654260163555), KQU(15871678376893555041), + KQU(11005484805712025549), KQU(16353066973143374252), + KQU(4358331472063256685), 
KQU(8268349332210859288), + KQU(12485161590939658075), KQU(13955993592854471343), + KQU(5911446886848367039), KQU(14925834086813706974), + KQU(6590362597857994805), KQU(1280544923533661875), + KQU(1637756018947988164), KQU(4734090064512686329), + KQU(16693705263131485912), KQU(6834882340494360958), + KQU(8120732176159658505), KQU(2244371958905329346), + KQU(10447499707729734021), KQU(7318742361446942194), + KQU(8032857516355555296), KQU(14023605983059313116), + KQU(1032336061815461376), KQU(9840995337876562612), + KQU(9869256223029203587), KQU(12227975697177267636), + KQU(12728115115844186033), KQU(7752058479783205470), + KQU(729733219713393087), KQU(12954017801239007622)}; +static const uint64_t init_by_array_64_expected[] = {KQU(2100341266307895239), + KQU(8344256300489757943), KQU(15687933285484243894), + KQU(8268620370277076319), KQU(12371852309826545459), + KQU(8800491541730110238), KQU(18113268950100835773), + KQU(2886823658884438119), KQU(3293667307248180724), + KQU(9307928143300172731), KQU(7688082017574293629), KQU(900986224735166665), + KQU(9977972710722265039), KQU(6008205004994830552), KQU(546909104521689292), + KQU(7428471521869107594), KQU(14777563419314721179), + KQU(16116143076567350053), KQU(5322685342003142329), + KQU(4200427048445863473), KQU(4693092150132559146), + KQU(13671425863759338582), KQU(6747117460737639916), + KQU(4732666080236551150), KQU(5912839950611941263), + KQU(3903717554504704909), KQU(2615667650256786818), + KQU(10844129913887006352), KQU(13786467861810997820), + KQU(14267853002994021570), KQU(13767807302847237439), + KQU(16407963253707224617), KQU(4802498363698583497), + KQU(2523802839317209764), KQU(3822579397797475589), + KQU(8950320572212130610), KQU(3745623504978342534), + KQU(16092609066068482806), KQU(9817016950274642398), + KQU(10591660660323829098), KQU(11751606650792815920), + KQU(5122873818577122211), KQU(17209553764913936624), + KQU(6249057709284380343), KQU(15088791264695071830), + KQU(15344673071709851930), 
KQU(4345751415293646084), + KQU(2542865750703067928), KQU(13520525127852368784), + KQU(18294188662880997241), KQU(3871781938044881523), + KQU(2873487268122812184), KQU(15099676759482679005), + KQU(15442599127239350490), KQU(6311893274367710888), + KQU(3286118760484672933), KQU(4146067961333542189), + KQU(13303942567897208770), KQU(8196013722255630418), + KQU(4437815439340979989), KQU(15433791533450605135), + KQU(4254828956815687049), KQU(1310903207708286015), + KQU(10529182764462398549), KQU(14900231311660638810), + KQU(9727017277104609793), KQU(1821308310948199033), + KQU(11628861435066772084), KQU(9469019138491546924), + KQU(3145812670532604988), KQU(9938468915045491919), + KQU(1562447430672662142), KQU(13963995266697989134), + KQU(3356884357625028695), KQU(4499850304584309747), + KQU(8456825817023658122), KQU(10859039922814285279), + KQU(8099512337972526555), KQU(348006375109672149), + KQU(11919893998241688603), KQU(1104199577402948826), + KQU(16689191854356060289), KQU(10992552041730168078), + KQU(7243733172705465836), KQU(5668075606180319560), + KQU(18182847037333286970), KQU(4290215357664631322), + KQU(4061414220791828613), KQU(13006291061652989604), + KQU(7140491178917128798), KQU(12703446217663283481), + KQU(5500220597564558267), KQU(10330551509971296358), + KQU(15958554768648714492), KQU(5174555954515360045), + KQU(1731318837687577735), KQU(3557700801048354857), + KQU(13764012341928616198), KQU(13115166194379119043), + KQU(7989321021560255519), KQU(2103584280905877040), + KQU(9230788662155228488), KQU(16396629323325547654), + KQU(657926409811318051), KQU(15046700264391400727), + KQU(5120132858771880830), KQU(7934160097989028561), + KQU(6963121488531976245), KQU(17412329602621742089), + KQU(15144843053931774092), KQU(17204176651763054532), + KQU(13166595387554065870), KQU(8590377810513960213), + KQU(5834365135373991938), KQU(7640913007182226243), + KQU(3479394703859418425), KQU(16402784452644521040), + KQU(4993979809687083980), KQU(13254522168097688865), + 
KQU(15643659095244365219), KQU(5881437660538424982), + KQU(11174892200618987379), KQU(254409966159711077), + KQU(17158413043140549909), KQU(3638048789290376272), + KQU(1376816930299489190), KQU(4622462095217761923), + KQU(15086407973010263515), KQU(13253971772784692238), + KQU(5270549043541649236), KQU(11182714186805411604), + KQU(12283846437495577140), KQU(5297647149908953219), + KQU(10047451738316836654), KQU(4938228100367874746), + KQU(12328523025304077923), KQU(3601049438595312361), + KQU(9313624118352733770), KQU(13322966086117661798), + KQU(16660005705644029394), KQU(11337677526988872373), + KQU(13869299102574417795), KQU(15642043183045645437), + KQU(3021755569085880019), KQU(4979741767761188161), + KQU(13679979092079279587), KQU(3344685842861071743), + KQU(13947960059899588104), KQU(305806934293368007), + KQU(5749173929201650029), KQU(11123724852118844098), + KQU(15128987688788879802), KQU(15251651211024665009), + KQU(7689925933816577776), KQU(16732804392695859449), + KQU(17087345401014078468), KQU(14315108589159048871), + KQU(4820700266619778917), KQU(16709637539357958441), + KQU(4936227875177351374), KQU(2137907697912987247), + KQU(11628565601408395420), KQU(2333250549241556786), + KQU(5711200379577778637), KQU(5170680131529031729), + KQU(12620392043061335164), KQU(95363390101096078), KQU(5487981914081709462), + KQU(1763109823981838620), KQU(3395861271473224396), + KQU(1300496844282213595), KQU(6894316212820232902), + KQU(10673859651135576674), KQU(5911839658857903252), + KQU(17407110743387299102), KQU(8257427154623140385), + KQU(11389003026741800267), KQU(4070043211095013717), + KQU(11663806997145259025), KQU(15265598950648798210), + KQU(630585789434030934), KQU(3524446529213587334), KQU(7186424168495184211), + KQU(10806585451386379021), KQU(11120017753500499273), + KQU(1586837651387701301), KQU(17530454400954415544), + KQU(9991670045077880430), KQU(7550997268990730180), + KQU(8640249196597379304), KQU(3522203892786893823), + KQU(10401116549878854788), 
KQU(13690285544733124852), + KQU(8295785675455774586), KQU(15535716172155117603), + KQU(3112108583723722511), KQU(17633179955339271113), + KQU(18154208056063759375), KQU(1866409236285815666), + KQU(13326075895396412882), KQU(8756261842948020025), + KQU(6281852999868439131), KQU(15087653361275292858), + KQU(10333923911152949397), KQU(5265567645757408500), + KQU(12728041843210352184), KQU(6347959327507828759), + KQU(154112802625564758), KQU(18235228308679780218), + KQU(3253805274673352418), KQU(4849171610689031197), + KQU(17948529398340432518), KQU(13803510475637409167), + KQU(13506570190409883095), KQU(15870801273282960805), + KQU(8451286481299170773), KQU(9562190620034457541), + KQU(8518905387449138364), KQU(12681306401363385655), + KQU(3788073690559762558), KQU(5256820289573487769), + KQU(2752021372314875467), KQU(6354035166862520716), + KQU(4328956378309739069), KQU(449087441228269600), KQU(5533508742653090868), + KQU(1260389420404746988), KQU(18175394473289055097), + KQU(1535467109660399420), KQU(8818894282874061442), + KQU(12140873243824811213), KQU(15031386653823014946), + KQU(1286028221456149232), KQU(6329608889367858784), + KQU(9419654354945132725), KQU(6094576547061672379), + KQU(17706217251847450255), KQU(1733495073065878126), + KQU(16918923754607552663), KQU(8881949849954945044), + KQU(12938977706896313891), KQU(14043628638299793407), + KQU(18393874581723718233), KQU(6886318534846892044), + KQU(14577870878038334081), KQU(13541558383439414119), + KQU(13570472158807588273), KQU(18300760537910283361), + KQU(818368572800609205), KQU(1417000585112573219), + KQU(12337533143867683655), KQU(12433180994702314480), + KQU(778190005829189083), KQU(13667356216206524711), + KQU(9866149895295225230), KQU(11043240490417111999), + KQU(1123933826541378598), KQU(6469631933605123610), + KQU(14508554074431980040), KQU(13918931242962026714), + KQU(2870785929342348285), KQU(14786362626740736974), + KQU(13176680060902695786), KQU(9591778613541679456), + KQU(9097662885117436706), 
KQU(749262234240924947), KQU(1944844067793307093), + KQU(4339214904577487742), KQU(8009584152961946551), + KQU(16073159501225501777), KQU(3335870590499306217), + KQU(17088312653151202847), KQU(3108893142681931848), + KQU(16636841767202792021), KQU(10423316431118400637), + KQU(8008357368674443506), KQU(11340015231914677875), + KQU(17687896501594936090), KQU(15173627921763199958), + KQU(542569482243721959), KQU(15071714982769812975), + KQU(4466624872151386956), KQU(1901780715602332461), + KQU(9822227742154351098), KQU(1479332892928648780), + KQU(6981611948382474400), KQU(7620824924456077376), + KQU(14095973329429406782), KQU(7902744005696185404), + KQU(15830577219375036920), KQU(10287076667317764416), + KQU(12334872764071724025), KQU(4419302088133544331), + KQU(14455842851266090520), KQU(12488077416504654222), + KQU(7953892017701886766), KQU(6331484925529519007), + KQU(4902145853785030022), KQU(17010159216096443073), + KQU(11945354668653886087), KQU(15112022728645230829), + KQU(17363484484522986742), KQU(4423497825896692887), + KQU(8155489510809067471), KQU(258966605622576285), KQU(5462958075742020534), + KQU(6763710214913276228), KQU(2368935183451109054), + KQU(14209506165246453811), KQU(2646257040978514881), + KQU(3776001911922207672), KQU(1419304601390147631), + KQU(14987366598022458284), KQU(3977770701065815721), + KQU(730820417451838898), KQU(3982991703612885327), KQU(2803544519671388477), + KQU(17067667221114424649), KQU(2922555119737867166), + KQU(1989477584121460932), KQU(15020387605892337354), + KQU(9293277796427533547), KQU(10722181424063557247), + KQU(16704542332047511651), KQU(5008286236142089514), + KQU(16174732308747382540), KQU(17597019485798338402), + KQU(13081745199110622093), KQU(8850305883842258115), + KQU(12723629125624589005), KQU(8140566453402805978), + KQU(15356684607680935061), KQU(14222190387342648650), + KQU(11134610460665975178), KQU(1259799058620984266), + KQU(13281656268025610041), KQU(298262561068153992), + KQU(12277871700239212922), 
KQU(13911297774719779438), + KQU(16556727962761474934), KQU(17903010316654728010), + KQU(9682617699648434744), KQU(14757681836838592850), + KQU(1327242446558524473), KQU(11126645098780572792), + KQU(1883602329313221774), KQU(2543897783922776873), + KQU(15029168513767772842), KQU(12710270651039129878), + KQU(16118202956069604504), KQU(15010759372168680524), + KQU(2296827082251923948), KQU(10793729742623518101), + KQU(13829764151845413046), KQU(17769301223184451213), + KQU(3118268169210783372), KQU(17626204544105123127), + KQU(7416718488974352644), KQU(10450751996212925994), + KQU(9352529519128770586), KQU(259347569641110140), KQU(8048588892269692697), + KQU(1774414152306494058), KQU(10669548347214355622), + KQU(13061992253816795081), KQU(18432677803063861659), + KQU(8879191055593984333), KQU(12433753195199268041), + KQU(14919392415439730602), KQU(6612848378595332963), + KQU(6320986812036143628), KQU(10465592420226092859), + KQU(4196009278962570808), KQU(3747816564473572224), + KQU(17941203486133732898), KQU(2350310037040505198), + KQU(5811779859134370113), KQU(10492109599506195126), + KQU(7699650690179541274), KQU(1954338494306022961), + KQU(14095816969027231152), KQU(5841346919964852061), + KQU(14945969510148214735), KQU(3680200305887550992), + KQU(6218047466131695792), KQU(8242165745175775096), + KQU(11021371934053307357), KQU(1265099502753169797), + KQU(4644347436111321718), KQU(3609296916782832859), + KQU(8109807992218521571), KQU(18387884215648662020), + KQU(14656324896296392902), KQU(17386819091238216751), + KQU(17788300878582317152), KQU(7919446259742399591), + KQU(4466613134576358004), KQU(12928181023667938509), + KQU(13147446154454932030), KQU(16552129038252734620), + KQU(8395299403738822450), KQU(11313817655275361164), + KQU(434258809499511718), KQU(2074882104954788676), KQU(7929892178759395518), + KQU(9006461629105745388), KQU(5176475650000323086), + KQU(11128357033468341069), KQU(12026158851559118955), + KQU(14699716249471156500), 
KQU(448982497120206757), + KQU(4156475356685519900), KQU(6063816103417215727), + KQU(10073289387954971479), KQU(8174466846138590962), + KQU(2675777452363449006), KQU(9090685420572474281), + KQU(6659652652765562060), KQU(12923120304018106621), + KQU(11117480560334526775), KQU(937910473424587511), + KQU(1838692113502346645), KQU(11133914074648726180), + KQU(7922600945143884053), KQU(13435287702700959550), + KQU(5287964921251123332), KQU(11354875374575318947), + KQU(17955724760748238133), KQU(13728617396297106512), + KQU(4107449660118101255), KQU(1210269794886589623), + KQU(11408687205733456282), KQU(4538354710392677887), + KQU(13566803319341319267), KQU(17870798107734050771), + KQU(3354318982568089135), KQU(9034450839405133651), + KQU(13087431795753424314), KQU(950333102820688239), + KQU(1968360654535604116), KQU(16840551645563314995), + KQU(8867501803892924995), KQU(11395388644490626845), + KQU(1529815836300732204), KQU(13330848522996608842), + KQU(1813432878817504265), KQU(2336867432693429560), + KQU(15192805445973385902), KQU(2528593071076407877), + KQU(128459777936689248), KQU(9976345382867214866), KQU(6208885766767996043), + KQU(14982349522273141706), KQU(3099654362410737822), + KQU(13776700761947297661), KQU(8806185470684925550), + KQU(8151717890410585321), KQU(640860591588072925), + KQU(14592096303937307465), KQU(9056472419613564846), + KQU(14861544647742266352), KQU(12703771500398470216), + KQU(3142372800384138465), KQU(6201105606917248196), + KQU(18337516409359270184), KQU(15042268695665115339), + KQU(15188246541383283846), KQU(12800028693090114519), + KQU(5992859621101493472), KQU(18278043971816803521), + KQU(9002773075219424560), KQU(7325707116943598353), + KQU(7930571931248040822), KQU(5645275869617023448), + KQU(7266107455295958487), KQU(4363664528273524411), + KQU(14313875763787479809), KQU(17059695613553486802), + KQU(9247761425889940932), KQU(13704726459237593128), + KQU(2701312427328909832), KQU(17235532008287243115), + KQU(14093147761491729538), 
KQU(6247352273768386516), + KQU(8268710048153268415), KQU(7985295214477182083), + KQU(15624495190888896807), KQU(3772753430045262788), + KQU(9133991620474991698), KQU(5665791943316256028), + KQU(7551996832462193473), KQU(13163729206798953877), + KQU(9263532074153846374), KQU(1015460703698618353), + KQU(17929874696989519390), KQU(18257884721466153847), + KQU(16271867543011222991), KQU(3905971519021791941), + KQU(16814488397137052085), KQU(1321197685504621613), + KQU(2870359191894002181), KQU(14317282970323395450), + KQU(13663920845511074366), KQU(2052463995796539594), + KQU(14126345686431444337), KQU(1727572121947022534), + KQU(17793552254485594241), KQU(6738857418849205750), + KQU(1282987123157442952), KQU(16655480021581159251), + KQU(6784587032080183866), KQU(14726758805359965162), + KQU(7577995933961987349), KQU(12539609320311114036), + KQU(10789773033385439494), KQU(8517001497411158227), + KQU(10075543932136339710), KQU(14838152340938811081), + KQU(9560840631794044194), KQU(17445736541454117475), + KQU(10633026464336393186), KQU(15705729708242246293), + KQU(1117517596891411098), KQU(4305657943415886942), + KQU(4948856840533979263), KQU(16071681989041789593), + KQU(13723031429272486527), KQU(7639567622306509462), + KQU(12670424537483090390), KQU(9715223453097197134), + KQU(5457173389992686394), KQU(289857129276135145), + KQU(17048610270521972512), KQU(692768013309835485), + KQU(14823232360546632057), KQU(18218002361317895936), + KQU(3281724260212650204), KQU(16453957266549513795), + KQU(8592711109774511881), KQU(929825123473369579), + KQU(15966784769764367791), KQU(9627344291450607588), + KQU(10849555504977813287), KQU(9234566913936339275), + KQU(6413807690366911210), KQU(10862389016184219267), + KQU(13842504799335374048), KQU(1531994113376881174), + KQU(2081314867544364459), KQU(16430628791616959932), + KQU(8314714038654394368), KQU(9155473892098431813), + KQU(12577843786670475704), KQU(4399161106452401017), + KQU(1668083091682623186), KQU(1741383777203714216), 
+ KQU(2162597285417794374), KQU(15841980159165218736), + KQU(1971354603551467079), KQU(1206714764913205968), + KQU(4790860439591272330), KQU(14699375615594055799), + KQU(8374423871657449988), KQU(10950685736472937738), + KQU(697344331343267176), KQU(10084998763118059810), + KQU(12897369539795983124), KQU(12351260292144383605), + KQU(1268810970176811234), KQU(7406287800414582768), KQU(516169557043807831), + KQU(5077568278710520380), KQU(3828791738309039304), + KQU(7721974069946943610), KQU(3534670260981096460), + KQU(4865792189600584891), KQU(16892578493734337298), + KQU(9161499464278042590), KQU(11976149624067055931), + KQU(13219479887277343990), KQU(14161556738111500680), + KQU(14670715255011223056), KQU(4671205678403576558), + KQU(12633022931454259781), KQU(14821376219869187646), + KQU(751181776484317028), KQU(2192211308839047070), + KQU(11787306362361245189), KQU(10672375120744095707), + KQU(4601972328345244467), KQU(15457217788831125879), + KQU(8464345256775460809), KQU(10191938789487159478), + KQU(6184348739615197613), KQU(11425436778806882100), + KQU(2739227089124319793), KQU(461464518456000551), KQU(4689850170029177442), + KQU(6120307814374078625), KQU(11153579230681708671), + KQU(7891721473905347926), KQU(10281646937824872400), + KQU(3026099648191332248), KQU(8666750296953273818), + KQU(14978499698844363232), KQU(13303395102890132065), + KQU(8182358205292864080), KQU(10560547713972971291), + KQU(11981635489418959093), KQU(3134621354935288409), + KQU(11580681977404383968), KQU(14205530317404088650), + KQU(5997789011854923157), KQU(13659151593432238041), + KQU(11664332114338865086), KQU(7490351383220929386), + KQU(7189290499881530378), KQU(15039262734271020220), + KQU(2057217285976980055), KQU(555570804905355739), + KQU(11235311968348555110), KQU(13824557146269603217), + KQU(16906788840653099693), KQU(7222878245455661677), + KQU(5245139444332423756), KQU(4723748462805674292), + KQU(12216509815698568612), KQU(17402362976648951187), + KQU(17389614836810366768), 
KQU(4880936484146667711), + KQU(9085007839292639880), KQU(13837353458498535449), + KQU(11914419854360366677), KQU(16595890135313864103), + KQU(6313969847197627222), KQU(18296909792163910431), + KQU(10041780113382084042), KQU(2499478551172884794), + KQU(11057894246241189489), KQU(9742243032389068555), + KQU(12838934582673196228), KQU(13437023235248490367), + KQU(13372420669446163240), KQU(6752564244716909224), + KQU(7157333073400313737), KQU(12230281516370654308), + KQU(1182884552219419117), KQU(2955125381312499218), + KQU(10308827097079443249), KQU(1337648572986534958), + KQU(16378788590020343939), KQU(108619126514420935), + KQU(3990981009621629188), KQU(5460953070230946410), + KQU(9703328329366531883), KQU(13166631489188077236), + KQU(1104768831213675170), KQU(3447930458553877908), + KQU(8067172487769945676), KQU(5445802098190775347), + KQU(3244840981648973873), KQU(17314668322981950060), + KQU(5006812527827763807), KQU(18158695070225526260), + KQU(2824536478852417853), KQU(13974775809127519886), + KQU(9814362769074067392), KQU(17276205156374862128), + KQU(11361680725379306967), KQU(3422581970382012542), + KQU(11003189603753241266), KQU(11194292945277862261), + KQU(6839623313908521348), KQU(11935326462707324634), + KQU(1611456788685878444), KQU(13112620989475558907), + KQU(517659108904450427), KQU(13558114318574407624), + KQU(15699089742731633077), KQU(4988979278862685458), + KQU(8111373583056521297), KQU(3891258746615399627), + KQU(8137298251469718086), KQU(12748663295624701649), + KQU(4389835683495292062), KQU(5775217872128831729), + KQU(9462091896405534927), KQU(8498124108820263989), + KQU(8059131278842839525), KQU(10503167994254090892), + KQU(11613153541070396656), KQU(18069248738504647790), + KQU(570657419109768508), KQU(3950574167771159665), KQU(5514655599604313077), + KQU(2908460854428484165), KQU(10777722615935663114), + KQU(12007363304839279486), KQU(9800646187569484767), + KQU(8795423564889864287), KQU(14257396680131028419), + KQU(6405465117315096498), 
KQU(7939411072208774878), + KQU(17577572378528990006), KQU(14785873806715994850), + KQU(16770572680854747390), KQU(18127549474419396481), + KQU(11637013449455757750), KQU(14371851933996761086), + KQU(3601181063650110280), KQU(4126442845019316144), + KQU(10198287239244320669), KQU(18000169628555379659), + KQU(18392482400739978269), KQU(6219919037686919957), + KQU(3610085377719446052), KQU(2513925039981776336), + KQU(16679413537926716955), KQU(12903302131714909434), + KQU(5581145789762985009), KQU(12325955044293303233), + KQU(17216111180742141204), KQU(6321919595276545740), + KQU(3507521147216174501), KQU(9659194593319481840), + KQU(11473976005975358326), KQU(14742730101435987026), + KQU(492845897709954780), KQU(16976371186162599676), + KQU(17712703422837648655), KQU(9881254778587061697), + KQU(8413223156302299551), KQU(1563841828254089168), + KQU(9996032758786671975), KQU(138877700583772667), + KQU(13003043368574995989), KQU(4390573668650456587), + KQU(8610287390568126755), KQU(15126904974266642199), + KQU(6703637238986057662), KQU(2873075592956810157), + KQU(6035080933946049418), KQU(13382846581202353014), + KQU(7303971031814642463), KQU(18418024405307444267), + KQU(5847096731675404647), KQU(4035880699639842500), + KQU(11525348625112218478), KQU(3041162365459574102), + KQU(2604734487727986558), KQU(15526341771636983145), + KQU(14556052310697370254), KQU(12997787077930808155), + KQU(9601806501755554499), KQU(11349677952521423389), + KQU(14956777807644899350), KQU(16559736957742852721), + KQU(12360828274778140726), KQU(6685373272009662513), + KQU(16932258748055324130), KQU(15918051131954158508), + KQU(1692312913140790144), KQU(546653826801637367), KQU(5341587076045986652), + KQU(14975057236342585662), KQU(12374976357340622412), + KQU(10328833995181940552), KQU(12831807101710443149), + KQU(10548514914382545716), KQU(2217806727199715993), + KQU(12627067369242845138), KQU(4598965364035438158), + KQU(150923352751318171), KQU(14274109544442257283), + 
KQU(4696661475093863031), KQU(1505764114384654516), + KQU(10699185831891495147), KQU(2392353847713620519), + KQU(3652870166711788383), KQU(8640653276221911108), + KQU(3894077592275889704), KQU(4918592872135964845), + KQU(16379121273281400789), KQU(12058465483591683656), + KQU(11250106829302924945), KQU(1147537556296983005), + KQU(6376342756004613268), KQU(14967128191709280506), + KQU(18007449949790627628), KQU(9497178279316537841), + KQU(7920174844809394893), KQU(10037752595255719907), + KQU(15875342784985217697), KQU(15311615921712850696), + KQU(9552902652110992950), KQU(14054979450099721140), + KQU(5998709773566417349), KQU(18027910339276320187), + KQU(8223099053868585554), KQU(7842270354824999767), + KQU(4896315688770080292), KQU(12969320296569787895), + KQU(2674321489185759961), KQU(4053615936864718439), + KQU(11349775270588617578), KQU(4743019256284553975), + KQU(5602100217469723769), KQU(14398995691411527813), + KQU(7412170493796825470), KQU(836262406131744846), KQU(8231086633845153022), + KQU(5161377920438552287), KQU(8828731196169924949), + KQU(16211142246465502680), KQU(3307990879253687818), + KQU(5193405406899782022), KQU(8510842117467566693), + KQU(6070955181022405365), KQU(14482950231361409799), + KQU(12585159371331138077), KQU(3511537678933588148), + KQU(2041849474531116417), KQU(10944936685095345792), + KQU(18303116923079107729), KQU(2720566371239725320), + KQU(4958672473562397622), KQU(3032326668253243412), + KQU(13689418691726908338), KQU(1895205511728843996), + KQU(8146303515271990527), KQU(16507343500056113480), + KQU(473996939105902919), KQU(9897686885246881481), + KQU(14606433762712790575), KQU(6732796251605566368), + KQU(1399778120855368916), KQU(935023885182833777), + KQU(16066282816186753477), KQU(7291270991820612055), + KQU(17530230393129853844), KQU(10223493623477451366), + KQU(15841725630495676683), KQU(17379567246435515824), + KQU(8588251429375561971), KQU(18339511210887206423), + KQU(17349587430725976100), KQU(12244876521394838088), + 
KQU(6382187714147161259), KQU(12335807181848950831), + KQU(16948885622305460665), KQU(13755097796371520506), + KQU(14806740373324947801), KQU(4828699633859287703), + KQU(8209879281452301604), KQU(12435716669553736437), + KQU(13970976859588452131), KQU(6233960842566773148), + KQU(12507096267900505759), KQU(1198713114381279421), + KQU(14989862731124149015), KQU(15932189508707978949), + KQU(2526406641432708722), KQU(29187427817271982), KQU(1499802773054556353), + KQU(10816638187021897173), KQU(5436139270839738132), + KQU(6659882287036010082), KQU(2154048955317173697), + KQU(10887317019333757642), KQU(16281091802634424955), + KQU(10754549879915384901), KQU(10760611745769249815), + KQU(2161505946972504002), KQU(5243132808986265107), + KQU(10129852179873415416), KQU(710339480008649081), + KQU(7802129453068808528), KQU(17967213567178907213), + KQU(15730859124668605599), KQU(13058356168962376502), + KQU(3701224985413645909), KQU(14464065869149109264), + KQU(9959272418844311646), KQU(10157426099515958752), + KQU(14013736814538268528), KQU(17797456992065653951), + KQU(17418878140257344806), KQU(15457429073540561521), + KQU(2184426881360949378), KQU(2062193041154712416), + KQU(8553463347406931661), KQU(4913057625202871854), + KQU(2668943682126618425), KQU(17064444737891172288), + KQU(4997115903913298637), KQU(12019402608892327416), + KQU(17603584559765897352), KQU(11367529582073647975), + KQU(8211476043518436050), KQU(8676849804070323674), + KQU(18431829230394475730), KQU(10490177861361247904), + KQU(9508720602025651349), KQU(7409627448555722700), + KQU(5804047018862729008), KQU(11943858176893142594), + KQU(11908095418933847092), KQU(5415449345715887652), + KQU(1554022699166156407), KQU(9073322106406017161), + KQU(7080630967969047082), KQU(18049736940860732943), + KQU(12748714242594196794), KQU(1226992415735156741), + KQU(17900981019609531193), KQU(11720739744008710999), + KQU(3006400683394775434), KQU(11347974011751996028), + KQU(3316999628257954608), 
KQU(8384484563557639101), + KQU(18117794685961729767), KQU(1900145025596618194), + KQU(17459527840632892676), KQU(5634784101865710994), + KQU(7918619300292897158), KQU(3146577625026301350), + KQU(9955212856499068767), KQU(1873995843681746975), + KQU(1561487759967972194), KQU(8322718804375878474), + KQU(11300284215327028366), KQU(4667391032508998982), + KQU(9820104494306625580), KQU(17922397968599970610), + KQU(1784690461886786712), KQU(14940365084341346821), + KQU(5348719575594186181), KQU(10720419084507855261), + KQU(14210394354145143274), KQU(2426468692164000131), + KQU(16271062114607059202), KQU(14851904092357070247), + KQU(6524493015693121897), KQU(9825473835127138531), + KQU(14222500616268569578), KQU(15521484052007487468), + KQU(14462579404124614699), KQU(11012375590820665520), + KQU(11625327350536084927), KQU(14452017765243785417), + KQU(9989342263518766305), KQU(3640105471101803790), + KQU(4749866455897513242), KQU(13963064946736312044), + KQU(10007416591973223791), KQU(18314132234717431115), + KQU(3286596588617483450), KQU(7726163455370818765), + KQU(7575454721115379328), KQU(5308331576437663422), + KQU(18288821894903530934), KQU(8028405805410554106), + KQU(15744019832103296628), KQU(149765559630932100), + KQU(6137705557200071977), KQU(14513416315434803615), + KQU(11665702820128984473), KQU(218926670505601386), + KQU(6868675028717769519), KQU(15282016569441512302), + KQU(5707000497782960236), KQU(6671120586555079567), + KQU(2194098052618985448), KQU(16849577895477330978), + KQU(12957148471017466283), KQU(1997805535404859393), + KQU(1180721060263860490), KQU(13206391310193756958), + KQU(12980208674461861797), KQU(3825967775058875366), + KQU(17543433670782042631), KQU(1518339070120322730), + KQU(16344584340890991669), KQU(2611327165318529819), + KQU(11265022723283422529), KQU(4001552800373196817), + KQU(14509595890079346161), KQU(3528717165416234562), + KQU(18153222571501914072), KQU(9387182977209744425), + KQU(10064342315985580021), 
KQU(11373678413215253977), + KQU(2308457853228798099), KQU(9729042942839545302), + KQU(7833785471140127746), KQU(6351049900319844436), + KQU(14454610627133496067), KQU(12533175683634819111), + KQU(15570163926716513029), KQU(13356980519185762498)}; TEST_BEGIN(test_gen_rand_32) { uint32_t array32[BLOCK_SIZE] JEMALLOC_ATTR(aligned(16)); uint32_t array32_2[BLOCK_SIZE] JEMALLOC_ATTR(aligned(16)); - int i; + int i; uint32_t r32; - sfmt_t *ctx; + sfmt_t *ctx; - expect_d_le(get_min_array_size32(), BLOCK_SIZE, - "Array size too small"); + expect_d_le(get_min_array_size32(), BLOCK_SIZE, "Array size too small"); ctx = init_gen_rand(1234); fill_array32(ctx, array32, BLOCK_SIZE); fill_array32(ctx, array32_2, BLOCK_SIZE); @@ -1486,13 +1405,12 @@ TEST_END TEST_BEGIN(test_by_array_32) { uint32_t array32[BLOCK_SIZE] JEMALLOC_ATTR(aligned(16)); uint32_t array32_2[BLOCK_SIZE] JEMALLOC_ATTR(aligned(16)); - int i; + int i; uint32_t ini[4] = {0x1234, 0x5678, 0x9abc, 0xdef0}; uint32_t r32; - sfmt_t *ctx; + sfmt_t *ctx; - expect_d_le(get_min_array_size32(), BLOCK_SIZE, - "Array size too small"); + expect_d_le(get_min_array_size32(), BLOCK_SIZE, "Array size too small"); ctx = init_by_array(ini, 4); fill_array32(ctx, array32, BLOCK_SIZE); fill_array32(ctx, array32_2, BLOCK_SIZE); @@ -1521,12 +1439,12 @@ TEST_END TEST_BEGIN(test_gen_rand_64) { uint64_t array64[BLOCK_SIZE64] JEMALLOC_ATTR(aligned(16)); uint64_t array64_2[BLOCK_SIZE64] JEMALLOC_ATTR(aligned(16)); - int i; + int i; uint64_t r; - sfmt_t *ctx; + sfmt_t *ctx; - expect_d_le(get_min_array_size64(), BLOCK_SIZE64, - "Array size too small"); + expect_d_le( + get_min_array_size64(), BLOCK_SIZE64, "Array size too small"); ctx = init_gen_rand(4321); fill_array64(ctx, array64, BLOCK_SIZE64); fill_array64(ctx, array64_2, BLOCK_SIZE64); @@ -1540,13 +1458,13 @@ TEST_BEGIN(test_gen_rand_64) { } r = gen_rand64(ctx); expect_u64_eq(r, array64[i], - "Mismatch at array64[%d]=%"FMTx64", gen=%"FMTx64, i, + "Mismatch at array64[%d]=%" FMTx64 ", gen=%" 
FMTx64, i, array64[i], r); } for (i = 0; i < COUNT_2; i++) { r = gen_rand64(ctx); expect_u64_eq(r, array64_2[i], - "Mismatch at array64_2[%d]=%"FMTx64" gen=%"FMTx64"", i, + "Mismatch at array64_2[%d]=%" FMTx64 " gen=%" FMTx64 "", i, array64_2[i], r); } fini_gen_rand(ctx); @@ -1556,13 +1474,13 @@ TEST_END TEST_BEGIN(test_by_array_64) { uint64_t array64[BLOCK_SIZE64] JEMALLOC_ATTR(aligned(16)); uint64_t array64_2[BLOCK_SIZE64] JEMALLOC_ATTR(aligned(16)); - int i; + int i; uint64_t r; uint32_t ini[] = {5, 4, 3, 2, 1}; - sfmt_t *ctx; + sfmt_t *ctx; - expect_d_le(get_min_array_size64(), BLOCK_SIZE64, - "Array size too small"); + expect_d_le( + get_min_array_size64(), BLOCK_SIZE64, "Array size too small"); ctx = init_by_array(ini, 5); fill_array64(ctx, array64, BLOCK_SIZE64); fill_array64(ctx, array64_2, BLOCK_SIZE64); @@ -1576,13 +1494,13 @@ TEST_BEGIN(test_by_array_64) { } r = gen_rand64(ctx); expect_u64_eq(r, array64[i], - "Mismatch at array64[%d]=%"FMTx64" gen=%"FMTx64, i, + "Mismatch at array64[%d]=%" FMTx64 " gen=%" FMTx64, i, array64[i], r); } for (i = 0; i < COUNT_2; i++) { r = gen_rand64(ctx); expect_u64_eq(r, array64_2[i], - "Mismatch at array64_2[%d]=%"FMTx64" gen=%"FMTx64, i, + "Mismatch at array64_2[%d]=%" FMTx64 " gen=%" FMTx64, i, array64_2[i], r); } fini_gen_rand(ctx); @@ -1591,9 +1509,6 @@ TEST_END int main(void) { - return test( - test_gen_rand_32, - test_by_array_32, - test_gen_rand_64, + return test(test_gen_rand_32, test_by_array_32, test_gen_rand_64, test_by_array_64); } diff --git a/test/unit/a0.c b/test/unit/a0.c index c1be79a6..63d792d2 100644 --- a/test/unit/a0.c +++ b/test/unit/a0.c @@ -11,6 +11,5 @@ TEST_END int main(void) { - return test_no_malloc_init( - test_a0); + return test_no_malloc_init(test_a0); } diff --git a/test/unit/arena_decay.c b/test/unit/arena_decay.c index 177ba505..99c08ab9 100644 --- a/test/unit/arena_decay.c +++ b/test/unit/arena_decay.c @@ -4,11 +4,11 @@ #include "jemalloc/internal/ticker.h" static nstime_monotonic_t 
*nstime_monotonic_orig; -static nstime_update_t *nstime_update_orig; +static nstime_update_t *nstime_update_orig; static unsigned nupdates_mock; static nstime_t time_mock; -static bool monotonic_mock; +static bool monotonic_mock; static bool nstime_monotonic_mock(void) { @@ -28,26 +28,27 @@ TEST_BEGIN(test_decay_ticks) { test_skip_if(opt_hpa); ticker_geom_t *decay_ticker; - unsigned tick0, tick1, arena_ind; - size_t sz, large0; - void *p; + unsigned tick0, tick1, arena_ind; + size_t sz, large0; + void *p; sz = sizeof(size_t); - expect_d_eq(mallctl("arenas.lextent.0.size", (void *)&large0, &sz, NULL, - 0), 0, "Unexpected mallctl failure"); + expect_d_eq( + mallctl("arenas.lextent.0.size", (void *)&large0, &sz, NULL, 0), 0, + "Unexpected mallctl failure"); /* Set up a manually managed arena for test. */ arena_ind = do_arena_create(0, 0); /* Migrate to the new arena, and get the ticker. */ unsigned old_arena_ind; - size_t sz_arena_ind = sizeof(old_arena_ind); + size_t sz_arena_ind = sizeof(old_arena_ind); expect_d_eq(mallctl("thread.arena", (void *)&old_arena_ind, - &sz_arena_ind, (void *)&arena_ind, sizeof(arena_ind)), 0, - "Unexpected mallctl() failure"); + &sz_arena_ind, (void *)&arena_ind, sizeof(arena_ind)), + 0, "Unexpected mallctl() failure"); decay_ticker = tsd_arena_decay_tickerp_get(tsd_fetch()); - expect_ptr_not_null(decay_ticker, - "Unexpected failure getting decay ticker"); + expect_ptr_not_null( + decay_ticker, "Unexpected failure getting decay ticker"); /* * Test the standard APIs using a large size class, since we can't @@ -80,8 +81,8 @@ TEST_BEGIN(test_decay_ticks) { expect_d_eq(posix_memalign(&p, sizeof(size_t), large0), 0, "Unexpected posix_memalign() failure"); tick1 = ticker_geom_read(decay_ticker); - expect_u32_ne(tick1, tick0, - "Expected ticker to tick during posix_memalign()"); + expect_u32_ne( + tick1, tick0, "Expected ticker to tick during posix_memalign()"); free(p); /* aligned_alloc(). 
*/ @@ -89,8 +90,8 @@ TEST_BEGIN(test_decay_ticks) { p = aligned_alloc(sizeof(size_t), large0); expect_ptr_not_null(p, "Unexpected aligned_alloc() failure"); tick1 = ticker_geom_read(decay_ticker); - expect_u32_ne(tick1, tick0, - "Expected ticker to tick during aligned_alloc()"); + expect_u32_ne( + tick1, tick0, "Expected ticker to tick during aligned_alloc()"); free(p); /* realloc(). */ @@ -118,7 +119,7 @@ TEST_BEGIN(test_decay_ticks) { */ { unsigned i; - size_t allocx_sizes[2]; + size_t allocx_sizes[2]; allocx_sizes[0] = large0; allocx_sizes[1] = 1; @@ -163,7 +164,8 @@ TEST_BEGIN(test_decay_ticks) { tick1 = ticker_geom_read(decay_ticker); expect_u32_ne(tick1, tick0, "Expected ticker to tick during sdallocx() " - "(sz=%zu)", sz); + "(sz=%zu)", + sz); } } @@ -172,18 +174,19 @@ TEST_BEGIN(test_decay_ticks) { * using an explicit tcache. */ unsigned tcache_ind, i; - size_t tcache_sizes[2]; + size_t tcache_sizes[2]; tcache_sizes[0] = large0; tcache_sizes[1] = 1; size_t tcache_max, sz_tcache_max; sz_tcache_max = sizeof(tcache_max); expect_d_eq(mallctl("arenas.tcache_max", (void *)&tcache_max, - &sz_tcache_max, NULL, 0), 0, "Unexpected mallctl() failure"); + &sz_tcache_max, NULL, 0), + 0, "Unexpected mallctl() failure"); sz = sizeof(unsigned); - expect_d_eq(mallctl("tcache.create", (void *)&tcache_ind, &sz, - NULL, 0), 0, "Unexpected mallctl failure"); + expect_d_eq(mallctl("tcache.create", (void *)&tcache_ind, &sz, NULL, 0), + 0, "Unexpected mallctl failure"); for (i = 0; i < sizeof(tcache_sizes) / sizeof(size_t); i++) { sz = tcache_sizes[i]; @@ -195,13 +198,14 @@ TEST_BEGIN(test_decay_ticks) { tick1 = ticker_geom_read(decay_ticker); expect_u32_ne(tick1, tick0, "Expected ticker to tick during tcache fill " - "(sz=%zu)", sz); + "(sz=%zu)", + sz); /* tcache flush. 
*/ dallocx(p, MALLOCX_TCACHE(tcache_ind)); tick0 = ticker_geom_read(decay_ticker); expect_d_eq(mallctl("tcache.flush", NULL, NULL, - (void *)&tcache_ind, sizeof(unsigned)), 0, - "Unexpected mallctl failure"); + (void *)&tcache_ind, sizeof(unsigned)), + 0, "Unexpected mallctl failure"); tick1 = ticker_geom_read(decay_ticker); /* Will only tick if it's in tcache. */ @@ -231,11 +235,11 @@ decay_ticker_helper(unsigned arena_ind, int flags, bool dirty, ssize_t dt, * cached slab were to repeatedly come and go during looping, it could * prevent the decay backlog ever becoming empty. */ - void *p = do_mallocx(1, flags); + void *p = do_mallocx(1, flags); uint64_t dirty_npurge1, muzzy_npurge1; do { for (unsigned i = 0; i < ARENA_DECAY_NTICKS_PER_UPDATE / 2; - i++) { + i++) { void *q = do_mallocx(1, flags); dallocx(q, flags); } @@ -244,14 +248,15 @@ decay_ticker_helper(unsigned arena_ind, int flags, bool dirty, ssize_t dt, nstime_add(&time_mock, &update_interval); nstime_update(&time); - } while (nstime_compare(&time, &deadline) <= 0 && ((dirty_npurge1 == - dirty_npurge0 && muzzy_npurge1 == muzzy_npurge0) || - !terminate_asap)); + } while (nstime_compare(&time, &deadline) <= 0 + && ((dirty_npurge1 == dirty_npurge0 + && muzzy_npurge1 == muzzy_npurge0) + || !terminate_asap)); dallocx(p, flags); if (config_stats) { - expect_u64_gt(dirty_npurge1 + muzzy_npurge1, dirty_npurge0 + - muzzy_npurge0, "Expected purging to occur"); + expect_u64_gt(dirty_npurge1 + muzzy_npurge1, + dirty_npurge0 + muzzy_npurge0, "Expected purging to occur"); } #undef NINTERVALS } @@ -260,11 +265,11 @@ TEST_BEGIN(test_decay_ticker) { test_skip_if(is_background_thread_enabled()); test_skip_if(opt_hpa); #define NPS 2048 - ssize_t ddt = opt_dirty_decay_ms; - ssize_t mdt = opt_muzzy_decay_ms; + ssize_t ddt = opt_dirty_decay_ms; + ssize_t mdt = opt_muzzy_decay_ms; unsigned arena_ind = do_arena_create(ddt, mdt); - int flags = (MALLOCX_ARENA(arena_ind) | MALLOCX_TCACHE_NONE); - void *ps[NPS]; + int flags = 
(MALLOCX_ARENA(arena_ind) | MALLOCX_TCACHE_NONE); + void *ps[NPS]; /* * Allocate a bunch of large objects, pause the clock, deallocate every @@ -274,8 +279,9 @@ TEST_BEGIN(test_decay_ticker) { */ size_t large; size_t sz = sizeof(size_t); - expect_d_eq(mallctl("arenas.lextent.0.size", (void *)&large, &sz, NULL, - 0), 0, "Unexpected mallctl failure"); + expect_d_eq( + mallctl("arenas.lextent.0.size", (void *)&large, &sz, NULL, 0), 0, + "Unexpected mallctl failure"); do_purge(arena_ind); uint64_t dirty_npurge0 = get_arena_dirty_npurge(arena_ind); @@ -302,9 +308,9 @@ TEST_BEGIN(test_decay_ticker) { "Expected nstime_update() to be called"); } - decay_ticker_helper(arena_ind, flags, true, ddt, dirty_npurge0, - muzzy_npurge0, true); - decay_ticker_helper(arena_ind, flags, false, ddt+mdt, dirty_npurge0, + decay_ticker_helper( + arena_ind, flags, true, ddt, dirty_npurge0, muzzy_npurge0, true); + decay_ticker_helper(arena_ind, flags, false, ddt + mdt, dirty_npurge0, muzzy_npurge0, false); do_arena_destroy(arena_ind); @@ -319,16 +325,17 @@ TEST_BEGIN(test_decay_nonmonotonic) { test_skip_if(is_background_thread_enabled()); test_skip_if(opt_hpa); #define NPS (SMOOTHSTEP_NSTEPS + 1) - int flags = (MALLOCX_ARENA(0) | MALLOCX_TCACHE_NONE); - void *ps[NPS]; + int flags = (MALLOCX_ARENA(0) | MALLOCX_TCACHE_NONE); + void *ps[NPS]; uint64_t npurge0 = 0; uint64_t npurge1 = 0; - size_t sz, large0; + size_t sz, large0; unsigned i, nupdates0; sz = sizeof(size_t); - expect_d_eq(mallctl("arenas.lextent.0.size", (void *)&large0, &sz, NULL, - 0), 0, "Unexpected mallctl failure"); + expect_d_eq( + mallctl("arenas.lextent.0.size", (void *)&large0, &sz, NULL, 0), 0, + "Unexpected mallctl failure"); expect_d_eq(mallctl("arena.0.purge", NULL, NULL, NULL, 0), 0, "Unexpected mallctl failure"); @@ -380,15 +387,15 @@ TEST_BEGIN(test_decay_now) { unsigned arena_ind = do_arena_create(0, 0); expect_zu_eq(get_arena_pdirty(arena_ind), 0, "Unexpected dirty pages"); expect_zu_eq(get_arena_pmuzzy(arena_ind), 
0, "Unexpected muzzy pages"); - size_t sizes[] = {16, PAGE<<2, HUGEPAGE<<2}; + size_t sizes[] = {16, PAGE << 2, HUGEPAGE << 2}; /* Verify that dirty/muzzy pages never linger after deallocation. */ - for (unsigned i = 0; i < sizeof(sizes)/sizeof(size_t); i++) { + for (unsigned i = 0; i < sizeof(sizes) / sizeof(size_t); i++) { size_t size = sizes[i]; generate_dirty(arena_ind, size); - expect_zu_eq(get_arena_pdirty(arena_ind), 0, - "Unexpected dirty pages"); - expect_zu_eq(get_arena_pmuzzy(arena_ind), 0, - "Unexpected muzzy pages"); + expect_zu_eq( + get_arena_pdirty(arena_ind), 0, "Unexpected dirty pages"); + expect_zu_eq( + get_arena_pmuzzy(arena_ind), 0, "Unexpected muzzy pages"); } do_arena_destroy(arena_ind); } @@ -399,12 +406,12 @@ TEST_BEGIN(test_decay_never) { test_skip_if(opt_hpa); unsigned arena_ind = do_arena_create(-1, -1); - int flags = MALLOCX_ARENA(arena_ind) | MALLOCX_TCACHE_NONE; + int flags = MALLOCX_ARENA(arena_ind) | MALLOCX_TCACHE_NONE; expect_zu_eq(get_arena_pdirty(arena_ind), 0, "Unexpected dirty pages"); expect_zu_eq(get_arena_pmuzzy(arena_ind), 0, "Unexpected muzzy pages"); - size_t sizes[] = {16, PAGE<<2, HUGEPAGE<<2}; - void *ptrs[sizeof(sizes)/sizeof(size_t)]; - for (unsigned i = 0; i < sizeof(sizes)/sizeof(size_t); i++) { + size_t sizes[] = {16, PAGE << 2, HUGEPAGE << 2}; + void *ptrs[sizeof(sizes) / sizeof(size_t)]; + for (unsigned i = 0; i < sizeof(sizes) / sizeof(size_t); i++) { ptrs[i] = do_mallocx(sizes[i], flags); } /* Verify that each deallocation generates additional dirty pages. 
*/ @@ -419,7 +426,7 @@ TEST_BEGIN(test_decay_never) { expect_zu_eq(pdirty_prev, 0, "Unexpected dirty pages"); } expect_zu_eq(pmuzzy_prev, 0, "Unexpected muzzy pages"); - for (unsigned i = 0; i < sizeof(sizes)/sizeof(size_t); i++) { + for (unsigned i = 0; i < sizeof(sizes) / sizeof(size_t); i++) { dallocx(ptrs[i], flags); size_t pdirty = get_arena_pdirty(arena_ind); size_t pmuzzy = get_arena_pmuzzy(arena_ind); @@ -434,10 +441,6 @@ TEST_END int main(void) { - return test( - test_decay_ticks, - test_decay_ticker, - test_decay_nonmonotonic, - test_decay_now, - test_decay_never); + return test(test_decay_ticks, test_decay_ticker, + test_decay_nonmonotonic, test_decay_now, test_decay_never); } diff --git a/test/unit/arena_reset.c b/test/unit/arena_reset.c index 42fa9a5d..3e0f3d75 100644 --- a/test/unit/arena_reset.c +++ b/test/unit/arena_reset.c @@ -1,5 +1,5 @@ #ifndef ARENA_RESET_PROF_C_ -#include "test/jemalloc_test.h" +# include "test/jemalloc_test.h" #endif #include "jemalloc/internal/extent_mmap.h" @@ -10,7 +10,7 @@ static unsigned get_nsizes_impl(const char *cmd) { unsigned ret; - size_t z; + size_t z; z = sizeof(unsigned); expect_d_eq(mallctl(cmd, (void *)&ret, &z, NULL, 0), 0, @@ -37,12 +37,12 @@ get_size_impl(const char *cmd, size_t ind) { size_t miblen = 4; z = sizeof(size_t); - expect_d_eq(mallctlnametomib(cmd, mib, &miblen), - 0, "Unexpected mallctlnametomib(\"%s\", ...) failure", cmd); + expect_d_eq(mallctlnametomib(cmd, mib, &miblen), 0, + "Unexpected mallctlnametomib(\"%s\", ...) failure", cmd); mib[2] = ind; z = sizeof(size_t); - expect_d_eq(mallctlbymib(mib, miblen, (void *)&ret, &z, NULL, 0), - 0, "Unexpected mallctlbymib([\"%s\", %zu], ...) failure", cmd, ind); + expect_d_eq(mallctlbymib(mib, miblen, (void *)&ret, &z, NULL, 0), 0, + "Unexpected mallctlbymib([\"%s\", %zu], ...) 
failure", cmd, ind); return ret; } @@ -61,8 +61,8 @@ get_large_size(size_t ind) { static size_t vsalloc(tsdn_t *tsdn, const void *ptr) { emap_full_alloc_ctx_t full_alloc_ctx; - bool missing = emap_full_alloc_ctx_try_lookup(tsdn, &arena_emap_global, - ptr, &full_alloc_ctx); + bool missing = emap_full_alloc_ctx_try_lookup( + tsdn, &arena_emap_global, ptr, &full_alloc_ctx); if (missing) { return 0; } @@ -84,20 +84,21 @@ vsalloc(tsdn_t *tsdn, const void *ptr) { static unsigned do_arena_create(extent_hooks_t *h) { unsigned arena_ind; - size_t sz = sizeof(unsigned); - expect_d_eq(mallctl("arenas.create", (void *)&arena_ind, &sz, - (void *)(h != NULL ? &h : NULL), (h != NULL ? sizeof(h) : 0)), 0, - "Unexpected mallctl() failure"); + size_t sz = sizeof(unsigned); + expect_d_eq( + mallctl("arenas.create", (void *)&arena_ind, &sz, + (void *)(h != NULL ? &h : NULL), (h != NULL ? sizeof(h) : 0)), + 0, "Unexpected mallctl() failure"); return arena_ind; } static void do_arena_reset_pre(unsigned arena_ind, void ***ptrs, unsigned *nptrs) { -#define NLARGE 32 +#define NLARGE 32 unsigned nsmall, nlarge, i; - size_t sz; - int flags; - tsdn_t *tsdn; + size_t sz; + int flags; + tsdn_t *tsdn; flags = MALLOCX_ARENA(arena_ind) | MALLOCX_TCACHE_NONE; @@ -132,14 +133,14 @@ do_arena_reset_pre(unsigned arena_ind, void ***ptrs, unsigned *nptrs) { static void do_arena_reset_post(void **ptrs, unsigned nptrs, unsigned arena_ind) { - tsdn_t *tsdn; + tsdn_t *tsdn; unsigned i; tsdn = tsdn_fetch(); if (have_background_thread) { - malloc_mutex_lock(tsdn, - &background_thread_info_get(arena_ind)->mtx); + malloc_mutex_lock( + tsdn, &background_thread_info_get(arena_ind)->mtx); } /* Verify allocations no longer exist. 
*/ for (i = 0; i < nptrs; i++) { @@ -147,8 +148,8 @@ do_arena_reset_post(void **ptrs, unsigned nptrs, unsigned arena_ind) { "Allocation should no longer exist"); } if (have_background_thread) { - malloc_mutex_unlock(tsdn, - &background_thread_info_get(arena_ind)->mtx); + malloc_mutex_unlock( + tsdn, &background_thread_info_get(arena_ind)->mtx); } free(ptrs); @@ -159,7 +160,7 @@ do_arena_reset_destroy(const char *name, unsigned arena_ind) { size_t mib[3]; size_t miblen; - miblen = sizeof(mib)/sizeof(size_t); + miblen = sizeof(mib) / sizeof(size_t); expect_d_eq(mallctlnametomib(name, mib, &miblen), 0, "Unexpected mallctlnametomib() failure"); mib[1] = (size_t)arena_ind; @@ -179,7 +180,7 @@ do_arena_destroy(unsigned arena_ind) { TEST_BEGIN(test_arena_reset) { unsigned arena_ind; - void **ptrs; + void **ptrs; unsigned nptrs; arena_ind = do_arena_create(NULL); @@ -191,23 +192,25 @@ TEST_END static bool arena_i_initialized(unsigned arena_ind, bool refresh) { - bool initialized; + bool initialized; size_t mib[3]; size_t miblen, sz; if (refresh) { uint64_t epoch = 1; - expect_d_eq(mallctl("epoch", NULL, NULL, (void *)&epoch, - sizeof(epoch)), 0, "Unexpected mallctl() failure"); + expect_d_eq( + mallctl("epoch", NULL, NULL, (void *)&epoch, sizeof(epoch)), + 0, "Unexpected mallctl() failure"); } - miblen = sizeof(mib)/sizeof(size_t); + miblen = sizeof(mib) / sizeof(size_t); expect_d_eq(mallctlnametomib("arena.0.initialized", mib, &miblen), 0, "Unexpected mallctlnametomib() failure"); mib[1] = (size_t)arena_ind; sz = sizeof(initialized); - expect_d_eq(mallctlbymib(mib, miblen, (void *)&initialized, &sz, NULL, - 0), 0, "Unexpected mallctlbymib() failure"); + expect_d_eq( + mallctlbymib(mib, miblen, (void *)&initialized, &sz, NULL, 0), 0, + "Unexpected mallctlbymib() failure"); return initialized; } @@ -220,7 +223,7 @@ TEST_END TEST_BEGIN(test_arena_destroy_hooks_default) { unsigned arena_ind, arena_ind_another, arena_ind_prev; - void **ptrs; + void **ptrs; unsigned nptrs; 
arena_ind = do_arena_create(NULL); @@ -249,26 +252,27 @@ TEST_BEGIN(test_arena_destroy_hooks_default) { arena_ind_prev = arena_ind; arena_ind = do_arena_create(NULL); do_arena_reset_pre(arena_ind, &ptrs, &nptrs); - expect_u_eq(arena_ind, arena_ind_prev, - "Arena index should have been recycled"); + expect_u_eq( + arena_ind, arena_ind_prev, "Arena index should have been recycled"); do_arena_destroy(arena_ind); do_arena_reset_post(ptrs, nptrs, arena_ind); do_arena_destroy(arena_ind_another); /* Try arena.create with custom hooks. */ - size_t sz = sizeof(extent_hooks_t *); + size_t sz = sizeof(extent_hooks_t *); extent_hooks_t *a0_default_hooks; expect_d_eq(mallctl("arena.0.extent_hooks", (void *)&a0_default_hooks, - &sz, NULL, 0), 0, "Unexpected mallctlnametomib() failure"); + &sz, NULL, 0), + 0, "Unexpected mallctlnametomib() failure"); /* Default impl; but wrapped as "customized". */ - extent_hooks_t new_hooks = *a0_default_hooks; + extent_hooks_t new_hooks = *a0_default_hooks; extent_hooks_t *hook = &new_hooks; sz = sizeof(unsigned); expect_d_eq(mallctl("arenas.create", (void *)&arena_ind, &sz, - (void *)&hook, sizeof(void *)), 0, - "Unexpected mallctl() failure"); + (void *)&hook, sizeof(void *)), + 0, "Unexpected mallctl() failure"); do_arena_destroy(arena_ind); } TEST_END @@ -280,13 +284,15 @@ TEST_END static bool extent_dalloc_unmap(extent_hooks_t *extent_hooks, void *addr, size_t size, bool committed, unsigned arena_ind) { - TRACE_HOOK("%s(extent_hooks=%p, addr=%p, size=%zu, committed=%s, " - "arena_ind=%u)\n", __func__, extent_hooks, addr, size, committed ? - "true" : "false", arena_ind); + TRACE_HOOK( + "%s(extent_hooks=%p, addr=%p, size=%zu, committed=%s, " + "arena_ind=%u)\n", + __func__, extent_hooks, addr, size, committed ? 
"true" : "false", + arena_ind); expect_ptr_eq(extent_hooks, &hooks, "extent_hooks should be same as pointer used to set hooks"); - expect_ptr_eq(extent_hooks->dalloc, extent_dalloc_unmap, - "Wrong hook function"); + expect_ptr_eq( + extent_hooks->dalloc, extent_dalloc_unmap, "Wrong hook function"); called_dalloc = true; if (!try_dalloc) { return true; @@ -301,21 +307,15 @@ extent_dalloc_unmap(extent_hooks_t *extent_hooks, void *addr, size_t size, static extent_hooks_t hooks_orig; -static extent_hooks_t hooks_unmap = { - extent_alloc_hook, - extent_dalloc_unmap, /* dalloc */ - extent_destroy_hook, - extent_commit_hook, - extent_decommit_hook, - extent_purge_lazy_hook, - extent_purge_forced_hook, - extent_split_hook, - extent_merge_hook -}; +static extent_hooks_t hooks_unmap = {extent_alloc_hook, + extent_dalloc_unmap, /* dalloc */ + extent_destroy_hook, extent_commit_hook, extent_decommit_hook, + extent_purge_lazy_hook, extent_purge_forced_hook, extent_split_hook, + extent_merge_hook}; TEST_BEGIN(test_arena_destroy_hooks_unmap) { unsigned arena_ind; - void **ptrs; + void **ptrs; unsigned nptrs; extent_hooks_prep(); @@ -353,9 +353,6 @@ TEST_END int main(void) { - return test( - test_arena_reset, - test_arena_destroy_initial, - test_arena_destroy_hooks_default, - test_arena_destroy_hooks_unmap); + return test(test_arena_reset, test_arena_destroy_initial, + test_arena_destroy_hooks_default, test_arena_destroy_hooks_unmap); } diff --git a/test/unit/atomic.c b/test/unit/atomic.c index 6c4b85e5..b4f59431 100644 --- a/test/unit/atomic.c +++ b/test/unit/atomic.c @@ -187,7 +187,6 @@ TEST_BEGIN(test_atomic_u64) { } TEST_END - TEST_STRUCT(uint32_t, u32); TEST_BEGIN(test_atomic_u32) { INTEGER_TEST_BODY(uint32_t, u32); @@ -212,7 +211,6 @@ TEST_BEGIN(test_atomic_zd) { } TEST_END - TEST_STRUCT(unsigned, u); TEST_BEGIN(test_atomic_u) { INTEGER_TEST_BODY(unsigned, u); @@ -221,11 +219,6 @@ TEST_END int main(void) { - return test( - test_atomic_u64, - test_atomic_u32, - test_atomic_p, 
- test_atomic_zu, - test_atomic_zd, - test_atomic_u); + return test(test_atomic_u64, test_atomic_u32, test_atomic_p, + test_atomic_zu, test_atomic_zd, test_atomic_u); } diff --git a/test/unit/background_thread.c b/test/unit/background_thread.c index c60010a8..819a81a6 100644 --- a/test/unit/background_thread.c +++ b/test/unit/background_thread.c @@ -4,14 +4,13 @@ static void test_switch_background_thread_ctl(bool new_val) { - bool e0, e1; + bool e0, e1; size_t sz = sizeof(bool); e1 = new_val; - expect_d_eq(mallctl("background_thread", (void *)&e0, &sz, - &e1, sz), 0, "Unexpected mallctl() failure"); - expect_b_eq(e0, !e1, - "background_thread should be %d before.\n", !e1); + expect_d_eq(mallctl("background_thread", (void *)&e0, &sz, &e1, sz), 0, + "Unexpected mallctl() failure"); + expect_b_eq(e0, !e1, "background_thread should be %d before.\n", !e1); if (e1) { expect_zu_gt(n_background_threads, 0, "Number of background threads should be non zero.\n"); @@ -23,14 +22,13 @@ test_switch_background_thread_ctl(bool new_val) { static void test_repeat_background_thread_ctl(bool before) { - bool e0, e1; + bool e0, e1; size_t sz = sizeof(bool); e1 = before; - expect_d_eq(mallctl("background_thread", (void *)&e0, &sz, - &e1, sz), 0, "Unexpected mallctl() failure"); - expect_b_eq(e0, before, - "background_thread should be %d.\n", before); + expect_d_eq(mallctl("background_thread", (void *)&e0, &sz, &e1, sz), 0, + "Unexpected mallctl() failure"); + expect_b_eq(e0, before, "background_thread should be %d.\n", before); if (e1) { expect_zu_gt(n_background_threads, 0, "Number of background threads should be non zero.\n"); @@ -43,15 +41,15 @@ test_repeat_background_thread_ctl(bool before) { TEST_BEGIN(test_background_thread_ctl) { test_skip_if(!have_background_thread); - bool e0, e1; + bool e0, e1; size_t sz = sizeof(bool); - expect_d_eq(mallctl("opt.background_thread", (void *)&e0, &sz, - NULL, 0), 0, "Unexpected mallctl() failure"); - expect_d_eq(mallctl("background_thread", 
(void *)&e1, &sz, - NULL, 0), 0, "Unexpected mallctl() failure"); - expect_b_eq(e0, e1, - "Default and opt.background_thread does not match.\n"); + expect_d_eq(mallctl("opt.background_thread", (void *)&e0, &sz, NULL, 0), + 0, "Unexpected mallctl() failure"); + expect_d_eq(mallctl("background_thread", (void *)&e1, &sz, NULL, 0), 0, + "Unexpected mallctl() failure"); + expect_b_eq( + e0, e1, "Default and opt.background_thread does not match.\n"); if (e0) { test_switch_background_thread_ctl(false); } @@ -75,7 +73,7 @@ TEST_BEGIN(test_background_thread_running) { test_skip_if(!config_stats); #if defined(JEMALLOC_BACKGROUND_THREAD) - tsd_t *tsd = tsd_fetch(); + tsd_t *tsd = tsd_fetch(); background_thread_info_t *info = &background_thread_info[0]; test_repeat_background_thread_ctl(false); @@ -113,6 +111,5 @@ int main(void) { /* Background_thread creation tests reentrancy naturally. */ return test_no_reentrancy( - test_background_thread_ctl, - test_background_thread_running); + test_background_thread_ctl, test_background_thread_running); } diff --git a/test/unit/background_thread_enable.c b/test/unit/background_thread_enable.c index 3a2d55ac..57f26c4b 100644 --- a/test/unit/background_thread_enable.c +++ b/test/unit/background_thread_enable.c @@ -1,6 +1,7 @@ #include "test/jemalloc_test.h" -const char *malloc_conf = "background_thread:false,narenas:1,max_background_threads:8"; +const char *malloc_conf = + "background_thread:false,narenas:1,max_background_threads:8"; static unsigned max_test_narenas(void) { @@ -21,14 +22,14 @@ TEST_BEGIN(test_deferred) { test_skip_if(!have_background_thread); unsigned id; - size_t sz_u = sizeof(unsigned); + size_t sz_u = sizeof(unsigned); for (unsigned i = 0; i < max_test_narenas(); i++) { expect_d_eq(mallctl("arenas.create", &id, &sz_u, NULL, 0), 0, "Failed to create arena"); } - bool enable = true; + bool enable = true; size_t sz_b = sizeof(bool); expect_d_eq(mallctl("background_thread", NULL, NULL, &enable, sz_b), 0, "Failed to enable 
background threads"); @@ -44,29 +45,32 @@ TEST_BEGIN(test_max_background_threads) { size_t max_n_thds; size_t opt_max_n_thds; size_t sz_m = sizeof(max_n_thds); - expect_d_eq(mallctl("opt.max_background_threads", - &opt_max_n_thds, &sz_m, NULL, 0), 0, - "Failed to get opt.max_background_threads"); - expect_d_eq(mallctl("max_background_threads", &max_n_thds, &sz_m, NULL, - 0), 0, "Failed to get max background threads"); + expect_d_eq(mallctl("opt.max_background_threads", &opt_max_n_thds, + &sz_m, NULL, 0), + 0, "Failed to get opt.max_background_threads"); + expect_d_eq( + mallctl("max_background_threads", &max_n_thds, &sz_m, NULL, 0), 0, + "Failed to get max background threads"); expect_zu_eq(opt_max_n_thds, max_n_thds, "max_background_threads and " "opt.max_background_threads should match"); - expect_d_eq(mallctl("max_background_threads", NULL, NULL, &max_n_thds, - sz_m), 0, "Failed to set max background threads"); + expect_d_eq( + mallctl("max_background_threads", NULL, NULL, &max_n_thds, sz_m), 0, + "Failed to set max background threads"); size_t size_zero = 0; - expect_d_ne(mallctl("max_background_threads", NULL, NULL, &size_zero, - sz_m), 0, "Should not allow zero background threads"); + expect_d_ne( + mallctl("max_background_threads", NULL, NULL, &size_zero, sz_m), 0, + "Should not allow zero background threads"); unsigned id; - size_t sz_u = sizeof(unsigned); + size_t sz_u = sizeof(unsigned); for (unsigned i = 0; i < max_test_narenas(); i++) { expect_d_eq(mallctl("arenas.create", &id, &sz_u, NULL, 0), 0, "Failed to create arena"); } - bool enable = true; + bool enable = true; size_t sz_b = sizeof(bool); expect_d_eq(mallctl("background_thread", NULL, NULL, &enable, sz_b), 0, "Failed to enable background threads"); @@ -75,16 +79,18 @@ TEST_BEGIN(test_max_background_threads) { size_t new_max_thds = max_n_thds - 1; if (new_max_thds > 0) { expect_d_eq(mallctl("max_background_threads", NULL, NULL, - &new_max_thds, sz_m), 0, - "Failed to set max background threads"); 
+ &new_max_thds, sz_m), + 0, "Failed to set max background threads"); expect_zu_eq(n_background_threads, new_max_thds, "Number of background threads should decrease by 1.\n"); } new_max_thds = 1; - expect_d_eq(mallctl("max_background_threads", NULL, NULL, &new_max_thds, - sz_m), 0, "Failed to set max background threads"); - expect_d_ne(mallctl("max_background_threads", NULL, NULL, &size_zero, - sz_m), 0, "Should not allow zero background threads"); + expect_d_eq( + mallctl("max_background_threads", NULL, NULL, &new_max_thds, sz_m), + 0, "Failed to set max background threads"); + expect_d_ne( + mallctl("max_background_threads", NULL, NULL, &size_zero, sz_m), 0, + "Should not allow zero background threads"); expect_zu_eq(n_background_threads, new_max_thds, "Number of background threads should be 1.\n"); } @@ -92,7 +98,5 @@ TEST_END int main(void) { - return test_no_reentrancy( - test_deferred, - test_max_background_threads); + return test_no_reentrancy(test_deferred, test_max_background_threads); } diff --git a/test/unit/base.c b/test/unit/base.c index 3e46626e..e6e82435 100644 --- a/test/unit/base.c +++ b/test/unit/base.c @@ -3,37 +3,31 @@ #include "test/extent_hooks.h" static extent_hooks_t hooks_null = { - extent_alloc_hook, - NULL, /* dalloc */ - NULL, /* destroy */ - NULL, /* commit */ - NULL, /* decommit */ - NULL, /* purge_lazy */ - NULL, /* purge_forced */ - NULL, /* split */ - NULL /* merge */ + extent_alloc_hook, NULL, /* dalloc */ + NULL, /* destroy */ + NULL, /* commit */ + NULL, /* decommit */ + NULL, /* purge_lazy */ + NULL, /* purge_forced */ + NULL, /* split */ + NULL /* merge */ }; static extent_hooks_t hooks_not_null = { - extent_alloc_hook, - extent_dalloc_hook, - extent_destroy_hook, - NULL, /* commit */ - extent_decommit_hook, - extent_purge_lazy_hook, - extent_purge_forced_hook, - NULL, /* split */ - NULL /* merge */ + extent_alloc_hook, extent_dalloc_hook, extent_destroy_hook, + NULL, /* commit */ + extent_decommit_hook, extent_purge_lazy_hook, 
extent_purge_forced_hook, + NULL, /* split */ + NULL /* merge */ }; TEST_BEGIN(test_base_hooks_default) { base_t *base; - size_t allocated0, allocated1, edata_allocated, - rtree_allocated, resident, mapped, n_thp; + size_t allocated0, allocated1, edata_allocated, rtree_allocated, + resident, mapped, n_thp; tsdn_t *tsdn = tsd_tsdn(tsd_fetch()); - base = base_new(tsdn, 0, - (extent_hooks_t *)&ehooks_default_extent_hooks, + base = base_new(tsdn, 0, (extent_hooks_t *)&ehooks_default_extent_hooks, /* metadata_use_hooks */ true); if (config_stats) { @@ -42,13 +36,13 @@ TEST_BEGIN(test_base_hooks_default) { expect_zu_ge(allocated0, sizeof(base_t), "Base header should count as allocated"); if (opt_metadata_thp == metadata_thp_always) { - expect_zu_gt(n_thp, 0, - "Base should have 1 THP at least."); + expect_zu_gt( + n_thp, 0, "Base should have 1 THP at least."); } } - expect_ptr_not_null(base_alloc(tsdn, base, 42, 1), - "Unexpected base_alloc() failure"); + expect_ptr_not_null( + base_alloc(tsdn, base, 42, 1), "Unexpected base_alloc() failure"); if (config_stats) { base_stats_get(tsdn, base, &allocated1, &edata_allocated, @@ -63,9 +57,9 @@ TEST_END TEST_BEGIN(test_base_hooks_null) { extent_hooks_t hooks_orig; - base_t *base; - size_t allocated0, allocated1, edata_allocated, - rtree_allocated, resident, mapped, n_thp; + base_t *base; + size_t allocated0, allocated1, edata_allocated, rtree_allocated, + resident, mapped, n_thp; extent_hooks_prep(); try_dalloc = false; @@ -86,13 +80,13 @@ TEST_BEGIN(test_base_hooks_null) { expect_zu_ge(allocated0, sizeof(base_t), "Base header should count as allocated"); if (opt_metadata_thp == metadata_thp_always) { - expect_zu_gt(n_thp, 0, - "Base should have 1 THP at least."); + expect_zu_gt( + n_thp, 0, "Base should have 1 THP at least."); } } - expect_ptr_not_null(base_alloc(tsdn, base, 42, 1), - "Unexpected base_alloc() failure"); + expect_ptr_not_null( + base_alloc(tsdn, base, 42, 1), "Unexpected base_alloc() failure"); if 
(config_stats) { base_stats_get(tsdn, base, &allocated1, &edata_allocated, @@ -109,8 +103,8 @@ TEST_END TEST_BEGIN(test_base_hooks_not_null) { extent_hooks_t hooks_orig; - base_t *base; - void *p, *q, *r, *r_exp; + base_t *base; + void *p, *q, *r, *r_exp; extent_hooks_prep(); try_dalloc = false; @@ -133,33 +127,34 @@ TEST_BEGIN(test_base_hooks_not_null) { */ { const size_t alignments[] = { - 1, - QUANTUM, - QUANTUM << 1, - CACHELINE, - CACHELINE << 1, + 1, + QUANTUM, + QUANTUM << 1, + CACHELINE, + CACHELINE << 1, }; unsigned i; for (i = 0; i < sizeof(alignments) / sizeof(size_t); i++) { size_t alignment = alignments[i]; - size_t align_ceil = ALIGNMENT_CEILING(alignment, - QUANTUM); + size_t align_ceil = ALIGNMENT_CEILING( + alignment, QUANTUM); p = base_alloc(tsdn, base, 1, alignment); - expect_ptr_not_null(p, - "Unexpected base_alloc() failure"); + expect_ptr_not_null( + p, "Unexpected base_alloc() failure"); expect_ptr_eq(p, - (void *)(ALIGNMENT_CEILING((uintptr_t)p, - alignment)), "Expected quantum alignment"); + (void *)(ALIGNMENT_CEILING( + (uintptr_t)p, alignment)), + "Expected quantum alignment"); q = base_alloc(tsdn, base, alignment, alignment); - expect_ptr_not_null(q, - "Unexpected base_alloc() failure"); + expect_ptr_not_null( + q, "Unexpected base_alloc() failure"); expect_ptr_eq((void *)((uintptr_t)p + align_ceil), q, "Minimal allocation should take up %zu bytes", align_ceil); r = base_alloc(tsdn, base, 1, alignment); - expect_ptr_not_null(r, - "Unexpected base_alloc() failure"); + expect_ptr_not_null( + r, "Unexpected base_alloc() failure"); expect_ptr_eq((void *)((uintptr_t)q + align_ceil), r, "Minimal allocation should take up %zu bytes", align_ceil); @@ -193,21 +188,18 @@ TEST_BEGIN(test_base_hooks_not_null) { * Check for proper alignment support when normal blocks are too small. 
*/ { - const size_t alignments[] = { - HUGEPAGE, - HUGEPAGE << 1 - }; - unsigned i; + const size_t alignments[] = {HUGEPAGE, HUGEPAGE << 1}; + unsigned i; for (i = 0; i < sizeof(alignments) / sizeof(size_t); i++) { size_t alignment = alignments[i]; p = base_alloc(tsdn, base, QUANTUM, alignment); - expect_ptr_not_null(p, - "Unexpected base_alloc() failure"); + expect_ptr_not_null( + p, "Unexpected base_alloc() failure"); expect_ptr_eq(p, - (void *)(ALIGNMENT_CEILING((uintptr_t)p, - alignment)), "Expected %zu-byte alignment", - alignment); + (void *)(ALIGNMENT_CEILING( + (uintptr_t)p, alignment)), + "Expected %zu-byte alignment", alignment); } } @@ -237,12 +229,11 @@ TEST_BEGIN(test_base_ehooks_get_for_metadata_default_hook) { base = base_new(tsdn, 0, &hooks, /* metadata_use_hooks */ false); ehooks_t *ehooks = base_ehooks_get_for_metadata(base); expect_true(ehooks_are_default(ehooks), - "Expected default extent hook functions pointer"); + "Expected default extent hook functions pointer"); base_delete(tsdn, base); } TEST_END - TEST_BEGIN(test_base_ehooks_get_for_metadata_custom_hook) { extent_hooks_prep(); memcpy(&hooks, &hooks_not_null, sizeof(extent_hooks_t)); @@ -251,17 +242,15 @@ TEST_BEGIN(test_base_ehooks_get_for_metadata_custom_hook) { base = base_new(tsdn, 0, &hooks, /* metadata_use_hooks */ true); ehooks_t *ehooks = base_ehooks_get_for_metadata(base); expect_ptr_eq(&hooks, ehooks_get_extent_hooks_ptr(ehooks), - "Expected user-specified extend hook functions pointer"); + "Expected user-specified extend hook functions pointer"); base_delete(tsdn, base); } TEST_END int main(void) { - return test( - test_base_hooks_default, - test_base_hooks_null, + return test(test_base_hooks_default, test_base_hooks_null, test_base_hooks_not_null, - test_base_ehooks_get_for_metadata_default_hook, - test_base_ehooks_get_for_metadata_custom_hook); + test_base_ehooks_get_for_metadata_default_hook, + test_base_ehooks_get_for_metadata_custom_hook); } diff --git 
a/test/unit/batch_alloc.c b/test/unit/batch_alloc.c index 2bd5968e..0c61bf77 100644 --- a/test/unit/batch_alloc.c +++ b/test/unit/batch_alloc.c @@ -6,8 +6,8 @@ static void *global_ptrs[BATCH_MAX]; #define PAGE_ALIGNED(ptr) (((uintptr_t)ptr & PAGE_MASK) == 0) static void -verify_batch_basic(tsd_t *tsd, void **ptrs, size_t batch, size_t usize, - bool zero) { +verify_batch_basic( + tsd_t *tsd, void **ptrs, size_t batch, size_t usize, bool zero) { for (size_t i = 0; i < batch; ++i) { void *p = ptrs[i]; expect_zu_eq(isalloc(tsd_tsdn(tsd), p), usize, ""); @@ -46,7 +46,8 @@ verify_batch_locality(tsd_t *tsd, void **ptrs, size_t batch, size_t usize, assert(i > 0); void *q = ptrs[i - 1]; expect_true((uintptr_t)p > (uintptr_t)q - && (size_t)((uintptr_t)p - (uintptr_t)q) == usize, ""); + && (size_t)((uintptr_t)p - (uintptr_t)q) == usize, + ""); } } @@ -62,16 +63,17 @@ struct batch_alloc_packet_s { void **ptrs; size_t num; size_t size; - int flags; + int flags; }; static size_t batch_alloc_wrapper(void **ptrs, size_t num, size_t size, int flags) { batch_alloc_packet_t batch_alloc_packet = {ptrs, num, size, flags}; - size_t filled; - size_t len = sizeof(size_t); + size_t filled; + size_t len = sizeof(size_t); assert_d_eq(mallctl("experimental.batch_alloc", &filled, &len, - &batch_alloc_packet, sizeof(batch_alloc_packet)), 0, ""); + &batch_alloc_packet, sizeof(batch_alloc_packet)), + 0, ""); return filled; } @@ -79,16 +81,16 @@ static void test_wrapper(size_t size, size_t alignment, bool zero, unsigned arena_flag) { tsd_t *tsd = tsd_fetch(); assert(tsd != NULL); - const size_t usize = - (alignment != 0 ? sz_sa2u(size, alignment) : sz_s2u(size)); - const szind_t ind = sz_size2index(usize); + const size_t usize = (alignment != 0 ? 
sz_sa2u(size, alignment) + : sz_s2u(size)); + const szind_t ind = sz_size2index(usize); const bin_info_t *bin_info = &bin_infos[ind]; - const unsigned nregs = bin_info->nregs; + const unsigned nregs = bin_info->nregs; assert(nregs > 0); arena_t *arena; if (arena_flag != 0) { - arena = arena_get(tsd_tsdn(tsd), MALLOCX_ARENA_GET(arena_flag), - false); + arena = arena_get( + tsd_tsdn(tsd), MALLOCX_ARENA_GET(arena_flag), false); } else { arena = arena_choose(tsd, NULL); } @@ -122,13 +124,13 @@ test_wrapper(size_t size, size_t alignment, bool zero, unsigned arena_flag) { } size_t batch = base + (size_t)j; assert(batch < BATCH_MAX); - size_t filled = batch_alloc_wrapper(global_ptrs, batch, - size, flags); + size_t filled = batch_alloc_wrapper( + global_ptrs, batch, size, flags); assert_zu_eq(filled, batch, ""); - verify_batch_basic(tsd, global_ptrs, batch, usize, - zero); - verify_batch_locality(tsd, global_ptrs, batch, usize, - arena, nregs); + verify_batch_basic( + tsd, global_ptrs, batch, usize, zero); + verify_batch_locality( + tsd, global_ptrs, batch, usize, arena, nregs); release_batch(global_ptrs, batch, usize); } } @@ -153,9 +155,10 @@ TEST_END TEST_BEGIN(test_batch_alloc_manual_arena) { unsigned arena_ind; - size_t len_unsigned = sizeof(unsigned); - assert_d_eq(mallctl("arenas.create", &arena_ind, &len_unsigned, NULL, - 0), 0, ""); + size_t len_unsigned = sizeof(unsigned); + assert_d_eq( + mallctl("arenas.create", &arena_ind, &len_unsigned, NULL, 0), 0, + ""); test_wrapper(11, 0, false, MALLOCX_ARENA(arena_ind)); } TEST_END @@ -180,10 +183,7 @@ TEST_END int main(void) { - return test( - test_batch_alloc, - test_batch_alloc_zero, - test_batch_alloc_aligned, - test_batch_alloc_manual_arena, + return test(test_batch_alloc, test_batch_alloc_zero, + test_batch_alloc_aligned, test_batch_alloc_manual_arena, test_batch_alloc_large); } diff --git a/test/unit/batcher.c b/test/unit/batcher.c index df9d3e5b..1052ca27 100644 --- a/test/unit/batcher.c +++ 
b/test/unit/batcher.c @@ -5,7 +5,7 @@ TEST_BEGIN(test_simple) { enum { NELEMS_MAX = 10, DATA_BASE_VAL = 100, NRUNS = 5 }; batcher_t batcher; - size_t data[NELEMS_MAX]; + size_t data[NELEMS_MAX]; for (size_t nelems = 0; nelems < NELEMS_MAX; nelems++) { batcher_init(&batcher, nelems); for (int run = 0; run < NRUNS; run++) { @@ -13,8 +13,8 @@ TEST_BEGIN(test_simple) { data[i] = (size_t)-1; } for (size_t i = 0; i < nelems; i++) { - size_t idx = batcher_push_begin(TSDN_NULL, - &batcher, 1); + size_t idx = batcher_push_begin( + TSDN_NULL, &batcher, 1); assert_zu_eq(i, idx, "Wrong index"); assert_zu_eq((size_t)-1, data[idx], "Expected uninitialized slot"); @@ -22,8 +22,8 @@ TEST_BEGIN(test_simple) { batcher_push_end(TSDN_NULL, &batcher); } if (nelems > 0) { - size_t idx = batcher_push_begin(TSDN_NULL, - &batcher, 1); + size_t idx = batcher_push_begin( + TSDN_NULL, &batcher, 1); assert_zu_eq(BATCHER_NO_IDX, idx, "Shouldn't be able to push into a full " "batcher"); @@ -51,7 +51,7 @@ TEST_BEGIN(test_simple) { TEST_END TEST_BEGIN(test_multi_push) { - size_t idx, nelems; + size_t idx, nelems; batcher_t batcher; batcher_init(&batcher, 11); /* Push two at a time, 5 times, for 10 total. 
*/ @@ -82,13 +82,13 @@ enum { typedef struct stress_test_data_s stress_test_data_t; struct stress_test_data_s { - batcher_t batcher; - mtx_t pop_mtx; + batcher_t batcher; + mtx_t pop_mtx; atomic_u32_t thread_id; - uint32_t elems_data[STRESS_TEST_ELEMS]; - size_t push_count[STRESS_TEST_ELEMS]; - size_t pop_count[STRESS_TEST_ELEMS]; + uint32_t elems_data[STRESS_TEST_ELEMS]; + size_t push_count[STRESS_TEST_ELEMS]; + size_t pop_count[STRESS_TEST_ELEMS]; atomic_zu_t atomic_push_count[STRESS_TEST_ELEMS]; atomic_zu_t atomic_pop_count[STRESS_TEST_ELEMS]; }; @@ -108,7 +108,8 @@ get_nth_set(bool elems_owned[STRESS_TEST_ELEMS], size_t n) { return i; } } - assert_not_reached("Asked for the %zu'th set element when < %zu are " + assert_not_reached( + "Asked for the %zu'th set element when < %zu are " "set", n, n); /* Just to silence a compiler warning. */ @@ -118,20 +119,19 @@ get_nth_set(bool elems_owned[STRESS_TEST_ELEMS], size_t n) { static void * stress_test_thd(void *arg) { stress_test_data_t *data = arg; - size_t prng = atomic_fetch_add_u32(&data->thread_id, 1, - ATOMIC_RELAXED); + size_t prng = atomic_fetch_add_u32(&data->thread_id, 1, ATOMIC_RELAXED); size_t nelems_owned = 0; - bool elems_owned[STRESS_TEST_ELEMS] = {0}; + bool elems_owned[STRESS_TEST_ELEMS] = {0}; size_t local_push_count[STRESS_TEST_ELEMS] = {0}; size_t local_pop_count[STRESS_TEST_ELEMS] = {0}; for (int i = 0; i < STRESS_TEST_OPS; i++) { - size_t rnd = prng_range_zu(&prng, - STRESS_TEST_PUSH_TO_POP_RATIO); + size_t rnd = prng_range_zu( + &prng, STRESS_TEST_PUSH_TO_POP_RATIO); if (rnd == 0 || nelems_owned == 0) { - size_t nelems = batcher_pop_begin(TSDN_NULL, - &data->batcher); + size_t nelems = batcher_pop_begin( + TSDN_NULL, &data->batcher); if (nelems == BATCHER_NO_IDX) { continue; } @@ -147,19 +147,18 @@ stress_test_thd(void *arg) { } batcher_pop_end(TSDN_NULL, &data->batcher); } else { - size_t elem_to_push_idx = prng_range_zu(&prng, - nelems_owned); - size_t elem = get_nth_set(elems_owned, - 
elem_to_push_idx); - assert_true( - elems_owned[elem], + size_t elem_to_push_idx = prng_range_zu( + &prng, nelems_owned); + size_t elem = get_nth_set( + elems_owned, elem_to_push_idx); + assert_true(elems_owned[elem], "Should own element we're about to pop"); elems_owned[elem] = false; local_push_count[elem]++; data->push_count[elem]++; nelems_owned--; - size_t idx = batcher_push_begin(TSDN_NULL, - &data->batcher, 1); + size_t idx = batcher_push_begin( + TSDN_NULL, &data->batcher, 1); assert_zu_ne(idx, BATCHER_NO_IDX, "Batcher can't be full -- we have one of its " "elems!"); @@ -171,10 +170,10 @@ stress_test_thd(void *arg) { /* Push all local elems back, flush local counts to the shared ones. */ size_t push_idx = 0; if (nelems_owned != 0) { - push_idx = batcher_push_begin(TSDN_NULL, &data->batcher, - nelems_owned); - assert_zu_ne(BATCHER_NO_IDX, push_idx, - "Should be space to push"); + push_idx = batcher_push_begin( + TSDN_NULL, &data->batcher, nelems_owned); + assert_zu_ne( + BATCHER_NO_IDX, push_idx, "Should be space to push"); } for (size_t i = 0; i < STRESS_TEST_ELEMS; i++) { if (elems_owned[i]) { @@ -183,12 +182,10 @@ stress_test_thd(void *arg) { local_push_count[i]++; data->push_count[i]++; } - atomic_fetch_add_zu( - &data->atomic_push_count[i], local_push_count[i], - ATOMIC_RELAXED); - atomic_fetch_add_zu( - &data->atomic_pop_count[i], local_pop_count[i], - ATOMIC_RELAXED); + atomic_fetch_add_zu(&data->atomic_push_count[i], + local_push_count[i], ATOMIC_RELAXED); + atomic_fetch_add_zu(&data->atomic_pop_count[i], + local_pop_count[i], ATOMIC_RELAXED); } if (nelems_owned != 0) { batcher_push_end(TSDN_NULL, &data->batcher); @@ -223,8 +220,8 @@ TEST_BEGIN(test_stress) { thd_join(threads[i], NULL); } for (int i = 0; i < STRESS_TEST_ELEMS; i++) { - assert_zu_ne(0, data.push_count[i], - "Should have done something!"); + assert_zu_ne( + 0, data.push_count[i], "Should have done something!"); assert_zu_eq(data.push_count[i], data.pop_count[i], "every element should 
be pushed and popped an equal number " "of times"); diff --git a/test/unit/bin_batching.c b/test/unit/bin_batching.c index a20062fd..a422586d 100644 --- a/test/unit/bin_batching.c +++ b/test/unit/bin_batching.c @@ -9,10 +9,10 @@ enum { typedef struct stress_thread_data_s stress_thread_data_t; struct stress_thread_data_s { - unsigned thd_id; + unsigned thd_id; atomic_zu_t *ready_thds; atomic_zu_t *done_thds; - void **to_dalloc; + void **to_dalloc; }; static atomic_zu_t push_failure_count; @@ -68,19 +68,19 @@ increment_pop_attempt(size_t elems_to_pop) { static void increment_slab_dalloc_count(unsigned slab_dalloc_count, bool list_empty) { if (slab_dalloc_count > 0) { - atomic_fetch_add_zu(&dalloc_nonzero_slab_count, 1, - ATOMIC_RELAXED); + atomic_fetch_add_zu( + &dalloc_nonzero_slab_count, 1, ATOMIC_RELAXED); } else { - atomic_fetch_add_zu(&dalloc_zero_slab_count, 1, - ATOMIC_RELAXED); + atomic_fetch_add_zu(&dalloc_zero_slab_count, 1, ATOMIC_RELAXED); } if (!list_empty) { - atomic_fetch_add_zu(&dalloc_nonempty_list_count, 1, - ATOMIC_RELAXED); + atomic_fetch_add_zu( + &dalloc_nonempty_list_count, 1, ATOMIC_RELAXED); } } -static void flush_tcache() { +static void +flush_tcache() { assert_d_eq(0, mallctl("thread.tcache.flush", NULL, NULL, NULL, 0), "Unexpected mallctl failure"); } @@ -88,7 +88,7 @@ static void flush_tcache() { static void * stress_thread(void *arg) { stress_thread_data_t *data = arg; - uint64_t prng_state = data->thd_id; + uint64_t prng_state = data->thd_id; atomic_fetch_add_zu(data->ready_thds, 1, ATOMIC_RELAXED); while (atomic_load_zu(data->ready_thds, ATOMIC_RELAXED) != STRESS_THREADS) { @@ -99,7 +99,6 @@ stress_thread(void *arg) { if (prng_range_u64(&prng_state, 3) == 0) { flush_tcache(); } - } flush_tcache(); atomic_fetch_add_zu(data->done_thds, 1, ATOMIC_RELAXED); @@ -125,9 +124,9 @@ stress_run(void (*main_thread_fn)(), int nruns) { atomic_store_zu(&dalloc_nonempty_list_count, 0, ATOMIC_RELAXED); for (int run = 0; run < nruns; run++) { - thd_t 
thds[STRESS_THREADS]; + thd_t thds[STRESS_THREADS]; stress_thread_data_t thd_datas[STRESS_THREADS]; - atomic_zu_t ready_thds; + atomic_zu_t ready_thds; atomic_store_zu(&ready_thds, 0, ATOMIC_RELAXED); atomic_zu_t done_thds; atomic_store_zu(&done_thds, 0, ATOMIC_RELAXED); @@ -164,7 +163,7 @@ stress_run(void (*main_thread_fn)(), int nruns) { static void do_allocs_frees() { - enum {NALLOCS = 32}; + enum { NALLOCS = 32 }; flush_tcache(); void *ptrs[NALLOCS]; for (int i = 0; i < NALLOCS; i++) { @@ -182,7 +181,7 @@ test_arena_reset_main_fn() { } TEST_BEGIN(test_arena_reset) { - int err; + int err; unsigned arena; unsigned old_arena; @@ -256,17 +255,16 @@ TEST_BEGIN(test_races) { "Should have seen some pop successes"); assert_zu_lt(0, atomic_load_zu(&dalloc_zero_slab_count, ATOMIC_RELAXED), "Expected some frees that didn't empty a slab"); - assert_zu_lt(0, atomic_load_zu(&dalloc_nonzero_slab_count, - ATOMIC_RELAXED), "expected some frees that emptied a slab"); - assert_zu_lt(0, atomic_load_zu(&dalloc_nonempty_list_count, - ATOMIC_RELAXED), "expected some frees that used the empty list"); + assert_zu_lt(0, + atomic_load_zu(&dalloc_nonzero_slab_count, ATOMIC_RELAXED), + "expected some frees that emptied a slab"); + assert_zu_lt(0, + atomic_load_zu(&dalloc_nonempty_list_count, ATOMIC_RELAXED), + "expected some frees that used the empty list"); } TEST_END int main(void) { - return test_no_reentrancy( - test_arena_reset, - test_races, - test_fork); + return test_no_reentrancy(test_arena_reset, test_races, test_fork); } diff --git a/test/unit/binshard.c b/test/unit/binshard.c index 040ea54d..c3e1c2d6 100644 --- a/test/unit/binshard.c +++ b/test/unit/binshard.c @@ -7,9 +7,9 @@ static void * thd_producer(void *varg) { - void **mem = varg; + void **mem = varg; unsigned arena, i; - size_t sz; + size_t sz; sz = sizeof(arena); /* Remote arena. 
*/ @@ -28,8 +28,8 @@ thd_producer(void *varg) { } TEST_BEGIN(test_producer_consumer) { - thd_t thds[NTHREADS]; - void *mem[NTHREADS][REMOTE_NALLOC]; + thd_t thds[NTHREADS]; + void *mem[NTHREADS][REMOTE_NALLOC]; unsigned i; /* Create producer threads to allocate. */ @@ -42,8 +42,8 @@ TEST_BEGIN(test_producer_consumer) { /* Remote deallocation by the current thread. */ for (i = 0; i < NTHREADS; i++) { for (unsigned j = 0; j < REMOTE_NALLOC; j++) { - expect_ptr_not_null(mem[i][j], - "Unexpected remote allocation failure"); + expect_ptr_not_null( + mem[i][j], "Unexpected remote allocation failure"); dallocx(mem[i][j], 0); } } @@ -52,7 +52,7 @@ TEST_END static void * thd_start(void *varg) { - void *ptr, *ptr2; + void *ptr, *ptr2; edata_t *edata; unsigned shard1, shard2; @@ -82,10 +82,10 @@ thd_start(void *varg) { } TEST_BEGIN(test_bin_shard_mt) { - test_skip_if(have_percpu_arena && - PERCPU_ARENA_ENABLED(opt_percpu_arena)); + test_skip_if( + have_percpu_arena && PERCPU_ARENA_ENABLED(opt_percpu_arena)); - thd_t thds[NTHREADS]; + thd_t thds[NTHREADS]; unsigned i; for (i = 0; i < NTHREADS; i++) { thd_create(&thds[i], thd_start, NULL); @@ -104,8 +104,8 @@ TEST_END TEST_BEGIN(test_bin_shard) { unsigned nbins, i; - size_t mib[4], mib2[4]; - size_t miblen, miblen2, len; + size_t mib[4], mib2[4]; + size_t miblen, miblen2, len; len = sizeof(nbins); expect_d_eq(mallctl("arenas.nbins", (void *)&nbins, &len, NULL, 0), 0, @@ -120,17 +120,19 @@ TEST_BEGIN(test_bin_shard) { for (i = 0; i < nbins; i++) { uint32_t nshards; - size_t size, sz1, sz2; + size_t size, sz1, sz2; mib[2] = i; sz1 = sizeof(nshards); - expect_d_eq(mallctlbymib(mib, miblen, (void *)&nshards, &sz1, - NULL, 0), 0, "Unexpected mallctlbymib() failure"); + expect_d_eq( + mallctlbymib(mib, miblen, (void *)&nshards, &sz1, NULL, 0), + 0, "Unexpected mallctlbymib() failure"); mib2[2] = i; sz2 = sizeof(size); - expect_d_eq(mallctlbymib(mib2, miblen2, (void *)&size, &sz2, - NULL, 0), 0, "Unexpected mallctlbymib() failure"); + 
expect_d_eq( + mallctlbymib(mib2, miblen2, (void *)&size, &sz2, NULL, 0), + 0, "Unexpected mallctlbymib() failure"); if (size >= 1 && size <= 128) { expect_u_eq(nshards, 16, "Unexpected nshards"); @@ -148,7 +150,5 @@ TEST_END int main(void) { return test_no_reentrancy( - test_bin_shard, - test_bin_shard_mt, - test_producer_consumer); + test_bin_shard, test_bin_shard_mt, test_producer_consumer); } diff --git a/test/unit/bit_util.c b/test/unit/bit_util.c index 4e9d2e16..986562d1 100644 --- a/test/unit/bit_util.c +++ b/test/unit/bit_util.c @@ -2,36 +2,37 @@ #include "jemalloc/internal/bit_util.h" -#define TEST_POW2_CEIL(t, suf, pri) do { \ - unsigned i, pow2; \ - t x; \ - \ - expect_##suf##_eq(pow2_ceil_##suf(0), 0, "Unexpected result"); \ - \ - for (i = 0; i < sizeof(t) * 8; i++) { \ - expect_##suf##_eq(pow2_ceil_##suf(((t)1) << i), ((t)1) \ - << i, "Unexpected result"); \ - } \ - \ - for (i = 2; i < sizeof(t) * 8; i++) { \ - expect_##suf##_eq(pow2_ceil_##suf((((t)1) << i) - 1), \ - ((t)1) << i, "Unexpected result"); \ - } \ - \ - for (i = 0; i < sizeof(t) * 8 - 1; i++) { \ - expect_##suf##_eq(pow2_ceil_##suf((((t)1) << i) + 1), \ - ((t)1) << (i+1), "Unexpected result"); \ - } \ - \ - for (pow2 = 1; pow2 < 25; pow2++) { \ - for (x = (((t)1) << (pow2-1)) + 1; x <= ((t)1) << pow2; \ - x++) { \ - expect_##suf##_eq(pow2_ceil_##suf(x), \ - ((t)1) << pow2, \ - "Unexpected result, x=%"pri, x); \ - } \ - } \ -} while (0) +#define TEST_POW2_CEIL(t, suf, pri) \ + do { \ + unsigned i, pow2; \ + t x; \ + \ + expect_##suf##_eq(pow2_ceil_##suf(0), 0, "Unexpected result"); \ + \ + for (i = 0; i < sizeof(t) * 8; i++) { \ + expect_##suf##_eq(pow2_ceil_##suf(((t)1) << i), \ + ((t)1) << i, "Unexpected result"); \ + } \ + \ + for (i = 2; i < sizeof(t) * 8; i++) { \ + expect_##suf##_eq(pow2_ceil_##suf((((t)1) << i) - 1), \ + ((t)1) << i, "Unexpected result"); \ + } \ + \ + for (i = 0; i < sizeof(t) * 8 - 1; i++) { \ + expect_##suf##_eq(pow2_ceil_##suf((((t)1) << i) + 1), \ + ((t)1) << (i 
+ 1), "Unexpected result"); \ + } \ + \ + for (pow2 = 1; pow2 < 25; pow2++) { \ + for (x = (((t)1) << (pow2 - 1)) + 1; \ + x <= ((t)1) << pow2; x++) { \ + expect_##suf##_eq(pow2_ceil_##suf(x), \ + ((t)1) << pow2, \ + "Unexpected result, x=%" pri, x); \ + } \ + } \ + } while (0) TEST_BEGIN(test_pow2_ceil_u64) { TEST_POW2_CEIL(uint64_t, u64, FMTu64); @@ -54,10 +55,10 @@ expect_lg_ceil_range(size_t input, unsigned answer) { expect_u_eq(0, answer, "Got %u as lg_ceil of 1", answer); return; } - expect_zu_le(input, (ZU(1) << answer), - "Got %u as lg_ceil of %zu", answer, input); - expect_zu_gt(input, (ZU(1) << (answer - 1)), - "Got %u as lg_ceil of %zu", answer, input); + expect_zu_le(input, (ZU(1) << answer), "Got %u as lg_ceil of %zu", + answer, input); + expect_zu_gt(input, (ZU(1) << (answer - 1)), "Got %u as lg_ceil of %zu", + answer, input); } static void @@ -66,8 +67,8 @@ expect_lg_floor_range(size_t input, unsigned answer) { expect_u_eq(0, answer, "Got %u as lg_floor of 1", answer); return; } - expect_zu_ge(input, (ZU(1) << answer), - "Got %u as lg_floor of %zu", answer, input); + expect_zu_ge(input, (ZU(1) << answer), "Got %u as lg_floor of %zu", + answer, input); expect_zu_lt(input, (ZU(1) << (answer + 1)), "Got %u as lg_floor of %zu", answer, input); } @@ -101,22 +102,24 @@ TEST_BEGIN(test_lg_ceil_floor) { } TEST_END -#define TEST_FFS(t, suf, test_suf, pri) do { \ - for (unsigned i = 0; i < sizeof(t) * 8; i++) { \ - for (unsigned j = 0; j <= i; j++) { \ - for (unsigned k = 0; k <= j; k++) { \ - t x = (t)1 << i; \ - x |= (t)1 << j; \ - x |= (t)1 << k; \ - expect_##test_suf##_eq(ffs_##suf(x), k, \ - "Unexpected result, x=%"pri, x); \ - } \ - } \ - } \ -} while(0) +#define TEST_FFS(t, suf, test_suf, pri) \ + do { \ + for (unsigned i = 0; i < sizeof(t) * 8; i++) { \ + for (unsigned j = 0; j <= i; j++) { \ + for (unsigned k = 0; k <= j; k++) { \ + t x = (t)1 << i; \ + x |= (t)1 << j; \ + x |= (t)1 << k; \ + expect_##test_suf##_eq(ffs_##suf(x), \ + k, "Unexpected 
result, x=%" pri, \ + x); \ + } \ + } \ + } \ + } while (0) TEST_BEGIN(test_ffs_u) { - TEST_FFS(unsigned, u, u,"u"); + TEST_FFS(unsigned, u, u, "u"); } TEST_END @@ -145,22 +148,24 @@ TEST_BEGIN(test_ffs_zu) { } TEST_END -#define TEST_FLS(t, suf, test_suf, pri) do { \ - for (unsigned i = 0; i < sizeof(t) * 8; i++) { \ - for (unsigned j = 0; j <= i; j++) { \ - for (unsigned k = 0; k <= j; k++) { \ - t x = (t)1 << i; \ - x |= (t)1 << j; \ - x |= (t)1 << k; \ - expect_##test_suf##_eq(fls_##suf(x), i, \ - "Unexpected result, x=%"pri, x); \ - } \ - } \ - } \ -} while(0) +#define TEST_FLS(t, suf, test_suf, pri) \ + do { \ + for (unsigned i = 0; i < sizeof(t) * 8; i++) { \ + for (unsigned j = 0; j <= i; j++) { \ + for (unsigned k = 0; k <= j; k++) { \ + t x = (t)1 << i; \ + x |= (t)1 << j; \ + x |= (t)1 << k; \ + expect_##test_suf##_eq(fls_##suf(x), \ + i, "Unexpected result, x=%" pri, \ + x); \ + } \ + } \ + } \ + } while (0) TEST_BEGIN(test_fls_u) { - TEST_FLS(unsigned, u, u,"u"); + TEST_FLS(unsigned, u, u, "u"); } TEST_END @@ -190,7 +195,7 @@ TEST_BEGIN(test_fls_zu) { TEST_END TEST_BEGIN(test_fls_u_slow) { - TEST_FLS(unsigned, u_slow, u,"u"); + TEST_FLS(unsigned, u_slow, u, "u"); } TEST_END @@ -280,30 +285,11 @@ TEST_END int main(void) { - return test_no_reentrancy( - test_pow2_ceil_u64, - test_pow2_ceil_u32, - test_pow2_ceil_zu, - test_lg_ceil_floor, - test_ffs_u, - test_ffs_lu, - test_ffs_llu, - test_ffs_u32, - test_ffs_u64, - test_ffs_zu, - test_fls_u, - test_fls_lu, - test_fls_llu, - test_fls_u32, - test_fls_u64, - test_fls_zu, - test_fls_u_slow, - test_fls_lu_slow, - test_fls_llu_slow, - test_popcount_u, - test_popcount_u_slow, - test_popcount_lu, - test_popcount_lu_slow, - test_popcount_llu, - test_popcount_llu_slow); + return test_no_reentrancy(test_pow2_ceil_u64, test_pow2_ceil_u32, + test_pow2_ceil_zu, test_lg_ceil_floor, test_ffs_u, test_ffs_lu, + test_ffs_llu, test_ffs_u32, test_ffs_u64, test_ffs_zu, test_fls_u, + test_fls_lu, test_fls_llu, test_fls_u32, 
test_fls_u64, test_fls_zu, + test_fls_u_slow, test_fls_lu_slow, test_fls_llu_slow, + test_popcount_u, test_popcount_u_slow, test_popcount_lu, + test_popcount_lu_slow, test_popcount_llu, test_popcount_llu_slow); } diff --git a/test/unit/bitmap.c b/test/unit/bitmap.c index 78e542b6..b3048cf3 100644 --- a/test/unit/bitmap.c +++ b/test/unit/bitmap.c @@ -9,14 +9,17 @@ test_bitmap_initializer_body(const bitmap_info_t *binfo, size_t nbits) { expect_zu_eq(bitmap_size(binfo), bitmap_size(&binfo_dyn), "Unexpected difference between static and dynamic initialization, " - "nbits=%zu", nbits); + "nbits=%zu", + nbits); expect_zu_eq(binfo->nbits, binfo_dyn.nbits, "Unexpected difference between static and dynamic initialization, " - "nbits=%zu", nbits); + "nbits=%zu", + nbits); #ifdef BITMAP_USE_TREE expect_u_eq(binfo->nlevels, binfo_dyn.nlevels, "Unexpected difference between static and dynamic initialization, " - "nbits=%zu", nbits); + "nbits=%zu", + nbits); { unsigned i; @@ -24,7 +27,8 @@ test_bitmap_initializer_body(const bitmap_info_t *binfo, size_t nbits) { expect_zu_eq(binfo->levels[i].group_offset, binfo_dyn.levels[i].group_offset, "Unexpected difference between static and dynamic " - "initialization, nbits=%zu, level=%u", nbits, i); + "initialization, nbits=%zu, level=%u", + nbits, i); } } #else @@ -34,12 +38,12 @@ test_bitmap_initializer_body(const bitmap_info_t *binfo, size_t nbits) { } TEST_BEGIN(test_bitmap_initializer) { -#define NB(nbits) { \ - if (nbits <= BITMAP_MAXBITS) { \ - bitmap_info_t binfo = \ - BITMAP_INFO_INITIALIZER(nbits); \ - test_bitmap_initializer_body(&binfo, nbits); \ - } \ +#define NB(nbits) \ + { \ + if (nbits <= BITMAP_MAXBITS) { \ + bitmap_info_t binfo = BITMAP_INFO_INITIALIZER(nbits); \ + test_bitmap_initializer_body(&binfo, nbits); \ + } \ } NBITS_TAB #undef NB @@ -47,11 +51,11 @@ TEST_BEGIN(test_bitmap_initializer) { TEST_END static size_t -test_bitmap_size_body(const bitmap_info_t *binfo, size_t nbits, - size_t prev_size) { 
+test_bitmap_size_body( + const bitmap_info_t *binfo, size_t nbits, size_t prev_size) { size_t size = bitmap_size(binfo); - expect_zu_ge(size, (nbits >> 3), - "Bitmap size is smaller than expected"); + expect_zu_ge( + size, (nbits >> 3), "Bitmap size is smaller than expected"); expect_zu_ge(size, prev_size, "Bitmap size is smaller than expected"); return size; } @@ -65,10 +69,10 @@ TEST_BEGIN(test_bitmap_size) { bitmap_info_init(&binfo, nbits); prev_size = test_bitmap_size_body(&binfo, nbits, prev_size); } -#define NB(nbits) { \ - bitmap_info_t binfo = BITMAP_INFO_INITIALIZER(nbits); \ - prev_size = test_bitmap_size_body(&binfo, nbits, \ - prev_size); \ +#define NB(nbits) \ + { \ + bitmap_info_t binfo = BITMAP_INFO_INITIALIZER(nbits); \ + prev_size = test_bitmap_size_body(&binfo, nbits, prev_size); \ } prev_size = 0; NBITS_TAB @@ -78,14 +82,14 @@ TEST_END static void test_bitmap_init_body(const bitmap_info_t *binfo, size_t nbits) { - size_t i; + size_t i; bitmap_t *bitmap = (bitmap_t *)malloc(bitmap_size(binfo)); expect_ptr_not_null(bitmap, "Unexpected malloc() failure"); bitmap_init(bitmap, binfo, false); for (i = 0; i < nbits; i++) { - expect_false(bitmap_get(bitmap, binfo, i), - "Bit should be unset"); + expect_false( + bitmap_get(bitmap, binfo, i), "Bit should be unset"); } bitmap_init(bitmap, binfo, true); @@ -104,9 +108,10 @@ TEST_BEGIN(test_bitmap_init) { bitmap_info_init(&binfo, nbits); test_bitmap_init_body(&binfo, nbits); } -#define NB(nbits) { \ - bitmap_info_t binfo = BITMAP_INFO_INITIALIZER(nbits); \ - test_bitmap_init_body(&binfo, nbits); \ +#define NB(nbits) \ + { \ + bitmap_info_t binfo = BITMAP_INFO_INITIALIZER(nbits); \ + test_bitmap_init_body(&binfo, nbits); \ } NBITS_TAB #undef NB @@ -115,7 +120,7 @@ TEST_END static void test_bitmap_set_body(const bitmap_info_t *binfo, size_t nbits) { - size_t i; + size_t i; bitmap_t *bitmap = (bitmap_t *)malloc(bitmap_size(binfo)); expect_ptr_not_null(bitmap, "Unexpected malloc() failure"); bitmap_init(bitmap, 
binfo, false); @@ -135,9 +140,10 @@ TEST_BEGIN(test_bitmap_set) { bitmap_info_init(&binfo, nbits); test_bitmap_set_body(&binfo, nbits); } -#define NB(nbits) { \ - bitmap_info_t binfo = BITMAP_INFO_INITIALIZER(nbits); \ - test_bitmap_set_body(&binfo, nbits); \ +#define NB(nbits) \ + { \ + bitmap_info_t binfo = BITMAP_INFO_INITIALIZER(nbits); \ + test_bitmap_set_body(&binfo, nbits); \ } NBITS_TAB #undef NB @@ -146,7 +152,7 @@ TEST_END static void test_bitmap_unset_body(const bitmap_info_t *binfo, size_t nbits) { - size_t i; + size_t i; bitmap_t *bitmap = (bitmap_t *)malloc(bitmap_size(binfo)); expect_ptr_not_null(bitmap, "Unexpected malloc() failure"); bitmap_init(bitmap, binfo, false); @@ -173,9 +179,10 @@ TEST_BEGIN(test_bitmap_unset) { bitmap_info_init(&binfo, nbits); test_bitmap_unset_body(&binfo, nbits); } -#define NB(nbits) { \ - bitmap_info_t binfo = BITMAP_INFO_INITIALIZER(nbits); \ - test_bitmap_unset_body(&binfo, nbits); \ +#define NB(nbits) \ + { \ + bitmap_info_t binfo = BITMAP_INFO_INITIALIZER(nbits); \ + test_bitmap_unset_body(&binfo, nbits); \ } NBITS_TAB #undef NB @@ -193,7 +200,7 @@ test_bitmap_xfu_body(const bitmap_info_t *binfo, size_t nbits) { expect_zu_eq(bitmap_ffu(bitmap, binfo, 0), i, "First unset bit should be just after previous first unset " "bit"); - expect_zu_eq(bitmap_ffu(bitmap, binfo, (i > 0) ? i-1 : i), i, + expect_zu_eq(bitmap_ffu(bitmap, binfo, (i > 0) ? i - 1 : i), i, "First unset bit should be just after previous first unset " "bit"); expect_zu_eq(bitmap_ffu(bitmap, binfo, i), i, @@ -213,7 +220,7 @@ test_bitmap_xfu_body(const bitmap_info_t *binfo, size_t nbits) { bitmap_unset(bitmap, binfo, i); expect_zu_eq(bitmap_ffu(bitmap, binfo, 0), i, "First unset bit should the bit previously unset"); - expect_zu_eq(bitmap_ffu(bitmap, binfo, (i > 0) ? i-1 : i), i, + expect_zu_eq(bitmap_ffu(bitmap, binfo, (i > 0) ? 
i - 1 : i), i, "First unset bit should the bit previously unset"); expect_zu_eq(bitmap_ffu(bitmap, binfo, i), i, "First unset bit should the bit previously unset"); @@ -232,7 +239,7 @@ test_bitmap_xfu_body(const bitmap_info_t *binfo, size_t nbits) { expect_zu_eq(bitmap_ffu(bitmap, binfo, 0), i, "First unset bit should be just after the bit previously " "set"); - expect_zu_eq(bitmap_ffu(bitmap, binfo, (i > 0) ? i-1 : i), i, + expect_zu_eq(bitmap_ffu(bitmap, binfo, (i > 0) ? i - 1 : i), i, "First unset bit should be just after the bit previously " "set"); expect_zu_eq(bitmap_ffu(bitmap, binfo, i), i, @@ -245,7 +252,8 @@ test_bitmap_xfu_body(const bitmap_info_t *binfo, size_t nbits) { } expect_zu_eq(bitmap_ffu(bitmap, binfo, 0), nbits - 1, "First unset bit should be the last bit"); - expect_zu_eq(bitmap_ffu(bitmap, binfo, (nbits > 1) ? nbits-2 : nbits-1), + expect_zu_eq( + bitmap_ffu(bitmap, binfo, (nbits > 1) ? nbits - 2 : nbits - 1), nbits - 1, "First unset bit should be the last bit"); expect_zu_eq(bitmap_ffu(bitmap, binfo, nbits - 1), nbits - 1, "First unset bit should be the last bit"); @@ -258,26 +266,26 @@ test_bitmap_xfu_body(const bitmap_info_t *binfo, size_t nbits) { * bitmap_ffu() finds the correct bit for all five min_bit cases. 
*/ if (nbits >= 3) { - for (size_t i = 0; i < nbits-2; i++) { + for (size_t i = 0; i < nbits - 2; i++) { bitmap_unset(bitmap, binfo, i); - bitmap_unset(bitmap, binfo, i+2); + bitmap_unset(bitmap, binfo, i + 2); if (i > 0) { - expect_zu_eq(bitmap_ffu(bitmap, binfo, i-1), i, - "Unexpected first unset bit"); + expect_zu_eq(bitmap_ffu(bitmap, binfo, i - 1), + i, "Unexpected first unset bit"); } expect_zu_eq(bitmap_ffu(bitmap, binfo, i), i, "Unexpected first unset bit"); - expect_zu_eq(bitmap_ffu(bitmap, binfo, i+1), i+2, + expect_zu_eq(bitmap_ffu(bitmap, binfo, i + 1), i + 2, "Unexpected first unset bit"); - expect_zu_eq(bitmap_ffu(bitmap, binfo, i+2), i+2, + expect_zu_eq(bitmap_ffu(bitmap, binfo, i + 2), i + 2, "Unexpected first unset bit"); if (i + 3 < nbits) { - expect_zu_eq(bitmap_ffu(bitmap, binfo, i+3), + expect_zu_eq(bitmap_ffu(bitmap, binfo, i + 3), nbits, "Unexpected first unset bit"); } expect_zu_eq(bitmap_sfu(bitmap, binfo), i, "Unexpected first unset bit"); - expect_zu_eq(bitmap_sfu(bitmap, binfo), i+2, + expect_zu_eq(bitmap_sfu(bitmap, binfo), i + 2, "Unexpected first unset bit"); } } @@ -288,24 +296,24 @@ test_bitmap_xfu_body(const bitmap_info_t *binfo, size_t nbits) { * cases. 
*/ if (nbits >= 3) { - bitmap_unset(bitmap, binfo, nbits-1); - for (size_t i = 0; i < nbits-1; i++) { + bitmap_unset(bitmap, binfo, nbits - 1); + for (size_t i = 0; i < nbits - 1; i++) { bitmap_unset(bitmap, binfo, i); if (i > 0) { - expect_zu_eq(bitmap_ffu(bitmap, binfo, i-1), i, - "Unexpected first unset bit"); + expect_zu_eq(bitmap_ffu(bitmap, binfo, i - 1), + i, "Unexpected first unset bit"); } expect_zu_eq(bitmap_ffu(bitmap, binfo, i), i, "Unexpected first unset bit"); - expect_zu_eq(bitmap_ffu(bitmap, binfo, i+1), nbits-1, - "Unexpected first unset bit"); - expect_zu_eq(bitmap_ffu(bitmap, binfo, nbits-1), - nbits-1, "Unexpected first unset bit"); + expect_zu_eq(bitmap_ffu(bitmap, binfo, i + 1), + nbits - 1, "Unexpected first unset bit"); + expect_zu_eq(bitmap_ffu(bitmap, binfo, nbits - 1), + nbits - 1, "Unexpected first unset bit"); expect_zu_eq(bitmap_sfu(bitmap, binfo), i, "Unexpected first unset bit"); } - expect_zu_eq(bitmap_sfu(bitmap, binfo), nbits-1, + expect_zu_eq(bitmap_sfu(bitmap, binfo), nbits - 1, "Unexpected first unset bit"); } @@ -322,9 +330,10 @@ TEST_BEGIN(test_bitmap_xfu) { bitmap_info_init(&binfo, nbits); test_bitmap_xfu_body(&binfo, nbits); } -#define NB(nbits) { \ - bitmap_info_t binfo = BITMAP_INFO_INITIALIZER(nbits); \ - test_bitmap_xfu_body(&binfo, nbits); \ +#define NB(nbits) \ + { \ + bitmap_info_t binfo = BITMAP_INFO_INITIALIZER(nbits); \ + test_bitmap_xfu_body(&binfo, nbits); \ } NBITS_TAB #undef NB @@ -333,11 +342,6 @@ TEST_END int main(void) { - return test( - test_bitmap_initializer, - test_bitmap_size, - test_bitmap_init, - test_bitmap_set, - test_bitmap_unset, - test_bitmap_xfu); + return test(test_bitmap_initializer, test_bitmap_size, test_bitmap_init, + test_bitmap_set, test_bitmap_unset, test_bitmap_xfu); } diff --git a/test/unit/buf_writer.c b/test/unit/buf_writer.c index d5e63a0e..643e430c 100644 --- a/test/unit/buf_writer.c +++ b/test/unit/buf_writer.c @@ -5,24 +5,24 @@ #define TEST_BUF_SIZE 16 #define UNIT_MAX 
(TEST_BUF_SIZE * 3) -static size_t test_write_len; -static char test_buf[TEST_BUF_SIZE]; +static size_t test_write_len; +static char test_buf[TEST_BUF_SIZE]; static uint64_t arg; static uint64_t arg_store; static void test_write_cb(void *cbopaque, const char *s) { size_t prev_test_write_len = test_write_len; - test_write_len += strlen(s); /* only increase the length */ + test_write_len += strlen(s); /* only increase the length */ arg_store = *(uint64_t *)cbopaque; /* only pass along the argument */ - assert_zu_le(prev_test_write_len, test_write_len, - "Test write overflowed"); + assert_zu_le( + prev_test_write_len, test_write_len, "Test write overflowed"); } static void test_buf_writer_body(tsdn_t *tsdn, buf_writer_t *buf_writer) { - char s[UNIT_MAX + 1]; - size_t n_unit, remain, i; + char s[UNIT_MAX + 1]; + size_t n_unit, remain, i; ssize_t unit; assert(buf_writer->buf != NULL); @@ -41,7 +41,8 @@ test_buf_writer_body(tsdn_t *tsdn, buf_writer_t *buf_writer) { remain += unit; if (remain > buf_writer->buf_size) { /* Flushes should have happened. 
*/ - assert_u64_eq(arg_store, arg, "Call " + assert_u64_eq(arg_store, arg, + "Call " "back argument didn't get through"); remain %= buf_writer->buf_size; if (remain == 0) { @@ -51,12 +52,14 @@ test_buf_writer_body(tsdn_t *tsdn, buf_writer_t *buf_writer) { } assert_zu_eq(test_write_len + remain, i * unit, "Incorrect length after writing %zu strings" - " of length %zu", i, unit); + " of length %zu", + i, unit); } buf_writer_flush(buf_writer); expect_zu_eq(test_write_len, n_unit * unit, "Incorrect length after flushing at the end of" - " writing %zu strings of length %zu", n_unit, unit); + " writing %zu strings of length %zu", + n_unit, unit); } } buf_writer_terminate(tsdn, buf_writer); @@ -64,9 +67,9 @@ test_buf_writer_body(tsdn_t *tsdn, buf_writer_t *buf_writer) { TEST_BEGIN(test_buf_write_static) { buf_writer_t buf_writer; - tsdn_t *tsdn = tsdn_fetch(); + tsdn_t *tsdn = tsdn_fetch(); assert_false(buf_writer_init(tsdn, &buf_writer, test_write_cb, &arg, - test_buf, TEST_BUF_SIZE), + test_buf, TEST_BUF_SIZE), "buf_writer_init() should not encounter error on static buffer"); test_buf_writer_body(tsdn, &buf_writer); } @@ -74,22 +77,24 @@ TEST_END TEST_BEGIN(test_buf_write_dynamic) { buf_writer_t buf_writer; - tsdn_t *tsdn = tsdn_fetch(); + tsdn_t *tsdn = tsdn_fetch(); assert_false(buf_writer_init(tsdn, &buf_writer, test_write_cb, &arg, - NULL, TEST_BUF_SIZE), "buf_writer_init() should not OOM"); + NULL, TEST_BUF_SIZE), + "buf_writer_init() should not OOM"); test_buf_writer_body(tsdn, &buf_writer); } TEST_END TEST_BEGIN(test_buf_write_oom) { buf_writer_t buf_writer; - tsdn_t *tsdn = tsdn_fetch(); + tsdn_t *tsdn = tsdn_fetch(); assert_true(buf_writer_init(tsdn, &buf_writer, test_write_cb, &arg, - NULL, SC_LARGE_MAXCLASS + 1), "buf_writer_init() should OOM"); + NULL, SC_LARGE_MAXCLASS + 1), + "buf_writer_init() should OOM"); assert(buf_writer.buf == NULL); - char s[UNIT_MAX + 1]; - size_t n_unit, i; + char s[UNIT_MAX + 1]; + size_t n_unit, i; ssize_t unit; memset(s, 'a', 
UNIT_MAX); @@ -107,20 +112,22 @@ TEST_BEGIN(test_buf_write_oom) { "Call back argument didn't get through"); assert_zu_eq(test_write_len, i * unit, "Incorrect length after writing %zu strings" - " of length %zu", i, unit); + " of length %zu", + i, unit); } buf_writer_flush(&buf_writer); expect_zu_eq(test_write_len, n_unit * unit, "Incorrect length after flushing at the end of" - " writing %zu strings of length %zu", n_unit, unit); + " writing %zu strings of length %zu", + n_unit, unit); } } buf_writer_terminate(tsdn, &buf_writer); } TEST_END -static int test_read_count; -static size_t test_read_len; +static int test_read_count; +static size_t test_read_len; static uint64_t arg_sum; ssize_t @@ -142,8 +149,8 @@ test_read_cb(void *cbopaque, void *buf, size_t limit) { memset(buf, 'a', read_len); size_t prev_test_read_len = test_read_len; test_read_len += read_len; - assert_zu_le(prev_test_read_len, test_read_len, - "Test read overflowed"); + assert_zu_le( + prev_test_read_len, test_read_len, "Test read overflowed"); return read_len; } } @@ -168,9 +175,9 @@ test_buf_writer_pipe_body(tsdn_t *tsdn, buf_writer_t *buf_writer) { TEST_BEGIN(test_buf_write_pipe) { buf_writer_t buf_writer; - tsdn_t *tsdn = tsdn_fetch(); + tsdn_t *tsdn = tsdn_fetch(); assert_false(buf_writer_init(tsdn, &buf_writer, test_write_cb, &arg, - test_buf, TEST_BUF_SIZE), + test_buf, TEST_BUF_SIZE), "buf_writer_init() should not encounter error on static buffer"); test_buf_writer_pipe_body(tsdn, &buf_writer); } @@ -178,19 +185,16 @@ TEST_END TEST_BEGIN(test_buf_write_pipe_oom) { buf_writer_t buf_writer; - tsdn_t *tsdn = tsdn_fetch(); + tsdn_t *tsdn = tsdn_fetch(); assert_true(buf_writer_init(tsdn, &buf_writer, test_write_cb, &arg, - NULL, SC_LARGE_MAXCLASS + 1), "buf_writer_init() should OOM"); + NULL, SC_LARGE_MAXCLASS + 1), + "buf_writer_init() should OOM"); test_buf_writer_pipe_body(tsdn, &buf_writer); } TEST_END int main(void) { - return test( - test_buf_write_static, - test_buf_write_dynamic, - 
test_buf_write_oom, - test_buf_write_pipe, - test_buf_write_pipe_oom); + return test(test_buf_write_static, test_buf_write_dynamic, + test_buf_write_oom, test_buf_write_pipe, test_buf_write_pipe_oom); } diff --git a/test/unit/cache_bin.c b/test/unit/cache_bin.c index 1bb750d7..dc1dbe36 100644 --- a/test/unit/cache_bin.c +++ b/test/unit/cache_bin.c @@ -3,7 +3,7 @@ static void do_fill_test(cache_bin_t *bin, void **ptrs, cache_bin_sz_t ncached_max, cache_bin_sz_t nfill_attempt, cache_bin_sz_t nfill_succeed) { - bool success; + bool success; void *ptr; assert_true(cache_bin_ncached_get_local(bin) == 0, ""); CACHE_BIN_PTR_ARRAY_DECLARE(arr, nfill_attempt); @@ -12,17 +12,16 @@ do_fill_test(cache_bin_t *bin, void **ptrs, cache_bin_sz_t ncached_max, arr.ptr[i] = &ptrs[i]; } cache_bin_finish_fill(bin, &arr, nfill_succeed); - expect_true(cache_bin_ncached_get_local(bin) == nfill_succeed, - ""); + expect_true(cache_bin_ncached_get_local(bin) == nfill_succeed, ""); cache_bin_low_water_set(bin); for (cache_bin_sz_t i = 0; i < nfill_succeed; i++) { ptr = cache_bin_alloc(bin, &success); expect_true(success, ""); - expect_ptr_eq(ptr, (void *)&ptrs[i], - "Should pop in order filled"); - expect_true(cache_bin_low_water_get(bin) - == nfill_succeed - i - 1, ""); + expect_ptr_eq( + ptr, (void *)&ptrs[i], "Should pop in order filled"); + expect_true( + cache_bin_low_water_get(bin) == nfill_succeed - i - 1, ""); } expect_true(cache_bin_ncached_get_local(bin) == 0, ""); expect_true(cache_bin_low_water_get(bin) == 0, ""); @@ -46,16 +45,15 @@ do_flush_test(cache_bin_t *bin, void **ptrs, cache_bin_sz_t nfill, } cache_bin_finish_flush(bin, &arr, nflush); - expect_true(cache_bin_ncached_get_local(bin) == nfill - nflush, - ""); + expect_true(cache_bin_ncached_get_local(bin) == nfill - nflush, ""); while (cache_bin_ncached_get_local(bin) > 0) { cache_bin_alloc(bin, &success); } } static void -do_batch_alloc_test(cache_bin_t *bin, void **ptrs, cache_bin_sz_t nfill, - size_t batch) { 
+do_batch_alloc_test( + cache_bin_t *bin, void **ptrs, cache_bin_sz_t nfill, size_t batch) { assert_true(cache_bin_ncached_get_local(bin) == 0, ""); CACHE_BIN_PTR_ARRAY_DECLARE(arr, nfill); cache_bin_init_ptr_array_for_fill(bin, &arr, nfill); @@ -72,8 +70,8 @@ do_batch_alloc_test(cache_bin_t *bin, void **ptrs, cache_bin_sz_t nfill, for (cache_bin_sz_t i = 0; i < (cache_bin_sz_t)n; i++) { expect_ptr_eq(out[i], &ptrs[i], ""); } - expect_true(cache_bin_low_water_get(bin) == nfill - - (cache_bin_sz_t)n, ""); + expect_true( + cache_bin_low_water_get(bin) == nfill - (cache_bin_sz_t)n, ""); while (cache_bin_ncached_get_local(bin) > 0) { bool success; cache_bin_alloc(bin, &success); @@ -98,8 +96,8 @@ test_bin_init(cache_bin_t *bin, cache_bin_info_t *info) { TEST_BEGIN(test_cache_bin) { const int ncached_max = 100; - bool success; - void *ptr; + bool success; + void *ptr; cache_bin_info_t info; cache_bin_info_init(&info, ncached_max); @@ -125,7 +123,7 @@ TEST_BEGIN(test_cache_bin) { */ void **ptrs = mallocx(sizeof(void *) * (ncached_max + 1), 0); assert_ptr_not_null(ptrs, "Unexpected mallocx failure"); - for (cache_bin_sz_t i = 0; i < ncached_max; i++) { + for (cache_bin_sz_t i = 0; i < ncached_max; i++) { expect_true(cache_bin_ncached_get_local(&bin) == i, ""); success = cache_bin_dalloc_easy(&bin, &ptrs[i]); expect_true(success, @@ -133,18 +131,17 @@ TEST_BEGIN(test_cache_bin) { expect_true(cache_bin_low_water_get(&bin) == 0, "Pushes and pops shouldn't change low water of zero."); } - expect_true(cache_bin_ncached_get_local(&bin) == ncached_max, - ""); + expect_true(cache_bin_ncached_get_local(&bin) == ncached_max, ""); success = cache_bin_dalloc_easy(&bin, &ptrs[ncached_max]); expect_false(success, "Shouldn't be able to dalloc into a full bin."); cache_bin_low_water_set(&bin); for (cache_bin_sz_t i = 0; i < ncached_max; i++) { - expect_true(cache_bin_low_water_get(&bin) - == ncached_max - i, ""); - expect_true(cache_bin_ncached_get_local(&bin) - == ncached_max - i, ""); 
+ expect_true( + cache_bin_low_water_get(&bin) == ncached_max - i, ""); + expect_true( + cache_bin_ncached_get_local(&bin) == ncached_max - i, ""); /* * This should fail -- the easy variant can't change the low * water mark. @@ -152,20 +149,21 @@ TEST_BEGIN(test_cache_bin) { ptr = cache_bin_alloc_easy(&bin, &success); expect_ptr_null(ptr, ""); expect_false(success, ""); - expect_true(cache_bin_low_water_get(&bin) - == ncached_max - i, ""); - expect_true(cache_bin_ncached_get_local(&bin) - == ncached_max - i, ""); + expect_true( + cache_bin_low_water_get(&bin) == ncached_max - i, ""); + expect_true( + cache_bin_ncached_get_local(&bin) == ncached_max - i, ""); /* This should succeed, though. */ ptr = cache_bin_alloc(&bin, &success); expect_true(success, ""); expect_ptr_eq(ptr, &ptrs[ncached_max - i - 1], "Alloc should pop in stack order"); - expect_true(cache_bin_low_water_get(&bin) - == ncached_max - i - 1, ""); - expect_true(cache_bin_ncached_get_local(&bin) - == ncached_max - i - 1, ""); + expect_true( + cache_bin_low_water_get(&bin) == ncached_max - i - 1, ""); + expect_true( + cache_bin_ncached_get_local(&bin) == ncached_max - i - 1, + ""); } /* Now we're empty -- all alloc attempts should fail. */ expect_true(cache_bin_ncached_get_local(&bin) == 0, ""); @@ -184,8 +182,7 @@ TEST_BEGIN(test_cache_bin) { for (cache_bin_sz_t i = ncached_max / 2; i < ncached_max; i++) { cache_bin_dalloc_easy(&bin, &ptrs[i]); } - expect_true(cache_bin_ncached_get_local(&bin) == ncached_max, - ""); + expect_true(cache_bin_ncached_get_local(&bin) == ncached_max, ""); for (cache_bin_sz_t i = ncached_max - 1; i >= ncached_max / 2; i--) { /* * Size is bigger than low water -- the reduced version should @@ -208,20 +205,16 @@ TEST_BEGIN(test_cache_bin) { /* Test fill. */ /* Try to fill all, succeed fully. */ - do_fill_test(&bin, ptrs, ncached_max, ncached_max, - ncached_max); + do_fill_test(&bin, ptrs, ncached_max, ncached_max, ncached_max); /* Try to fill all, succeed partially. 
*/ - do_fill_test(&bin, ptrs, ncached_max, ncached_max, - ncached_max / 2); + do_fill_test(&bin, ptrs, ncached_max, ncached_max, ncached_max / 2); /* Try to fill all, fail completely. */ do_fill_test(&bin, ptrs, ncached_max, ncached_max, 0); /* Try to fill some, succeed fully. */ - do_fill_test(&bin, ptrs, ncached_max, ncached_max / 2, - ncached_max / 2); + do_fill_test(&bin, ptrs, ncached_max, ncached_max / 2, ncached_max / 2); /* Try to fill some, succeed partially. */ - do_fill_test(&bin, ptrs, ncached_max, ncached_max / 2, - ncached_max / 4); + do_fill_test(&bin, ptrs, ncached_max, ncached_max / 2, ncached_max / 4); /* Try to fill some, fail completely. */ do_fill_test(&bin, ptrs, ncached_max, ncached_max / 2, 0); @@ -262,11 +255,10 @@ TEST_END static void do_flush_stashed_test(cache_bin_t *bin, void **ptrs, cache_bin_sz_t nfill, cache_bin_sz_t nstash) { - expect_true(cache_bin_ncached_get_local(bin) == 0, - "Bin not empty"); - expect_true(cache_bin_nstashed_get_local(bin) == 0, - "Bin not empty"); - expect_true(nfill + nstash <= bin->bin_info.ncached_max, "Exceeded max"); + expect_true(cache_bin_ncached_get_local(bin) == 0, "Bin not empty"); + expect_true(cache_bin_nstashed_get_local(bin) == 0, "Bin not empty"); + expect_true( + nfill + nstash <= bin->bin_info.ncached_max, "Exceeded max"); bool ret; /* Fill */ @@ -274,16 +266,16 @@ do_flush_stashed_test(cache_bin_t *bin, void **ptrs, cache_bin_sz_t nfill, ret = cache_bin_dalloc_easy(bin, &ptrs[i]); expect_true(ret, "Unexpected fill failure"); } - expect_true(cache_bin_ncached_get_local(bin) == nfill, - "Wrong cached count"); + expect_true( + cache_bin_ncached_get_local(bin) == nfill, "Wrong cached count"); /* Stash */ for (cache_bin_sz_t i = 0; i < nstash; i++) { ret = cache_bin_stash(bin, &ptrs[i + nfill]); expect_true(ret, "Unexpected stash failure"); } - expect_true(cache_bin_nstashed_get_local(bin) == nstash, - "Wrong stashed count"); + expect_true( + cache_bin_nstashed_get_local(bin) == nstash, "Wrong 
stashed count"); if (nfill + nstash == bin->bin_info.ncached_max) { ret = cache_bin_dalloc_easy(bin, &ptrs[0]); @@ -300,20 +292,20 @@ do_flush_stashed_test(cache_bin_t *bin, void **ptrs, cache_bin_sz_t nfill, expect_true((uintptr_t)ptr < (uintptr_t)&ptrs[nfill], "Should not alloc stashed ptrs"); } - expect_true(cache_bin_ncached_get_local(bin) == 0, - "Wrong cached count"); - expect_true(cache_bin_nstashed_get_local(bin) == nstash, - "Wrong stashed count"); + expect_true( + cache_bin_ncached_get_local(bin) == 0, "Wrong cached count"); + expect_true( + cache_bin_nstashed_get_local(bin) == nstash, "Wrong stashed count"); cache_bin_alloc(bin, &ret); expect_false(ret, "Should not alloc stashed"); /* Clear stashed ones */ cache_bin_finish_flush_stashed(bin); - expect_true(cache_bin_ncached_get_local(bin) == 0, - "Wrong cached count"); - expect_true(cache_bin_nstashed_get_local(bin) == 0, - "Wrong stashed count"); + expect_true( + cache_bin_ncached_get_local(bin) == 0, "Wrong cached count"); + expect_true( + cache_bin_nstashed_get_local(bin) == 0, "Wrong stashed count"); cache_bin_alloc(bin, &ret); expect_false(ret, "Should not alloc from empty bin"); @@ -322,7 +314,7 @@ do_flush_stashed_test(cache_bin_t *bin, void **ptrs, cache_bin_sz_t nfill, TEST_BEGIN(test_cache_bin_stash) { const int ncached_max = 100; - cache_bin_t bin; + cache_bin_t bin; cache_bin_info_t info; cache_bin_info_init(&info, ncached_max); test_bin_init(&bin, &info); @@ -335,15 +327,17 @@ TEST_BEGIN(test_cache_bin_stash) { assert_ptr_not_null(ptrs, "Unexpected mallocx failure"); bool ret; for (cache_bin_sz_t i = 0; i < ncached_max; i++) { - expect_true(cache_bin_ncached_get_local(&bin) == - (i / 2 + i % 2), "Wrong ncached value"); - expect_true(cache_bin_nstashed_get_local(&bin) == - i / 2, "Wrong nstashed value"); + expect_true( + cache_bin_ncached_get_local(&bin) == (i / 2 + i % 2), + "Wrong ncached value"); + expect_true(cache_bin_nstashed_get_local(&bin) == i / 2, + "Wrong nstashed value"); if (i % 
2 == 0) { cache_bin_dalloc_easy(&bin, &ptrs[i]); } else { ret = cache_bin_stash(&bin, &ptrs[i]); - expect_true(ret, "Should be able to stash into a " + expect_true(ret, + "Should be able to stash into a " "non-full cache bin"); } } @@ -360,7 +354,8 @@ TEST_BEGIN(test_cache_bin_stash) { expect_true(diff % 2 == 0, "Should be able to alloc"); } else { expect_false(ret, "Should not alloc stashed"); - expect_true(cache_bin_nstashed_get_local(&bin) == ncached_max / 2, + expect_true(cache_bin_nstashed_get_local(&bin) + == ncached_max / 2, "Wrong nstashed value"); } } @@ -368,19 +363,14 @@ TEST_BEGIN(test_cache_bin_stash) { test_bin_init(&bin, &info); do_flush_stashed_test(&bin, ptrs, ncached_max, 0); do_flush_stashed_test(&bin, ptrs, 0, ncached_max); - do_flush_stashed_test(&bin, ptrs, ncached_max / 2, - ncached_max / 2); - do_flush_stashed_test(&bin, ptrs, ncached_max / 4, - ncached_max / 2); - do_flush_stashed_test(&bin, ptrs, ncached_max / 2, - ncached_max / 4); - do_flush_stashed_test(&bin, ptrs, ncached_max / 4, - ncached_max / 4); + do_flush_stashed_test(&bin, ptrs, ncached_max / 2, ncached_max / 2); + do_flush_stashed_test(&bin, ptrs, ncached_max / 4, ncached_max / 2); + do_flush_stashed_test(&bin, ptrs, ncached_max / 2, ncached_max / 4); + do_flush_stashed_test(&bin, ptrs, ncached_max / 4, ncached_max / 4); } TEST_END int main(void) { - return test(test_cache_bin, - test_cache_bin_stash); + return test(test_cache_bin, test_cache_bin_stash); } diff --git a/test/unit/ckh.c b/test/unit/ckh.c index 36142acd..f07892ac 100644 --- a/test/unit/ckh.c +++ b/test/unit/ckh.c @@ -2,55 +2,51 @@ TEST_BEGIN(test_new_delete) { tsd_t *tsd; - ckh_t ckh; + ckh_t ckh; tsd = tsd_fetch(); - expect_false(ckh_new(tsd, &ckh, 2, ckh_string_hash, - ckh_string_keycomp), "Unexpected ckh_new() error"); + expect_false(ckh_new(tsd, &ckh, 2, ckh_string_hash, ckh_string_keycomp), + "Unexpected ckh_new() error"); ckh_delete(tsd, &ckh); - expect_false(ckh_new(tsd, &ckh, 3, ckh_pointer_hash, - 
ckh_pointer_keycomp), "Unexpected ckh_new() error"); + expect_false( + ckh_new(tsd, &ckh, 3, ckh_pointer_hash, ckh_pointer_keycomp), + "Unexpected ckh_new() error"); ckh_delete(tsd, &ckh); } TEST_END TEST_BEGIN(test_count_insert_search_remove) { - tsd_t *tsd; - ckh_t ckh; - const char *strs[] = { - "a string", - "A string", - "a string.", - "A string." - }; + tsd_t *tsd; + ckh_t ckh; + const char *strs[] = {"a string", "A string", "a string.", "A string."}; const char *missing = "A string not in the hash table."; - size_t i; + size_t i; tsd = tsd_fetch(); - expect_false(ckh_new(tsd, &ckh, 2, ckh_string_hash, - ckh_string_keycomp), "Unexpected ckh_new() error"); + expect_false(ckh_new(tsd, &ckh, 2, ckh_string_hash, ckh_string_keycomp), + "Unexpected ckh_new() error"); expect_zu_eq(ckh_count(&ckh), 0, "ckh_count() should return %zu, but it returned %zu", ZU(0), ckh_count(&ckh)); /* Insert. */ - for (i = 0; i < sizeof(strs)/sizeof(const char *); i++) { + for (i = 0; i < sizeof(strs) / sizeof(const char *); i++) { ckh_insert(tsd, &ckh, strs[i], strs[i]); - expect_zu_eq(ckh_count(&ckh), i+1, - "ckh_count() should return %zu, but it returned %zu", i+1, + expect_zu_eq(ckh_count(&ckh), i + 1, + "ckh_count() should return %zu, but it returned %zu", i + 1, ckh_count(&ckh)); } /* Search. */ - for (i = 0; i < sizeof(strs)/sizeof(const char *); i++) { + for (i = 0; i < sizeof(strs) / sizeof(const char *); i++) { union { - void *p; + void *p; const char *s; } k, v; - void **kp, **vp; + void **kp, **vp; const char *ks, *vs; kp = (i & 1) ? &k.p : NULL; @@ -62,21 +58,21 @@ TEST_BEGIN(test_count_insert_search_remove) { ks = (i & 1) ? strs[i] : (const char *)NULL; vs = (i & 2) ? 
strs[i] : (const char *)NULL; - expect_ptr_eq((void *)ks, (void *)k.s, "Key mismatch, i=%zu", - i); - expect_ptr_eq((void *)vs, (void *)v.s, "Value mismatch, i=%zu", - i); + expect_ptr_eq( + (void *)ks, (void *)k.s, "Key mismatch, i=%zu", i); + expect_ptr_eq( + (void *)vs, (void *)v.s, "Value mismatch, i=%zu", i); } expect_true(ckh_search(&ckh, missing, NULL, NULL), "Unexpected ckh_search() success"); /* Remove. */ - for (i = 0; i < sizeof(strs)/sizeof(const char *); i++) { + for (i = 0; i < sizeof(strs) / sizeof(const char *); i++) { union { - void *p; + void *p; const char *s; } k, v; - void **kp, **vp; + void **kp, **vp; const char *ks, *vs; kp = (i & 1) ? &k.p : NULL; @@ -88,14 +84,14 @@ TEST_BEGIN(test_count_insert_search_remove) { ks = (i & 1) ? strs[i] : (const char *)NULL; vs = (i & 2) ? strs[i] : (const char *)NULL; - expect_ptr_eq((void *)ks, (void *)k.s, "Key mismatch, i=%zu", - i); - expect_ptr_eq((void *)vs, (void *)v.s, "Value mismatch, i=%zu", - i); + expect_ptr_eq( + (void *)ks, (void *)k.s, "Key mismatch, i=%zu", i); + expect_ptr_eq( + (void *)vs, (void *)v.s, "Value mismatch, i=%zu", i); expect_zu_eq(ckh_count(&ckh), - sizeof(strs)/sizeof(const char *) - i - 1, + sizeof(strs) / sizeof(const char *) - i - 1, "ckh_count() should return %zu, but it returned %zu", - sizeof(strs)/sizeof(const char *) - i - 1, + sizeof(strs) / sizeof(const char *) - i - 1, ckh_count(&ckh)); } @@ -106,18 +102,19 @@ TEST_END TEST_BEGIN(test_insert_iter_remove) { #define NITEMS ZU(1000) tsd_t *tsd; - ckh_t ckh; + ckh_t ckh; void **p[NITEMS]; - void *q, *r; + void *q, *r; size_t i; tsd = tsd_fetch(); - expect_false(ckh_new(tsd, &ckh, 2, ckh_pointer_hash, - ckh_pointer_keycomp), "Unexpected ckh_new() error"); + expect_false( + ckh_new(tsd, &ckh, 2, ckh_pointer_hash, ckh_pointer_keycomp), + "Unexpected ckh_new() error"); for (i = 0; i < NITEMS; i++) { - p[i] = mallocx(i+1, 0); + p[i] = mallocx(i + 1, 0); expect_ptr_not_null(p[i], "Unexpected mallocx() failure"); } @@ -151,7 
+148,7 @@ TEST_BEGIN(test_insert_iter_remove) { } { - bool seen[NITEMS]; + bool seen[NITEMS]; size_t tabind; memset(seen, 0, sizeof(seen)); @@ -195,8 +192,8 @@ TEST_BEGIN(test_insert_iter_remove) { } expect_zu_eq(ckh_count(&ckh), 0, - "ckh_count() should return %zu, but it returned %zu", - ZU(0), ckh_count(&ckh)); + "ckh_count() should return %zu, but it returned %zu", ZU(0), + ckh_count(&ckh)); ckh_delete(tsd, &ckh); #undef NITEMS } @@ -204,8 +201,6 @@ TEST_END int main(void) { - return test( - test_new_delete, - test_count_insert_search_remove, + return test(test_new_delete, test_count_insert_search_remove, test_insert_iter_remove); } diff --git a/test/unit/counter.c b/test/unit/counter.c index 277baac1..04100daa 100644 --- a/test/unit/counter.c +++ b/test/unit/counter.c @@ -11,7 +11,7 @@ TEST_BEGIN(test_counter_accum) { counter_accum_init(&c, interval); tsd_t *tsd = tsd_fetch(); - bool trigger; + bool trigger; for (unsigned i = 0; i < n; i++) { trigger = counter_accum(tsd_tsdn(tsd), &c, increment); accum += increment; @@ -39,8 +39,8 @@ static void * thd_start(void *varg) { counter_accum_t *c = (counter_accum_t *)varg; - tsd_t *tsd = tsd_fetch(); - bool trigger; + tsd_t *tsd = tsd_fetch(); + bool trigger; uintptr_t n_triggered = 0; for (unsigned i = 0; i < N_ITER_THD; i++) { trigger = counter_accum(tsd_tsdn(tsd), c, ITER_INCREMENT); @@ -50,12 +50,11 @@ thd_start(void *varg) { return (void *)n_triggered; } - TEST_BEGIN(test_counter_mt) { counter_accum_t shared_c; counter_accum_init(&shared_c, interval); - thd_t thds[N_THDS]; + thd_t thds[N_THDS]; unsigned i; for (i = 0; i < N_THDS; i++) { thd_create(&thds[i], thd_start, (void *)&shared_c); @@ -74,7 +73,5 @@ TEST_END int main(void) { - return test( - test_counter_accum, - test_counter_mt); + return test(test_counter_accum, test_counter_mt); } diff --git a/test/unit/decay.c b/test/unit/decay.c index bdb6d0a3..10740a85 100644 --- a/test/unit/decay.c +++ b/test/unit/decay.c @@ -22,12 +22,11 @@ 
TEST_BEGIN(test_decay_init) { TEST_END TEST_BEGIN(test_decay_ms_valid) { - expect_false(decay_ms_valid(-7), - "Misclassified negative decay as valid"); + expect_false( + decay_ms_valid(-7), "Misclassified negative decay as valid"); expect_true(decay_ms_valid(-1), "Misclassified -1 (never decay) as invalid decay"); - expect_true(decay_ms_valid(8943), - "Misclassified valid decay"); + expect_true(decay_ms_valid(8943), "Misclassified valid decay"); if (SSIZE_MAX > NSTIME_SEC_MAX) { expect_false( decay_ms_valid((ssize_t)(NSTIME_SEC_MAX * KQU(1000) + 39)), @@ -111,12 +110,12 @@ TEST_BEGIN(test_decay_empty) { assert_false(err, ""); uint64_t time_between_calls = decay_epoch_duration_ns(&decay) / 5; - int nepochs = 0; + int nepochs = 0; for (uint64_t i = 0; i < decay_ns / time_between_calls * 10; i++) { size_t dirty_pages = 0; nstime_init(&curtime, i * time_between_calls); - bool epoch_advanced = decay_maybe_advance_epoch(&decay, - &curtime, dirty_pages); + bool epoch_advanced = decay_maybe_advance_epoch( + &decay, &curtime, dirty_pages); if (epoch_advanced) { nepochs++; expect_zu_eq(decay_npages_limit_get(&decay), 0, @@ -158,30 +157,32 @@ TEST_BEGIN(test_decay) { nstime_init(&epochtime, decay_epoch_duration_ns(&decay)); const size_t dirty_pages_per_epoch = 1000; - size_t dirty_pages = 0; - uint64_t epoch_ns = decay_epoch_duration_ns(&decay); - bool epoch_advanced = false; + size_t dirty_pages = 0; + uint64_t epoch_ns = decay_epoch_duration_ns(&decay); + bool epoch_advanced = false; /* Populate backlog with some dirty pages */ for (uint64_t i = 0; i < nepoch_init; i++) { nstime_add(&curtime, &epochtime); dirty_pages += dirty_pages_per_epoch; - epoch_advanced |= decay_maybe_advance_epoch(&decay, &curtime, - dirty_pages); + epoch_advanced |= decay_maybe_advance_epoch( + &decay, &curtime, dirty_pages); } expect_true(epoch_advanced, "Epoch never advanced"); size_t npages_limit = decay_npages_limit_get(&decay); - expect_zu_gt(npages_limit, 0, "npages_limit is incorrectly equal 
" + expect_zu_gt(npages_limit, 0, + "npages_limit is incorrectly equal " "to zero after dirty pages have been added"); /* Keep dirty pages unchanged and verify that npages_limit decreases */ for (uint64_t i = nepoch_init; i * epoch_ns < decay_ns; ++i) { nstime_add(&curtime, &epochtime); - epoch_advanced = decay_maybe_advance_epoch(&decay, &curtime, - dirty_pages); + epoch_advanced = decay_maybe_advance_epoch( + &decay, &curtime, dirty_pages); if (epoch_advanced) { - size_t npages_limit_new = decay_npages_limit_get(&decay); + size_t npages_limit_new = decay_npages_limit_get( + &decay); expect_zu_lt(npages_limit_new, npages_limit, "napges_limit failed to decay"); @@ -189,20 +190,22 @@ TEST_BEGIN(test_decay) { } } - expect_zu_gt(npages_limit, 0, "npages_limit decayed to zero earlier " + expect_zu_gt(npages_limit, 0, + "npages_limit decayed to zero earlier " "than decay_ms since last dirty page was added"); /* Completely push all dirty pages out of the backlog */ epoch_advanced = false; for (uint64_t i = 0; i < nepoch_init; i++) { nstime_add(&curtime, &epochtime); - epoch_advanced |= decay_maybe_advance_epoch(&decay, &curtime, - dirty_pages); + epoch_advanced |= decay_maybe_advance_epoch( + &decay, &curtime, dirty_pages); } expect_true(epoch_advanced, "Epoch never advanced"); npages_limit = decay_npages_limit_get(&decay); - expect_zu_eq(npages_limit, 0, "npages_limit didn't decay to 0 after " + expect_zu_eq(npages_limit, 0, + "npages_limit didn't decay to 0 after " "decay_ms since last bump in dirty pages"); } TEST_END @@ -230,29 +233,29 @@ TEST_BEGIN(test_decay_ns_until_purge) { "Failed to return unbounded wait time for zero threshold"); const size_t dirty_pages_per_epoch = 1000; - size_t dirty_pages = 0; - bool epoch_advanced = false; + size_t dirty_pages = 0; + bool epoch_advanced = false; for (uint64_t i = 0; i < nepoch_init; i++) { nstime_add(&curtime, &epochtime); dirty_pages += dirty_pages_per_epoch; - epoch_advanced |= decay_maybe_advance_epoch(&decay, 
&curtime, - dirty_pages); + epoch_advanced |= decay_maybe_advance_epoch( + &decay, &curtime, dirty_pages); } expect_true(epoch_advanced, "Epoch never advanced"); - uint64_t ns_until_purge_all = decay_ns_until_purge(&decay, - dirty_pages, dirty_pages); + uint64_t ns_until_purge_all = decay_ns_until_purge( + &decay, dirty_pages, dirty_pages); expect_u64_ge(ns_until_purge_all, decay_ns, "Incorrectly calculated time to purge all pages"); - uint64_t ns_until_purge_none = decay_ns_until_purge(&decay, - dirty_pages, 0); + uint64_t ns_until_purge_none = decay_ns_until_purge( + &decay, dirty_pages, 0); expect_u64_eq(ns_until_purge_none, decay_epoch_duration_ns(&decay) * 2, "Incorrectly calculated time to purge 0 pages"); uint64_t npages_threshold = dirty_pages / 2; - uint64_t ns_until_purge_half = decay_ns_until_purge(&decay, - dirty_pages, npages_threshold); + uint64_t ns_until_purge_half = decay_ns_until_purge( + &decay, dirty_pages, npages_threshold); nstime_t waittime; nstime_init(&waittime, ns_until_purge_half); @@ -263,7 +266,7 @@ TEST_BEGIN(test_decay_ns_until_purge) { expect_zu_lt(npages_limit, dirty_pages, "npages_limit failed to decrease after waiting"); size_t expected = dirty_pages - npages_limit; - int deviation = abs((int)expected - (int)(npages_threshold)); + int deviation = abs((int)expected - (int)(npages_threshold)); expect_d_lt(deviation, (int)(npages_threshold / 2), "After waiting, number of pages is out of the expected interval " "[0.5 * npages_threshold .. 
1.5 * npages_threshold]"); @@ -272,12 +275,7 @@ TEST_END int main(void) { - return test( - test_decay_init, - test_decay_ms_valid, - test_decay_npages_purge_in, - test_decay_maybe_advance_epoch, - test_decay_empty, - test_decay, - test_decay_ns_until_purge); + return test(test_decay_init, test_decay_ms_valid, + test_decay_npages_purge_in, test_decay_maybe_advance_epoch, + test_decay_empty, test_decay, test_decay_ns_until_purge); } diff --git a/test/unit/div.c b/test/unit/div.c index 29aea665..53447f4a 100644 --- a/test/unit/div.c +++ b/test/unit/div.c @@ -11,12 +11,12 @@ TEST_BEGIN(test_div_exhaustive) { max = 1000 * 1000; } for (size_t dividend = 0; dividend < 1000 * divisor; - dividend += divisor) { - size_t quotient = div_compute( - &div_info, dividend); + dividend += divisor) { + size_t quotient = div_compute(&div_info, dividend); expect_zu_eq(dividend, quotient * divisor, "With divisor = %zu, dividend = %zu, " - "got quotient %zu", divisor, dividend, quotient); + "got quotient %zu", + divisor, dividend, quotient); } } } @@ -24,6 +24,5 @@ TEST_END int main(void) { - return test_no_reentrancy( - test_div_exhaustive); + return test_no_reentrancy(test_div_exhaustive); } diff --git a/test/unit/double_free.c b/test/unit/double_free.c index b6ae8f75..4bd6ab73 100644 --- a/test/unit/double_free.c +++ b/test/unit/double_free.c @@ -4,7 +4,8 @@ #include "jemalloc/internal/safety_check.h" bool fake_abort_called; -void fake_abort(const char *message) { +void +fake_abort(const char *message) { (void)message; fake_abort_called = true; } @@ -23,10 +24,9 @@ test_double_free_post(void) { static bool tcache_enabled(void) { - bool enabled; + bool enabled; size_t sz = sizeof(enabled); - assert_d_eq( - mallctl("thread.tcache.enabled", &enabled, &sz, NULL, 0), 0, + assert_d_eq(mallctl("thread.tcache.enabled", &enabled, &sz, NULL, 0), 0, "Unexpected mallctl failure"); return enabled; } @@ -41,7 +41,7 @@ TEST_BEGIN(test_large_double_free_tcache) { test_double_free_pre(); char *ptr = 
malloc(SC_LARGE_MINCLASS); - bool guarded = extent_is_guarded(tsdn_fetch(), ptr); + bool guarded = extent_is_guarded(tsdn_fetch(), ptr); free(ptr); if (!guarded) { free(ptr); @@ -64,7 +64,7 @@ TEST_BEGIN(test_large_double_free_no_tcache) { test_double_free_pre(); char *ptr = mallocx(SC_LARGE_MINCLASS, MALLOCX_TCACHE_NONE); - bool guarded = extent_is_guarded(tsdn_fetch(), ptr); + bool guarded = extent_is_guarded(tsdn_fetch(), ptr); dallocx(ptr, MALLOCX_TCACHE_NONE); if (!guarded) { dallocx(ptr, MALLOCX_TCACHE_NONE); @@ -87,7 +87,7 @@ TEST_BEGIN(test_small_double_free_tcache) { test_double_free_pre(); char *ptr = malloc(1); - bool guarded = extent_is_guarded(tsdn_fetch(), ptr); + bool guarded = extent_is_guarded(tsdn_fetch(), ptr); free(ptr); if (!guarded) { free(ptr); @@ -115,7 +115,7 @@ TEST_BEGIN(test_small_double_free_arena) { */ char *ptr1 = malloc(1); char *ptr = malloc(1); - bool guarded = extent_is_guarded(tsdn_fetch(), ptr); + bool guarded = extent_is_guarded(tsdn_fetch(), ptr); free(ptr); if (!guarded) { mallctl("thread.tcache.flush", NULL, NULL, NULL, 0); @@ -135,9 +135,7 @@ TEST_END int main(void) { - return test( - test_large_double_free_no_tcache, - test_large_double_free_tcache, - test_small_double_free_tcache, + return test(test_large_double_free_no_tcache, + test_large_double_free_tcache, test_small_double_free_tcache, test_small_double_free_arena); } diff --git a/test/unit/edata_cache.c b/test/unit/edata_cache.c index af1110a9..16ed58b2 100644 --- a/test/unit/edata_cache.c +++ b/test/unit/edata_cache.c @@ -49,16 +49,16 @@ TEST_END static size_t ecf_count(edata_cache_fast_t *ecf) { - size_t count = 0; + size_t count = 0; edata_t *cur; - ql_foreach(cur, &ecf->list.head, ql_link_inactive) { + ql_foreach (cur, &ecf->list.head, ql_link_inactive) { count++; } return count; } TEST_BEGIN(test_edata_cache_fast_simple) { - edata_cache_t ec; + edata_cache_t ec; edata_cache_fast_t ecf; test_edata_cache_init(&ec); @@ -96,7 +96,7 @@ 
TEST_BEGIN(test_edata_cache_fast_simple) { TEST_END TEST_BEGIN(test_edata_cache_fill) { - edata_cache_t ec; + edata_cache_t ec; edata_cache_fast_t ecf; test_edata_cache_init(&ec); @@ -179,7 +179,7 @@ TEST_BEGIN(test_edata_cache_fill) { TEST_END TEST_BEGIN(test_edata_cache_disable) { - edata_cache_t ec; + edata_cache_t ec; edata_cache_fast_t ecf; test_edata_cache_init(&ec); @@ -198,7 +198,8 @@ TEST_BEGIN(test_edata_cache_disable) { expect_zu_eq(0, ecf_count(&ecf), ""); expect_zu_eq(EDATA_CACHE_FAST_FILL, - atomic_load_zu(&ec.count, ATOMIC_RELAXED), "Disabling should flush"); + atomic_load_zu(&ec.count, ATOMIC_RELAXED), + "Disabling should flush"); edata_t *edata = edata_cache_fast_get(TSDN_NULL, &ecf); expect_zu_eq(0, ecf_count(&ecf), ""); @@ -218,9 +219,6 @@ TEST_END int main(void) { - return test( - test_edata_cache, - test_edata_cache_fast_simple, - test_edata_cache_fill, - test_edata_cache_disable); + return test(test_edata_cache, test_edata_cache_fast_simple, + test_edata_cache_fill, test_edata_cache_disable); } diff --git a/test/unit/emitter.c b/test/unit/emitter.c index af0da90d..dc53b9eb 100644 --- a/test/unit/emitter.c +++ b/test/unit/emitter.c @@ -12,9 +12,9 @@ static bool print_escaped = false; typedef struct buf_descriptor_s buf_descriptor_t; struct buf_descriptor_s { - char *buf; + char *buf; size_t len; - bool mid_quote; + bool mid_quote; }; /* @@ -56,8 +56,8 @@ forwarding_cb(void *buf_descriptor_v, const char *str) { } } - size_t written = malloc_snprintf(buf_descriptor->buf, - buf_descriptor->len, "%s", str); + size_t written = malloc_snprintf( + buf_descriptor->buf, buf_descriptor->len, "%s", str); expect_zu_eq(written, strlen(str), "Buffer overflow!"); buf_descriptor->buf += written; buf_descriptor->len -= written; @@ -66,19 +66,18 @@ forwarding_cb(void *buf_descriptor_v, const char *str) { static void expect_emit_output(void (*emit_fn)(emitter_t *), - const char *expected_json_output, - const char *expected_json_compact_output, + const char 
*expected_json_output, const char *expected_json_compact_output, const char *expected_table_output) { - emitter_t emitter; - char buf[MALLOC_PRINTF_BUFSIZE]; + emitter_t emitter; + char buf[MALLOC_PRINTF_BUFSIZE]; buf_descriptor_t buf_descriptor; buf_descriptor.buf = buf; buf_descriptor.len = MALLOC_PRINTF_BUFSIZE; buf_descriptor.mid_quote = false; - emitter_init(&emitter, emitter_output_json, &forwarding_cb, - &buf_descriptor); + emitter_init( + &emitter, emitter_output_json, &forwarding_cb, &buf_descriptor); (*emit_fn)(&emitter); expect_str_eq(expected_json_output, buf, "json output failure"); @@ -89,24 +88,24 @@ expect_emit_output(void (*emit_fn)(emitter_t *), emitter_init(&emitter, emitter_output_json_compact, &forwarding_cb, &buf_descriptor); (*emit_fn)(&emitter); - expect_str_eq(expected_json_compact_output, buf, - "compact json output failure"); + expect_str_eq( + expected_json_compact_output, buf, "compact json output failure"); buf_descriptor.buf = buf; buf_descriptor.len = MALLOC_PRINTF_BUFSIZE; buf_descriptor.mid_quote = false; - emitter_init(&emitter, emitter_output_table, &forwarding_cb, - &buf_descriptor); + emitter_init( + &emitter, emitter_output_table, &forwarding_cb, &buf_descriptor); (*emit_fn)(&emitter); expect_str_eq(expected_table_output, buf, "table output failure"); } static void emit_dict(emitter_t *emitter) { - bool b_false = false; - bool b_true = true; - int i_123 = 123; + bool b_false = false; + bool b_true = true; + int i_123 = 123; const char *str = "a string"; emitter_begin(emitter); @@ -122,48 +121,49 @@ emit_dict(emitter_t *emitter) { } static const char *dict_json = -"{\n" -"\t\"foo\": {\n" -"\t\t\"abc\": false,\n" -"\t\t\"def\": true,\n" -"\t\t\"ghi\": 123,\n" -"\t\t\"jkl\": \"a string\"\n" -"\t}\n" -"}\n"; + "{\n" + "\t\"foo\": {\n" + "\t\t\"abc\": false,\n" + "\t\t\"def\": true,\n" + "\t\t\"ghi\": 123,\n" + "\t\t\"jkl\": \"a string\"\n" + "\t}\n" + "}\n"; static const char *dict_json_compact = -"{" - "\"foo\":{" - 
"\"abc\":false," - "\"def\":true," - "\"ghi\":123," - "\"jkl\":\"a string\"" - "}" -"}"; + "{" + "\"foo\":{" + "\"abc\":false," + "\"def\":true," + "\"ghi\":123," + "\"jkl\":\"a string\"" + "}" + "}"; static const char *dict_table = -"This is the foo table:\n" -" ABC: false\n" -" DEF: true\n" -" GHI: 123 (note_key1: \"a string\")\n" -" JKL: \"a string\" (note_key2: false)\n"; + "This is the foo table:\n" + " ABC: false\n" + " DEF: true\n" + " GHI: 123 (note_key1: \"a string\")\n" + " JKL: \"a string\" (note_key2: false)\n"; static void emit_table_printf(emitter_t *emitter) { emitter_begin(emitter); emitter_table_printf(emitter, "Table note 1\n"); - emitter_table_printf(emitter, "Table note 2 %s\n", - "with format string"); + emitter_table_printf( + emitter, "Table note 2 %s\n", "with format string"); emitter_end(emitter); } static const char *table_printf_json = -"{\n" -"}\n"; + "{\n" + "}\n"; static const char *table_printf_json_compact = "{}"; static const char *table_printf_table = -"Table note 1\n" -"Table note 2 with format string\n"; + "Table note 1\n" + "Table note 2 with format string\n"; -static void emit_nested_dict(emitter_t *emitter) { +static void +emit_nested_dict(emitter_t *emitter) { int val = 123; emitter_begin(emitter); emitter_dict_begin(emitter, "json1", "Dict 1"); @@ -174,53 +174,53 @@ static void emit_nested_dict(emitter_t *emitter) { emitter_dict_end(emitter); /* Close 3 */ emitter_dict_end(emitter); /* Close 1 */ emitter_dict_begin(emitter, "json4", "Dict 4"); - emitter_kv(emitter, "primitive", "Another primitive", - emitter_type_int, &val); + emitter_kv( + emitter, "primitive", "Another primitive", emitter_type_int, &val); emitter_dict_end(emitter); /* Close 4 */ emitter_end(emitter); } static const char *nested_dict_json = -"{\n" -"\t\"json1\": {\n" -"\t\t\"json2\": {\n" -"\t\t\t\"primitive\": 123\n" -"\t\t},\n" -"\t\t\"json3\": {\n" -"\t\t}\n" -"\t},\n" -"\t\"json4\": {\n" -"\t\t\"primitive\": 123\n" -"\t}\n" -"}\n"; + "{\n" + 
"\t\"json1\": {\n" + "\t\t\"json2\": {\n" + "\t\t\t\"primitive\": 123\n" + "\t\t},\n" + "\t\t\"json3\": {\n" + "\t\t}\n" + "\t},\n" + "\t\"json4\": {\n" + "\t\t\"primitive\": 123\n" + "\t}\n" + "}\n"; static const char *nested_dict_json_compact = -"{" - "\"json1\":{" - "\"json2\":{" - "\"primitive\":123" - "}," - "\"json3\":{" - "}" - "}," - "\"json4\":{" - "\"primitive\":123" - "}" -"}"; + "{" + "\"json1\":{" + "\"json2\":{" + "\"primitive\":123" + "}," + "\"json3\":{" + "}" + "}," + "\"json4\":{" + "\"primitive\":123" + "}" + "}"; static const char *nested_dict_table = -"Dict 1\n" -" Dict 2\n" -" A primitive: 123\n" -" Dict 3\n" -"Dict 4\n" -" Another primitive: 123\n"; + "Dict 1\n" + " Dict 2\n" + " A primitive: 123\n" + " Dict 3\n" + "Dict 4\n" + " Another primitive: 123\n"; static void emit_types(emitter_t *emitter) { - bool b = false; - int i = -123; - unsigned u = 123; - ssize_t zd = -456; - size_t zu = 456; + bool b = false; + int i = -123; + unsigned u = 123; + ssize_t zd = -456; + size_t zu = 456; const char *str = "string"; const char *long_str = "abcdefghijklmnopqrstuvwxyz " @@ -254,55 +254,55 @@ emit_types(emitter_t *emitter) { } static const char *types_json = -"{\n" -"\t\"k1\": false,\n" -"\t\"k2\": -123,\n" -"\t\"k3\": 123,\n" -"\t\"k4\": -456,\n" -"\t\"k5\": 456,\n" -"\t\"k6\": \"string\",\n" -"\t\"k7\": \"abcdefghijklmnopqrstuvwxyz " - "abcdefghijklmnopqrstuvwxyz " - "abcdefghijklmnopqrstuvwxyz " - "abcdefghijklmnopqrstuvwxyz " - "abcdefghijklmnopqrstuvwxyz " - "abcdefghijklmnopqrstuvwxyz " - "abcdefghijklmnopqrstuvwxyz " - "abcdefghijklmnopqrstuvwxyz " - "abcdefghijklmnopqrstuvwxyz " - "abcdefghijklmnopqrstuvwxyz\",\n" -"\t\"k8\": 789,\n" -"\t\"k9\": 10000000000\n" -"}\n"; + "{\n" + "\t\"k1\": false,\n" + "\t\"k2\": -123,\n" + "\t\"k3\": 123,\n" + "\t\"k4\": -456,\n" + "\t\"k5\": 456,\n" + "\t\"k6\": \"string\",\n" + "\t\"k7\": \"abcdefghijklmnopqrstuvwxyz " + "abcdefghijklmnopqrstuvwxyz " + "abcdefghijklmnopqrstuvwxyz " + 
"abcdefghijklmnopqrstuvwxyz " + "abcdefghijklmnopqrstuvwxyz " + "abcdefghijklmnopqrstuvwxyz " + "abcdefghijklmnopqrstuvwxyz " + "abcdefghijklmnopqrstuvwxyz " + "abcdefghijklmnopqrstuvwxyz " + "abcdefghijklmnopqrstuvwxyz\",\n" + "\t\"k8\": 789,\n" + "\t\"k9\": 10000000000\n" + "}\n"; static const char *types_json_compact = -"{" - "\"k1\":false," - "\"k2\":-123," - "\"k3\":123," - "\"k4\":-456," - "\"k5\":456," - "\"k6\":\"string\"," - "\"k7\":\"abcdefghijklmnopqrstuvwxyz " - "abcdefghijklmnopqrstuvwxyz " - "abcdefghijklmnopqrstuvwxyz " - "abcdefghijklmnopqrstuvwxyz " - "abcdefghijklmnopqrstuvwxyz " - "abcdefghijklmnopqrstuvwxyz " - "abcdefghijklmnopqrstuvwxyz " - "abcdefghijklmnopqrstuvwxyz " - "abcdefghijklmnopqrstuvwxyz " - "abcdefghijklmnopqrstuvwxyz\"," - "\"k8\":789," - "\"k9\":10000000000" -"}"; + "{" + "\"k1\":false," + "\"k2\":-123," + "\"k3\":123," + "\"k4\":-456," + "\"k5\":456," + "\"k6\":\"string\"," + "\"k7\":\"abcdefghijklmnopqrstuvwxyz " + "abcdefghijklmnopqrstuvwxyz " + "abcdefghijklmnopqrstuvwxyz " + "abcdefghijklmnopqrstuvwxyz " + "abcdefghijklmnopqrstuvwxyz " + "abcdefghijklmnopqrstuvwxyz " + "abcdefghijklmnopqrstuvwxyz " + "abcdefghijklmnopqrstuvwxyz " + "abcdefghijklmnopqrstuvwxyz " + "abcdefghijklmnopqrstuvwxyz\"," + "\"k8\":789," + "\"k9\":10000000000" + "}"; static const char *types_table = -"K1: false\n" -"K2: -123\n" -"K3: 123\n" -"K4: -456\n" -"K5: 456\n" -"K6: \"string\"\n" -"K7: \"abcdefghijklmnopqrstuvwxyz " + "K1: false\n" + "K2: -123\n" + "K3: 123\n" + "K4: -456\n" + "K5: 456\n" + "K6: \"string\"\n" + "K7: \"abcdefghijklmnopqrstuvwxyz " "abcdefghijklmnopqrstuvwxyz " "abcdefghijklmnopqrstuvwxyz " "abcdefghijklmnopqrstuvwxyz " @@ -312,8 +312,8 @@ static const char *types_table = "abcdefghijklmnopqrstuvwxyz " "abcdefghijklmnopqrstuvwxyz " "abcdefghijklmnopqrstuvwxyz\"\n" -"K8: 789\n" -"K9: 10000000000\n"; + "K8: 789\n" + "K9: 10000000000\n"; static void emit_modal(emitter_t *emitter) { @@ -336,37 +336,37 @@ emit_modal(emitter_t *emitter) 
{ } const char *modal_json = -"{\n" -"\t\"j0\": {\n" -"\t\t\"j1\": {\n" -"\t\t\t\"i1\": 123,\n" -"\t\t\t\"i2\": 123,\n" -"\t\t\t\"i4\": 123\n" -"\t\t},\n" -"\t\t\"i5\": 123,\n" -"\t\t\"i6\": 123\n" -"\t}\n" -"}\n"; + "{\n" + "\t\"j0\": {\n" + "\t\t\"j1\": {\n" + "\t\t\t\"i1\": 123,\n" + "\t\t\t\"i2\": 123,\n" + "\t\t\t\"i4\": 123\n" + "\t\t},\n" + "\t\t\"i5\": 123,\n" + "\t\t\"i6\": 123\n" + "\t}\n" + "}\n"; const char *modal_json_compact = -"{" - "\"j0\":{" - "\"j1\":{" - "\"i1\":123," - "\"i2\":123," - "\"i4\":123" - "}," - "\"i5\":123," - "\"i6\":123" - "}" -"}"; + "{" + "\"j0\":{" + "\"j1\":{" + "\"i1\":123," + "\"i2\":123," + "\"i4\":123" + "}," + "\"i5\":123," + "\"i6\":123" + "}" + "}"; const char *modal_table = -"T0\n" -" I1: 123\n" -" I3: 123\n" -" T1\n" -" I4: 123\n" -" I5: 123\n" -" I6: 123\n"; + "T0\n" + " I1: 123\n" + " I3: 123\n" + " T1\n" + " I4: 123\n" + " I5: 123\n" + " I6: 123\n"; static void emit_json_array(emitter_t *emitter) { @@ -387,121 +387,124 @@ emit_json_array(emitter_t *emitter) { emitter_json_kv(emitter, "bar", emitter_type_int, &ival); emitter_json_kv(emitter, "baz", emitter_type_int, &ival); emitter_json_object_end(emitter); /* Close arr[3]. */ - emitter_json_array_end(emitter); /* Close arr. */ + emitter_json_array_end(emitter); /* Close arr. */ emitter_json_object_end(emitter); /* Close dict. 
*/ emitter_end(emitter); } static const char *json_array_json = -"{\n" -"\t\"dict\": {\n" -"\t\t\"arr\": [\n" -"\t\t\t{\n" -"\t\t\t\t\"foo\": 123\n" -"\t\t\t},\n" -"\t\t\t123,\n" -"\t\t\t123,\n" -"\t\t\t{\n" -"\t\t\t\t\"bar\": 123,\n" -"\t\t\t\t\"baz\": 123\n" -"\t\t\t}\n" -"\t\t]\n" -"\t}\n" -"}\n"; + "{\n" + "\t\"dict\": {\n" + "\t\t\"arr\": [\n" + "\t\t\t{\n" + "\t\t\t\t\"foo\": 123\n" + "\t\t\t},\n" + "\t\t\t123,\n" + "\t\t\t123,\n" + "\t\t\t{\n" + "\t\t\t\t\"bar\": 123,\n" + "\t\t\t\t\"baz\": 123\n" + "\t\t\t}\n" + "\t\t]\n" + "\t}\n" + "}\n"; static const char *json_array_json_compact = -"{" - "\"dict\":{" - "\"arr\":[" - "{" - "\"foo\":123" - "}," - "123," - "123," - "{" - "\"bar\":123," - "\"baz\":123" - "}" - "]" - "}" -"}"; + "{" + "\"dict\":{" + "\"arr\":[" + "{" + "\"foo\":123" + "}," + "123," + "123," + "{" + "\"bar\":123," + "\"baz\":123" + "}" + "]" + "}" + "}"; static const char *json_array_table = ""; static void emit_json_nested_array(emitter_t *emitter) { - int ival = 123; + int ival = 123; char *sval = "foo"; emitter_begin(emitter); emitter_json_array_begin(emitter); - emitter_json_array_begin(emitter); - emitter_json_value(emitter, emitter_type_int, &ival); - emitter_json_value(emitter, emitter_type_string, &sval); - emitter_json_value(emitter, emitter_type_int, &ival); - emitter_json_value(emitter, emitter_type_string, &sval); - emitter_json_array_end(emitter); - emitter_json_array_begin(emitter); - emitter_json_value(emitter, emitter_type_int, &ival); - emitter_json_array_end(emitter); - emitter_json_array_begin(emitter); - emitter_json_value(emitter, emitter_type_string, &sval); - emitter_json_value(emitter, emitter_type_int, &ival); - emitter_json_array_end(emitter); - emitter_json_array_begin(emitter); - emitter_json_array_end(emitter); + emitter_json_array_begin(emitter); + emitter_json_value(emitter, emitter_type_int, &ival); + emitter_json_value(emitter, emitter_type_string, &sval); + emitter_json_value(emitter, emitter_type_int, 
&ival); + emitter_json_value(emitter, emitter_type_string, &sval); + emitter_json_array_end(emitter); + emitter_json_array_begin(emitter); + emitter_json_value(emitter, emitter_type_int, &ival); + emitter_json_array_end(emitter); + emitter_json_array_begin(emitter); + emitter_json_value(emitter, emitter_type_string, &sval); + emitter_json_value(emitter, emitter_type_int, &ival); + emitter_json_array_end(emitter); + emitter_json_array_begin(emitter); + emitter_json_array_end(emitter); emitter_json_array_end(emitter); emitter_end(emitter); } static const char *json_nested_array_json = -"{\n" -"\t[\n" -"\t\t[\n" -"\t\t\t123,\n" -"\t\t\t\"foo\",\n" -"\t\t\t123,\n" -"\t\t\t\"foo\"\n" -"\t\t],\n" -"\t\t[\n" -"\t\t\t123\n" -"\t\t],\n" -"\t\t[\n" -"\t\t\t\"foo\",\n" -"\t\t\t123\n" -"\t\t],\n" -"\t\t[\n" -"\t\t]\n" -"\t]\n" -"}\n"; + "{\n" + "\t[\n" + "\t\t[\n" + "\t\t\t123,\n" + "\t\t\t\"foo\",\n" + "\t\t\t123,\n" + "\t\t\t\"foo\"\n" + "\t\t],\n" + "\t\t[\n" + "\t\t\t123\n" + "\t\t],\n" + "\t\t[\n" + "\t\t\t\"foo\",\n" + "\t\t\t123\n" + "\t\t],\n" + "\t\t[\n" + "\t\t]\n" + "\t]\n" + "}\n"; static const char *json_nested_array_json_compact = -"{" - "[" - "[" - "123," - "\"foo\"," - "123," - "\"foo\"" - "]," - "[" - "123" - "]," - "[" - "\"foo\"," - "123" - "]," - "[" - "]" - "]" -"}"; + "{" + "[" + "[" + "123," + "\"foo\"," + "123," + "\"foo\"" + "]," + "[" + "123" + "]," + "[" + "\"foo\"," + "123" + "]," + "[" + "]" + "]" + "}"; static const char *json_nested_array_table = ""; static void emit_table_row(emitter_t *emitter) { emitter_begin(emitter); emitter_row_t row; - emitter_col_t abc = {emitter_justify_left, 10, emitter_type_title, {0}, {0, 0}}; + emitter_col_t abc = { + emitter_justify_left, 10, emitter_type_title, {0}, {0, 0}}; abc.str_val = "ABC title"; - emitter_col_t def = {emitter_justify_right, 15, emitter_type_title, {0}, {0, 0}}; + emitter_col_t def = { + emitter_justify_right, 15, emitter_type_title, {0}, {0, 0}}; def.str_val = "DEF title"; - emitter_col_t ghi 
= {emitter_justify_right, 5, emitter_type_title, {0}, {0, 0}}; + emitter_col_t ghi = { + emitter_justify_right, 5, emitter_type_title, {0}, {0, 0}}; ghi.str_val = "GHI"; emitter_row_init(&row); @@ -536,21 +539,21 @@ emit_table_row(emitter_t *emitter) { } static const char *table_row_json = -"{\n" -"}\n"; + "{\n" + "}\n"; static const char *table_row_json_compact = "{}"; static const char *table_row_table = -"ABC title DEF title GHI\n" -"123 true 456\n" -"789 false 1011\n" -"\"a string\" false ghi\n"; + "ABC title DEF title GHI\n" + "123 true 456\n" + "789 false 1011\n" + "\"a string\" false ghi\n"; -#define GENERATE_TEST(feature) \ -TEST_BEGIN(test_##feature) { \ - expect_emit_output(emit_##feature, feature##_json, \ - feature##_json_compact, feature##_table); \ -} \ -TEST_END +#define GENERATE_TEST(feature) \ + TEST_BEGIN(test_##feature) { \ + expect_emit_output(emit_##feature, feature##_json, \ + feature##_json_compact, feature##_table); \ + } \ + TEST_END GENERATE_TEST(dict) GENERATE_TEST(table_printf) @@ -563,13 +566,7 @@ GENERATE_TEST(table_row) int main(void) { - return test_no_reentrancy( - test_dict, - test_table_printf, - test_nested_dict, - test_types, - test_modal, - test_json_array, - test_json_nested_array, - test_table_row); + return test_no_reentrancy(test_dict, test_table_printf, + test_nested_dict, test_types, test_modal, test_json_array, + test_json_nested_array, test_table_row); } diff --git a/test/unit/extent_quantize.c b/test/unit/extent_quantize.c index e6bbd539..c178240e 100644 --- a/test/unit/extent_quantize.c +++ b/test/unit/extent_quantize.c @@ -2,9 +2,9 @@ TEST_BEGIN(test_small_extent_size) { unsigned nbins, i; - size_t sz, extent_size; - size_t mib[4]; - size_t miblen = sizeof(mib) / sizeof(size_t); + size_t sz, extent_size; + size_t mib[4]; + size_t miblen = sizeof(mib) / sizeof(size_t); /* * Iterate over all small size classes, get their extent sizes, and @@ -21,25 +21,26 @@ TEST_BEGIN(test_small_extent_size) { mib[2] = i; sz = 
sizeof(size_t); expect_d_eq(mallctlbymib(mib, miblen, (void *)&extent_size, &sz, - NULL, 0), 0, "Unexpected mallctlbymib failure"); - expect_zu_eq(extent_size, - sz_psz_quantize_floor(extent_size), + NULL, 0), + 0, "Unexpected mallctlbymib failure"); + expect_zu_eq(extent_size, sz_psz_quantize_floor(extent_size), "Small extent quantization should be a no-op " - "(extent_size=%zu)", extent_size); - expect_zu_eq(extent_size, - sz_psz_quantize_ceil(extent_size), + "(extent_size=%zu)", + extent_size); + expect_zu_eq(extent_size, sz_psz_quantize_ceil(extent_size), "Small extent quantization should be a no-op " - "(extent_size=%zu)", extent_size); + "(extent_size=%zu)", + extent_size); } } TEST_END TEST_BEGIN(test_large_extent_size) { - bool cache_oblivious; + bool cache_oblivious; unsigned nlextents, i; - size_t sz, extent_size_prev, ceil_prev; - size_t mib[4]; - size_t miblen = sizeof(mib) / sizeof(size_t); + size_t sz, extent_size_prev, ceil_prev; + size_t mib[4]; + size_t miblen = sizeof(mib) / sizeof(size_t); /* * Iterate over all large size classes, get their extent sizes, and @@ -48,11 +49,13 @@ TEST_BEGIN(test_large_extent_size) { sz = sizeof(bool); expect_d_eq(mallctl("opt.cache_oblivious", (void *)&cache_oblivious, - &sz, NULL, 0), 0, "Unexpected mallctl failure"); + &sz, NULL, 0), + 0, "Unexpected mallctl failure"); sz = sizeof(unsigned); - expect_d_eq(mallctl("arenas.nlextents", (void *)&nlextents, &sz, NULL, - 0), 0, "Unexpected mallctl failure"); + expect_d_eq( + mallctl("arenas.nlextents", (void *)&nlextents, &sz, NULL, 0), 0, + "Unexpected mallctl failure"); expect_d_eq(mallctlnametomib("arenas.lextent.0.size", mib, &miblen), 0, "Unexpected mallctlnametomib failure"); @@ -62,20 +65,21 @@ TEST_BEGIN(test_large_extent_size) { mib[2] = i; sz = sizeof(size_t); expect_d_eq(mallctlbymib(mib, miblen, (void *)&lextent_size, - &sz, NULL, 0), 0, "Unexpected mallctlbymib failure"); - extent_size = cache_oblivious ? 
lextent_size + PAGE : - lextent_size; + &sz, NULL, 0), + 0, "Unexpected mallctlbymib failure"); + extent_size = cache_oblivious ? lextent_size + PAGE + : lextent_size; floor = sz_psz_quantize_floor(extent_size); ceil = sz_psz_quantize_ceil(extent_size); expect_zu_eq(extent_size, floor, "Extent quantization should be a no-op for precise size " - "(lextent_size=%zu, extent_size=%zu)", lextent_size, - extent_size); + "(lextent_size=%zu, extent_size=%zu)", + lextent_size, extent_size); expect_zu_eq(extent_size, ceil, "Extent quantization should be a no-op for precise size " - "(lextent_size=%zu, extent_size=%zu)", lextent_size, - extent_size); + "(lextent_size=%zu, extent_size=%zu)", + lextent_size, extent_size); if (i > 0) { expect_zu_eq(extent_size_prev, @@ -85,23 +89,22 @@ TEST_BEGIN(test_large_extent_size) { expect_zu_eq(ceil_prev, extent_size, "Ceiling should be a precise size " "(extent_size_prev=%zu, ceil_prev=%zu, " - "extent_size=%zu)", extent_size_prev, - ceil_prev, extent_size); + "extent_size=%zu)", + extent_size_prev, ceil_prev, extent_size); } } if (i + 1 < nlextents) { extent_size_prev = floor; - ceil_prev = sz_psz_quantize_ceil(extent_size + - PAGE); + ceil_prev = sz_psz_quantize_ceil(extent_size + PAGE); } } } TEST_END TEST_BEGIN(test_monotonic) { -#define SZ_MAX ZU(4 * 1024 * 1024) +#define SZ_MAX ZU(4 * 1024 * 1024) unsigned i; - size_t floor_prev, ceil_prev; + size_t floor_prev, ceil_prev; floor_prev = 0; ceil_prev = 0; @@ -117,12 +120,15 @@ TEST_BEGIN(test_monotonic) { floor, extent_size, ceil); expect_zu_ge(ceil, extent_size, "Ceiling should be >= (floor=%zu, extent_size=%zu, " - "ceil=%zu)", floor, extent_size, ceil); + "ceil=%zu)", + floor, extent_size, ceil); - expect_zu_le(floor_prev, floor, "Floor should be monotonic " + expect_zu_le(floor_prev, floor, + "Floor should be monotonic " "(floor_prev=%zu, floor=%zu, extent_size=%zu, ceil=%zu)", floor_prev, floor, extent_size, ceil); - expect_zu_le(ceil_prev, ceil, "Ceiling should be monotonic " + 
expect_zu_le(ceil_prev, ceil, + "Ceiling should be monotonic " "(floor=%zu, extent_size=%zu, ceil_prev=%zu, ceil=%zu)", floor, extent_size, ceil_prev, ceil); @@ -135,7 +141,5 @@ TEST_END int main(void) { return test( - test_small_extent_size, - test_large_extent_size, - test_monotonic); + test_small_extent_size, test_large_extent_size, test_monotonic); } diff --git a/test/unit/fb.c b/test/unit/fb.c index ad72c75a..26a33fd9 100644 --- a/test/unit/fb.c +++ b/test/unit/fb.c @@ -5,21 +5,19 @@ static void do_test_init(size_t nbits) { - size_t sz = FB_NGROUPS(nbits) * sizeof(fb_group_t); + size_t sz = FB_NGROUPS(nbits) * sizeof(fb_group_t); fb_group_t *fb = malloc(sz); /* Junk fb's contents. */ memset(fb, 99, sz); fb_init(fb, nbits); for (size_t i = 0; i < nbits; i++) { - expect_false(fb_get(fb, nbits, i), - "bitmap should start empty"); + expect_false(fb_get(fb, nbits, i), "bitmap should start empty"); } free(fb); } TEST_BEGIN(test_fb_init) { -#define NB(nbits) \ - do_test_init(nbits); +#define NB(nbits) do_test_init(nbits); NBITS_TAB #undef NB } @@ -27,7 +25,7 @@ TEST_END static void do_test_get_set_unset(size_t nbits) { - size_t sz = FB_NGROUPS(nbits) * sizeof(fb_group_t); + size_t sz = FB_NGROUPS(nbits) * sizeof(fb_group_t); fb_group_t *fb = malloc(sz); fb_init(fb, nbits); /* Set the bits divisible by 3. */ @@ -56,8 +54,7 @@ do_test_get_set_unset(size_t nbits) { } TEST_BEGIN(test_get_set_unset) { -#define NB(nbits) \ - do_test_get_set_unset(nbits); +#define NB(nbits) do_test_get_set_unset(nbits); NBITS_TAB #undef NB } @@ -65,7 +62,7 @@ TEST_END static ssize_t find_3_5_compute(ssize_t i, size_t nbits, bool bit, bool forward) { - for(; i < (ssize_t)nbits && i >= 0; i += (forward ? 1 : -1)) { + for (; i < (ssize_t)nbits && i >= 0; i += (forward ? 
1 : -1)) { bool expected_bit = i % 3 == 0 || i % 5 == 0; if (expected_bit == bit) { return i; @@ -76,7 +73,7 @@ find_3_5_compute(ssize_t i, size_t nbits, bool bit, bool forward) { static void do_test_search_simple(size_t nbits) { - size_t sz = FB_NGROUPS(nbits) * sizeof(fb_group_t); + size_t sz = FB_NGROUPS(nbits) * sizeof(fb_group_t); fb_group_t *fb = malloc(sz); fb_init(fb, nbits); @@ -96,7 +93,7 @@ do_test_search_simple(size_t nbits) { expect_zu_eq(ffs_compute, ffs_search, "ffs mismatch at %zu", i); ssize_t fls_compute = find_3_5_compute(i, nbits, true, false); - size_t fls_search = fb_fls(fb, nbits, i); + size_t fls_search = fb_fls(fb, nbits, i); expect_zu_eq(fls_compute, fls_search, "fls mismatch at %zu", i); size_t ffu_compute = find_3_5_compute(i, nbits, false, true); @@ -112,8 +109,7 @@ do_test_search_simple(size_t nbits) { } TEST_BEGIN(test_search_simple) { -#define NB(nbits) \ - do_test_search_simple(nbits); +#define NB(nbits) do_test_search_simple(nbits); NBITS_TAB #undef NB } @@ -145,15 +141,17 @@ expect_exhaustive_results(fb_group_t *mostly_full, fb_group_t *mostly_empty, "mismatch at %zu, %zu", position, special_bit); expect_zd_eq(special_bit, fb_fls(mostly_empty, nbits, position), "mismatch at %zu, %zu", position, special_bit); - expect_zu_eq(position + 1, fb_ffu(mostly_empty, nbits, position), + expect_zu_eq(position + 1, + fb_ffu(mostly_empty, nbits, position), + "mismatch at %zu, %zu", position, special_bit); + expect_zd_eq(position - 1, + fb_flu(mostly_empty, nbits, position), "mismatch at %zu, %zu", position, special_bit); - expect_zd_eq(position - 1, fb_flu(mostly_empty, nbits, - position), "mismatch at %zu, %zu", position, special_bit); expect_zu_eq(position + 1, fb_ffs(mostly_full, nbits, position), "mismatch at %zu, %zu", position, special_bit); - expect_zd_eq(position - 1, fb_fls(mostly_full, nbits, - position), "mismatch at %zu, %zu", position, special_bit); + expect_zd_eq(position - 1, fb_fls(mostly_full, nbits, position), + "mismatch at 
%zu, %zu", position, special_bit); expect_zu_eq(position, fb_ffu(mostly_full, nbits, position), "mismatch at %zu, %zu", position, special_bit); expect_zd_eq(position, fb_flu(mostly_full, nbits, position), @@ -162,8 +160,8 @@ expect_exhaustive_results(fb_group_t *mostly_full, fb_group_t *mostly_empty, /* position > special_bit. */ expect_zu_eq(nbits, fb_ffs(mostly_empty, nbits, position), "mismatch at %zu, %zu", position, special_bit); - expect_zd_eq(special_bit, fb_fls(mostly_empty, nbits, - position), "mismatch at %zu, %zu", position, special_bit); + expect_zd_eq(special_bit, fb_fls(mostly_empty, nbits, position), + "mismatch at %zu, %zu", position, special_bit); expect_zu_eq(position, fb_ffu(mostly_empty, nbits, position), "mismatch at %zu, %zu", position, special_bit); expect_zd_eq(position, fb_flu(mostly_empty, nbits, position), @@ -186,7 +184,7 @@ do_test_search_exhaustive(size_t nbits) { if (nbits > 1000) { return; } - size_t sz = FB_NGROUPS(nbits) * sizeof(fb_group_t); + size_t sz = FB_NGROUPS(nbits) * sizeof(fb_group_t); fb_group_t *empty = malloc(sz); fb_init(empty, nbits); fb_group_t *full = malloc(sz); @@ -209,8 +207,7 @@ do_test_search_exhaustive(size_t nbits) { } TEST_BEGIN(test_search_exhaustive) { -#define NB(nbits) \ - do_test_search_exhaustive(nbits); +#define NB(nbits) do_test_search_exhaustive(nbits); NBITS_TAB #undef NB } @@ -222,8 +219,8 @@ TEST_BEGIN(test_range_simple) { * big enough that usages of things like weirdnum (below) near the * beginning fit comfortably into the beginning of the bitmap. 
*/ - size_t nbits = 64 * 10; - size_t ngroups = FB_NGROUPS(nbits); + size_t nbits = 64 * 10; + size_t ngroups = FB_NGROUPS(nbits); fb_group_t *fb = malloc(sizeof(fb_group_t) * ngroups); fb_init(fb, nbits); for (size_t i = 0; i < nbits; i++) { @@ -255,7 +252,7 @@ TEST_END static void do_test_empty_full_exhaustive(size_t nbits) { - size_t sz = FB_NGROUPS(nbits) * sizeof(fb_group_t); + size_t sz = FB_NGROUPS(nbits) * sizeof(fb_group_t); fb_group_t *empty = malloc(sz); fb_init(empty, nbits); fb_group_t *full = malloc(sz); @@ -273,15 +270,15 @@ do_test_empty_full_exhaustive(size_t nbits) { expect_false(fb_empty(empty, nbits), "error at bit %zu", i); if (nbits != 1) { - expect_false(fb_full(empty, nbits), - "error at bit %zu", i); - expect_false(fb_empty(full, nbits), - "error at bit %zu", i); + expect_false( + fb_full(empty, nbits), "error at bit %zu", i); + expect_false( + fb_empty(full, nbits), "error at bit %zu", i); } else { - expect_true(fb_full(empty, nbits), - "error at bit %zu", i); - expect_true(fb_empty(full, nbits), - "error at bit %zu", i); + expect_true( + fb_full(empty, nbits), "error at bit %zu", i); + expect_true( + fb_empty(full, nbits), "error at bit %zu", i); } expect_false(fb_full(full, nbits), "error at bit %zu", i); @@ -294,8 +291,7 @@ do_test_empty_full_exhaustive(size_t nbits) { } TEST_BEGIN(test_empty_full) { -#define NB(nbits) \ - do_test_empty_full_exhaustive(nbits); +#define NB(nbits) do_test_empty_full_exhaustive(nbits); NBITS_TAB #undef NB } @@ -306,8 +302,8 @@ TEST_END * built closely on top of it. */ TEST_BEGIN(test_iter_range_simple) { - size_t set_limit = 30; - size_t nbits = 100; + size_t set_limit = 30; + size_t nbits = 100; fb_group_t fb[FB_NGROUPS(100)]; fb_init(fb, nbits); @@ -318,7 +314,7 @@ TEST_BEGIN(test_iter_range_simple) { */ size_t begin = (size_t)-1; size_t len = (size_t)-1; - bool result; + bool result; /* A set of checks with only the first set_limit bits *set*. 
*/ fb_set_range(fb, nbits, 0, set_limit); @@ -410,7 +406,6 @@ TEST_BEGIN(test_iter_range_simple) { expect_zu_eq(0, begin, "Incorrect begin at %zu", i); expect_zu_eq(set_limit, len, "Incorrect len at %zu", i); } - } TEST_END @@ -426,11 +421,11 @@ fb_iter_simple(fb_group_t *fb, size_t nbits, size_t start, size_t *r_begin, ssize_t stride = (forward ? (ssize_t)1 : (ssize_t)-1); ssize_t range_begin = (ssize_t)start; for (; range_begin != (ssize_t)nbits && range_begin != -1; - range_begin += stride) { + range_begin += stride) { if (fb_get(fb, nbits, range_begin) == val) { ssize_t range_end = range_begin; for (; range_end != (ssize_t)nbits && range_end != -1; - range_end += stride) { + range_end += stride) { if (fb_get(fb, nbits, range_end) != val) { break; } @@ -470,26 +465,26 @@ fb_range_longest_simple(fb_group_t *fb, size_t nbits, bool val) { } static void -expect_iter_results_at(fb_group_t *fb, size_t nbits, size_t pos, - bool val, bool forward) { - bool iter_res; +expect_iter_results_at( + fb_group_t *fb, size_t nbits, size_t pos, bool val, bool forward) { + bool iter_res; size_t iter_begin JEMALLOC_CC_SILENCE_INIT(0); - size_t iter_len JEMALLOC_CC_SILENCE_INIT(0); + size_t iter_len JEMALLOC_CC_SILENCE_INIT(0); if (val) { if (forward) { - iter_res = fb_srange_iter(fb, nbits, pos, - &iter_begin, &iter_len); + iter_res = fb_srange_iter( + fb, nbits, pos, &iter_begin, &iter_len); } else { - iter_res = fb_srange_riter(fb, nbits, pos, - &iter_begin, &iter_len); + iter_res = fb_srange_riter( + fb, nbits, pos, &iter_begin, &iter_len); } } else { if (forward) { - iter_res = fb_urange_iter(fb, nbits, pos, - &iter_begin, &iter_len); + iter_res = fb_urange_iter( + fb, nbits, pos, &iter_begin, &iter_len); } else { - iter_res = fb_urange_riter(fb, nbits, pos, - &iter_begin, &iter_len); + iter_res = fb_urange_riter( + fb, nbits, pos, &iter_begin, &iter_len); } } @@ -500,15 +495,15 @@ expect_iter_results_at(fb_group_t *fb, size_t nbits, size_t pos, */ size_t simple_iter_begin = 0; 
size_t simple_iter_len = 0; - simple_iter_res = fb_iter_simple(fb, nbits, pos, &simple_iter_begin, - &simple_iter_len, val, forward); + simple_iter_res = fb_iter_simple( + fb, nbits, pos, &simple_iter_begin, &simple_iter_len, val, forward); expect_b_eq(iter_res, simple_iter_res, "Result mismatch at %zu", pos); if (iter_res && simple_iter_res) { assert_zu_eq(iter_begin, simple_iter_begin, "Begin mismatch at %zu", pos); - expect_zu_eq(iter_len, simple_iter_len, - "Length mismatch at %zu", pos); + expect_zu_eq( + iter_len, simple_iter_len, "Length mismatch at %zu", pos); } } @@ -543,7 +538,7 @@ do_test_iter_range_exhaustive(size_t nbits) { if (nbits > 1000) { return; } - size_t sz = FB_NGROUPS(nbits) * sizeof(fb_group_t); + size_t sz = FB_NGROUPS(nbits) * sizeof(fb_group_t); fb_group_t *fb = malloc(sz); fb_init(fb, nbits); @@ -558,7 +553,7 @@ do_test_iter_range_exhaustive(size_t nbits) { expect_iter_results(fb, nbits); fb_unset_range(fb, nbits, 0, nbits); - fb_set_range(fb, nbits, 0, nbits / 2 == 0 ? 1: nbits / 2); + fb_set_range(fb, nbits, 0, nbits / 2 == 0 ? 1 : nbits / 2); expect_iter_results(fb, nbits); free(fb); @@ -569,8 +564,7 @@ do_test_iter_range_exhaustive(size_t nbits) { * computation. */ TEST_BEGIN(test_iter_range_exhaustive) { -#define NB(nbits) \ - do_test_iter_range_exhaustive(nbits); +#define NB(nbits) do_test_iter_range_exhaustive(nbits); NBITS_TAB #undef NB } @@ -581,8 +575,8 @@ TEST_END * returns the number of set bits in [scount_start, scount_end). */ static size_t -scount_contiguous(size_t set_start, size_t set_end, size_t scount_start, - size_t scount_end) { +scount_contiguous( + size_t set_start, size_t set_end, size_t scount_start, size_t scount_end) { /* No overlap. 
*/ if (set_end <= scount_start || scount_end <= set_start) { return 0; @@ -611,8 +605,8 @@ scount_contiguous(size_t set_start, size_t set_end, size_t scount_start, } static size_t -ucount_contiguous(size_t set_start, size_t set_end, size_t ucount_start, - size_t ucount_end) { +ucount_contiguous( + size_t set_start, size_t set_end, size_t ucount_start, size_t ucount_end) { /* No overlap. */ if (set_end <= ucount_start || ucount_end <= set_start) { return ucount_end - ucount_start; @@ -641,34 +635,33 @@ ucount_contiguous(size_t set_start, size_t set_end, size_t ucount_start, } static void -expect_count_match_contiguous(fb_group_t *fb, size_t nbits, size_t set_start, - size_t set_end) { +expect_count_match_contiguous( + fb_group_t *fb, size_t nbits, size_t set_start, size_t set_end) { for (size_t i = 0; i < nbits; i++) { for (size_t j = i + 1; j <= nbits; j++) { size_t cnt = j - i; - size_t scount_expected = scount_contiguous(set_start, - set_end, i, j); + size_t scount_expected = scount_contiguous( + set_start, set_end, i, j); size_t scount_computed = fb_scount(fb, nbits, i, cnt); expect_zu_eq(scount_expected, scount_computed, "fb_scount error with nbits=%zu, start=%zu, " "cnt=%zu, with bits set in [%zu, %zu)", nbits, i, cnt, set_start, set_end); - size_t ucount_expected = ucount_contiguous(set_start, - set_end, i, j); + size_t ucount_expected = ucount_contiguous( + set_start, set_end, i, j); size_t ucount_computed = fb_ucount(fb, nbits, i, cnt); assert_zu_eq(ucount_expected, ucount_computed, "fb_ucount error with nbits=%zu, start=%zu, " "cnt=%zu, with bits set in [%zu, %zu)", nbits, i, cnt, set_start, set_end); - } } } static void do_test_count_contiguous(size_t nbits) { - size_t sz = FB_NGROUPS(nbits) * sizeof(fb_group_t); + size_t sz = FB_NGROUPS(nbits) * sizeof(fb_group_t); fb_group_t *fb = malloc(sz); fb_init(fb, nbits); @@ -688,7 +681,7 @@ do_test_count_contiguous(size_t nbits) { } TEST_BEGIN(test_count_contiguous_simple) { - enum {nbits = 300}; + enum { nbits 
= 300 }; fb_group_t fb[FB_NGROUPS(nbits)]; fb_init(fb, nbits); /* Just an arbitrary number. */ @@ -718,10 +711,10 @@ TEST_BEGIN(test_count_contiguous_simple) { TEST_END TEST_BEGIN(test_count_contiguous) { -#define NB(nbits) \ - /* This test is *particularly* slow in debug builds. */ \ - if ((!config_debug && nbits < 300) || nbits < 150) { \ - do_test_count_contiguous(nbits); \ +#define NB(nbits) \ + /* This test is *particularly* slow in debug builds. */ \ + if ((!config_debug && nbits < 300) || nbits < 150) { \ + do_test_count_contiguous(nbits); \ } NBITS_TAB #undef NB @@ -729,15 +722,15 @@ TEST_BEGIN(test_count_contiguous) { TEST_END static void -expect_count_match_alternating(fb_group_t *fb_even, fb_group_t *fb_odd, - size_t nbits) { +expect_count_match_alternating( + fb_group_t *fb_even, fb_group_t *fb_odd, size_t nbits) { for (size_t i = 0; i < nbits; i++) { for (size_t j = i + 1; j <= nbits; j++) { size_t cnt = j - i; size_t odd_scount = cnt / 2 + (size_t)(cnt % 2 == 1 && i % 2 == 1); - size_t odd_scount_computed = fb_scount(fb_odd, nbits, - i, j - i); + size_t odd_scount_computed = fb_scount( + fb_odd, nbits, i, j - i); assert_zu_eq(odd_scount, odd_scount_computed, "fb_scount error with nbits=%zu, start=%zu, " "cnt=%zu, with alternating bits set.", @@ -745,8 +738,8 @@ expect_count_match_alternating(fb_group_t *fb_even, fb_group_t *fb_odd, size_t odd_ucount = cnt / 2 + (size_t)(cnt % 2 == 1 && i % 2 == 0); - size_t odd_ucount_computed = fb_ucount(fb_odd, nbits, - i, j - i); + size_t odd_ucount_computed = fb_ucount( + fb_odd, nbits, i, j - i); assert_zu_eq(odd_ucount, odd_ucount_computed, "fb_ucount error with nbits=%zu, start=%zu, " "cnt=%zu, with alternating bits set.", @@ -754,8 +747,8 @@ expect_count_match_alternating(fb_group_t *fb_even, fb_group_t *fb_odd, size_t even_scount = cnt / 2 + (size_t)(cnt % 2 == 1 && i % 2 == 0); - size_t even_scount_computed = fb_scount(fb_even, nbits, - i, j - i); + size_t even_scount_computed = fb_scount( + fb_even, nbits, 
i, j - i); assert_zu_eq(even_scount, even_scount_computed, "fb_scount error with nbits=%zu, start=%zu, " "cnt=%zu, with alternating bits set.", @@ -763,8 +756,8 @@ expect_count_match_alternating(fb_group_t *fb_even, fb_group_t *fb_odd, size_t even_ucount = cnt / 2 + (size_t)(cnt % 2 == 1 && i % 2 == 1); - size_t even_ucount_computed = fb_ucount(fb_even, nbits, - i, j - i); + size_t even_ucount_computed = fb_ucount( + fb_even, nbits, i, j - i); assert_zu_eq(even_ucount, even_ucount_computed, "fb_ucount error with nbits=%zu, start=%zu, " "cnt=%zu, with alternating bits set.", @@ -778,7 +771,7 @@ do_test_count_alternating(size_t nbits) { if (nbits > 1000) { return; } - size_t sz = FB_NGROUPS(nbits) * sizeof(fb_group_t); + size_t sz = FB_NGROUPS(nbits) * sizeof(fb_group_t); fb_group_t *fb_even = malloc(sz); fb_group_t *fb_odd = malloc(sz); @@ -800,8 +793,7 @@ do_test_count_alternating(size_t nbits) { } TEST_BEGIN(test_count_alternating) { -#define NB(nbits) \ - do_test_count_alternating(nbits); +#define NB(nbits) do_test_count_alternating(nbits); NBITS_TAB #undef NB } @@ -809,8 +801,9 @@ TEST_END static void do_test_bit_op(size_t nbits, bool (*op)(bool a, bool b), - void (*fb_op)(fb_group_t *dst, fb_group_t *src1, fb_group_t *src2, size_t nbits)) { - size_t sz = FB_NGROUPS(nbits) * sizeof(fb_group_t); + void (*fb_op)( + fb_group_t *dst, fb_group_t *src1, fb_group_t *src2, size_t nbits)) { + size_t sz = FB_NGROUPS(nbits) * sizeof(fb_group_t); fb_group_t *fb1 = malloc(sz); fb_group_t *fb2 = malloc(sz); fb_group_t *fb_result = malloc(sz); @@ -853,8 +846,10 @@ do_test_bit_op(size_t nbits, bool (*op)(bool a, bool b), bool bit2 = ((prng2 & (1ULL << (i % 64))) != 0); /* Original bitmaps shouldn't change. 
*/ - expect_b_eq(bit1, fb_get(fb1, nbits, i), "difference at bit %zu", i); - expect_b_eq(bit2, fb_get(fb2, nbits, i), "difference at bit %zu", i); + expect_b_eq( + bit1, fb_get(fb1, nbits, i), "difference at bit %zu", i); + expect_b_eq( + bit2, fb_get(fb2, nbits, i), "difference at bit %zu", i); /* New one should be bitwise and. */ expect_b_eq(op(bit1, bit2), fb_get(fb_result, nbits, i), @@ -883,8 +878,7 @@ do_test_bit_and(size_t nbits) { } TEST_BEGIN(test_bit_and) { -#define NB(nbits) \ - do_test_bit_and(nbits); +#define NB(nbits) do_test_bit_and(nbits); NBITS_TAB #undef NB } @@ -901,8 +895,7 @@ do_test_bit_or(size_t nbits) { } TEST_BEGIN(test_bit_or) { -#define NB(nbits) \ - do_test_bit_or(nbits); +#define NB(nbits) do_test_bit_or(nbits); NBITS_TAB #undef NB } @@ -915,8 +908,8 @@ binary_not(bool a, bool b) { } static void -fb_bit_not_shim(fb_group_t *dst, fb_group_t *src1, fb_group_t *src2, - size_t nbits) { +fb_bit_not_shim( + fb_group_t *dst, fb_group_t *src1, fb_group_t *src2, size_t nbits) { (void)src2; fb_bit_not(dst, src1, nbits); } @@ -927,8 +920,7 @@ do_test_bit_not(size_t nbits) { } TEST_BEGIN(test_bit_not) { -#define NB(nbits) \ - do_test_bit_not(nbits); +#define NB(nbits) do_test_bit_not(nbits); NBITS_TAB #undef NB } @@ -936,19 +928,9 @@ TEST_END int main(void) { - return test_no_reentrancy( - test_fb_init, - test_get_set_unset, - test_search_simple, - test_search_exhaustive, - test_range_simple, - test_empty_full, - test_iter_range_simple, - test_iter_range_exhaustive, - test_count_contiguous_simple, - test_count_contiguous, - test_count_alternating, - test_bit_and, - test_bit_or, - test_bit_not); + return test_no_reentrancy(test_fb_init, test_get_set_unset, + test_search_simple, test_search_exhaustive, test_range_simple, + test_empty_full, test_iter_range_simple, test_iter_range_exhaustive, + test_count_contiguous_simple, test_count_contiguous, + test_count_alternating, test_bit_and, test_bit_or, test_bit_not); } diff --git a/test/unit/fork.c 
b/test/unit/fork.c index 1a4c575e..e52d0a6c 100644 --- a/test/unit/fork.c +++ b/test/unit/fork.c @@ -8,7 +8,7 @@ TEST_BEGIN(test_fork) { /* Set up a manually managed arena for test. */ unsigned arena_ind; - size_t sz = sizeof(unsigned); + size_t sz = sizeof(unsigned); expect_d_eq(mallctl("arenas.create", (void *)&arena_ind, &sz, NULL, 0), 0, "Unexpected mallctl() failure"); @@ -16,8 +16,8 @@ TEST_BEGIN(test_fork) { unsigned old_arena_ind; sz = sizeof(old_arena_ind); expect_d_eq(mallctl("thread.arena", (void *)&old_arena_ind, &sz, - (void *)&arena_ind, sizeof(arena_ind)), 0, - "Unexpected mallctl() failure"); + (void *)&arena_ind, sizeof(arena_ind)), + 0, "Unexpected mallctl() failure"); p = malloc(1); expect_ptr_not_null(p, "Unexpected malloc() failure"); @@ -108,7 +108,5 @@ TEST_END int main(void) { - return test_no_reentrancy( - test_fork, - test_fork_multithreaded); + return test_no_reentrancy(test_fork, test_fork_multithreaded); } diff --git a/test/unit/fxp.c b/test/unit/fxp.c index 27f10976..02020efe 100644 --- a/test/unit/fxp.c +++ b/test/unit/fxp.c @@ -28,7 +28,7 @@ fxp_close(fxp_t a, fxp_t b) { static fxp_t xparse_fxp(const char *str) { fxp_t result; - bool err = fxp_parse(&result, str, NULL); + bool err = fxp_parse(&result, str, NULL); assert_false(err, "Invalid fxp string: %s", str); return result; } @@ -36,14 +36,14 @@ xparse_fxp(const char *str) { static void expect_parse_accurate(const char *str, const char *parse_str) { double true_val = strtod(str, NULL); - fxp_t fxp_val; - char *end; - bool err = fxp_parse(&fxp_val, parse_str, &end); + fxp_t fxp_val; + char *end; + bool err = fxp_parse(&fxp_val, parse_str, &end); expect_false(err, "Unexpected parse failure"); - expect_ptr_eq(parse_str + strlen(str), end, - "Didn't parse whole string"); - expect_true(double_close(fxp2double(fxp_val), true_val), - "Misparsed %s", str); + expect_ptr_eq( + parse_str + strlen(str), end, "Didn't parse whole string"); + expect_true( + double_close(fxp2double(fxp_val), 
true_val), "Misparsed %s", str); } static void @@ -100,12 +100,12 @@ static void expect_parse_failure(const char *str) { fxp_t result = FXP_INIT_INT(333); char *end = (void *)0x123; - bool err = fxp_parse(&result, str, &end); + bool err = fxp_parse(&result, str, &end); expect_true(err, "Expected a parse error on: %s", str); - expect_ptr_eq((void *)0x123, end, - "Parse error shouldn't change results"); - expect_u32_eq(result, FXP_INIT_INT(333), - "Parse error shouldn't change results"); + expect_ptr_eq( + (void *)0x123, end, "Parse error shouldn't change results"); + expect_u32_eq( + result, FXP_INIT_INT(333), "Parse error shouldn't change results"); } TEST_BEGIN(test_parse_invalid) { @@ -129,7 +129,6 @@ expect_init_percent(unsigned percent, const char *str) { "Expect representations of FXP_INIT_PERCENT(%u) and " "fxp_parse(\"%s\") to be equal; got %x and %x", percent, str, result_init, result_parse); - } /* @@ -145,12 +144,12 @@ TEST_BEGIN(test_init_percent) { TEST_END static void -expect_add(const char *astr, const char *bstr, const char* resultstr) { +expect_add(const char *astr, const char *bstr, const char *resultstr) { fxp_t a = xparse_fxp(astr); fxp_t b = xparse_fxp(bstr); fxp_t result = xparse_fxp(resultstr); - expect_true(fxp_close(fxp_add(a, b), result), - "Expected %s + %s == %s", astr, bstr, resultstr); + expect_true(fxp_close(fxp_add(a, b), result), "Expected %s + %s == %s", + astr, bstr, resultstr); } TEST_BEGIN(test_add_simple) { @@ -164,12 +163,12 @@ TEST_BEGIN(test_add_simple) { TEST_END static void -expect_sub(const char *astr, const char *bstr, const char* resultstr) { +expect_sub(const char *astr, const char *bstr, const char *resultstr) { fxp_t a = xparse_fxp(astr); fxp_t b = xparse_fxp(bstr); fxp_t result = xparse_fxp(resultstr); - expect_true(fxp_close(fxp_sub(a, b), result), - "Expected %s - %s == %s", astr, bstr, resultstr); + expect_true(fxp_close(fxp_sub(a, b), result), "Expected %s - %s == %s", + astr, bstr, resultstr); } 
TEST_BEGIN(test_sub_simple) { @@ -183,12 +182,12 @@ TEST_BEGIN(test_sub_simple) { TEST_END static void -expect_mul(const char *astr, const char *bstr, const char* resultstr) { +expect_mul(const char *astr, const char *bstr, const char *resultstr) { fxp_t a = xparse_fxp(astr); fxp_t b = xparse_fxp(bstr); fxp_t result = xparse_fxp(resultstr); - expect_true(fxp_close(fxp_mul(a, b), result), - "Expected %s * %s == %s", astr, bstr, resultstr); + expect_true(fxp_close(fxp_mul(a, b), result), "Expected %s * %s == %s", + astr, bstr, resultstr); } TEST_BEGIN(test_mul_simple) { @@ -202,12 +201,12 @@ TEST_BEGIN(test_mul_simple) { TEST_END static void -expect_div(const char *astr, const char *bstr, const char* resultstr) { +expect_div(const char *astr, const char *bstr, const char *resultstr) { fxp_t a = xparse_fxp(astr); fxp_t b = xparse_fxp(bstr); fxp_t result = xparse_fxp(resultstr); - expect_true(fxp_close(fxp_div(a, b), result), - "Expected %s / %s == %s", astr, bstr, resultstr); + expect_true(fxp_close(fxp_div(a, b), result), "Expected %s / %s == %s", + astr, bstr, resultstr); } TEST_BEGIN(test_div_simple) { @@ -223,11 +222,11 @@ TEST_END static void expect_round(const char *str, uint32_t rounded_down, uint32_t rounded_nearest) { - fxp_t fxp = xparse_fxp(str); + fxp_t fxp = xparse_fxp(str); uint32_t fxp_rounded_down = fxp_round_down(fxp); uint32_t fxp_rounded_nearest = fxp_round_nearest(fxp); - expect_u32_eq(rounded_down, fxp_rounded_down, - "Mistake rounding %s down", str); + expect_u32_eq( + rounded_down, fxp_rounded_down, "Mistake rounding %s down", str); expect_u32_eq(rounded_nearest, fxp_rounded_nearest, "Mistake rounding %s to nearest", str); } @@ -248,11 +247,11 @@ TEST_END static void expect_mul_frac(size_t a, const char *fracstr, size_t expected) { - fxp_t frac = xparse_fxp(fracstr); + fxp_t frac = xparse_fxp(fracstr); size_t result = fxp_mul_frac(a, frac); expect_true(double_close(expected, result), - "Expected %zu * %s == %zu (fracmul); got %zu", a, fracstr, - 
expected, result); + "Expected %zu * %s == %zu (fracmul); got %zu", a, fracstr, expected, + result); } TEST_BEGIN(test_mul_frac_simple) { @@ -273,7 +272,7 @@ TEST_END static void expect_print(const char *str) { fxp_t fxp = xparse_fxp(str); - char buf[FXP_BUF_SIZE]; + char buf[FXP_BUF_SIZE]; fxp_print(fxp, buf); expect_d_eq(0, strcmp(str, buf), "Couldn't round-trip print %s", str); } @@ -298,33 +297,32 @@ TEST_BEGIN(test_print_simple) { TEST_END TEST_BEGIN(test_stress) { - const char *numbers[] = { - "0.0", "0.1", "0.2", "0.3", "0.4", - "0.5", "0.6", "0.7", "0.8", "0.9", + const char *numbers[] = {"0.0", "0.1", "0.2", "0.3", "0.4", "0.5", + "0.6", "0.7", "0.8", "0.9", - "1.0", "1.1", "1.2", "1.3", "1.4", - "1.5", "1.6", "1.7", "1.8", "1.9", + "1.0", "1.1", "1.2", "1.3", "1.4", "1.5", "1.6", "1.7", "1.8", + "1.9", - "2.0", "2.1", "2.2", "2.3", "2.4", - "2.5", "2.6", "2.7", "2.8", "2.9", + "2.0", "2.1", "2.2", "2.3", "2.4", "2.5", "2.6", "2.7", "2.8", + "2.9", - "17.0", "17.1", "17.2", "17.3", "17.4", - "17.5", "17.6", "17.7", "17.8", "17.9", + "17.0", "17.1", "17.2", "17.3", "17.4", "17.5", "17.6", "17.7", + "17.8", "17.9", - "18.0", "18.1", "18.2", "18.3", "18.4", - "18.5", "18.6", "18.7", "18.8", "18.9", + "18.0", "18.1", "18.2", "18.3", "18.4", "18.5", "18.6", "18.7", + "18.8", "18.9", - "123.0", "123.1", "123.2", "123.3", "123.4", - "123.5", "123.6", "123.7", "123.8", "123.9", + "123.0", "123.1", "123.2", "123.3", "123.4", "123.5", "123.6", + "123.7", "123.8", "123.9", - "124.0", "124.1", "124.2", "124.3", "124.4", - "124.5", "124.6", "124.7", "124.8", "124.9", + "124.0", "124.1", "124.2", "124.3", "124.4", "124.5", "124.6", + "124.7", "124.8", "124.9", - "125.0", "125.1", "125.2", "125.3", "125.4", - "125.5", "125.6", "125.7", "125.8", "125.9"}; - size_t numbers_len = sizeof(numbers)/sizeof(numbers[0]); + "125.0", "125.1", "125.2", "125.3", "125.4", "125.5", "125.6", + "125.7", "125.8", "125.9"}; + size_t numbers_len = sizeof(numbers) / sizeof(numbers[0]); for 
(size_t i = 0; i < numbers_len; i++) { - fxp_t fxp_a = xparse_fxp(numbers[i]); + fxp_t fxp_a = xparse_fxp(numbers[i]); double double_a = strtod(numbers[i], NULL); uint32_t fxp_rounded_down = fxp_round_down(fxp_a); @@ -338,37 +336,35 @@ TEST_BEGIN(test_stress) { "Incorrectly rounded-to-nearest %s", numbers[i]); for (size_t j = 0; j < numbers_len; j++) { - fxp_t fxp_b = xparse_fxp(numbers[j]); + fxp_t fxp_b = xparse_fxp(numbers[j]); double double_b = strtod(numbers[j], NULL); - fxp_t fxp_sum = fxp_add(fxp_a, fxp_b); + fxp_t fxp_sum = fxp_add(fxp_a, fxp_b); double double_sum = double_a + double_b; expect_true( double_close(fxp2double(fxp_sum), double_sum), "Miscomputed %s + %s", numbers[i], numbers[j]); if (double_a > double_b) { - fxp_t fxp_diff = fxp_sub(fxp_a, fxp_b); + fxp_t fxp_diff = fxp_sub(fxp_a, fxp_b); double double_diff = double_a - double_b; - expect_true( - double_close(fxp2double(fxp_diff), - double_diff), + expect_true(double_close(fxp2double(fxp_diff), + double_diff), "Miscomputed %s - %s", numbers[i], numbers[j]); } - fxp_t fxp_prod = fxp_mul(fxp_a, fxp_b); + fxp_t fxp_prod = fxp_mul(fxp_a, fxp_b); double double_prod = double_a * double_b; expect_true( double_close(fxp2double(fxp_prod), double_prod), "Miscomputed %s * %s", numbers[i], numbers[j]); if (double_b != 0.0) { - fxp_t fxp_quot = fxp_div(fxp_a, fxp_b); + fxp_t fxp_quot = fxp_div(fxp_a, fxp_b); double double_quot = double_a / double_b; - expect_true( - double_close(fxp2double(fxp_quot), - double_quot), + expect_true(double_close(fxp2double(fxp_quot), + double_quot), "Miscomputed %s / %s", numbers[i], numbers[j]); } @@ -379,16 +375,8 @@ TEST_END int main(void) { - return test_no_reentrancy( - test_parse_valid, - test_parse_invalid, - test_init_percent, - test_add_simple, - test_sub_simple, - test_mul_simple, - test_div_simple, - test_round_simple, - test_mul_frac_simple, - test_print_simple, - test_stress); + return test_no_reentrancy(test_parse_valid, test_parse_invalid, + test_init_percent, 
test_add_simple, test_sub_simple, + test_mul_simple, test_div_simple, test_round_simple, + test_mul_frac_simple, test_print_simple, test_stress); } diff --git a/test/unit/hash.c b/test/unit/hash.c index 7276333d..e39110fc 100644 --- a/test/unit/hash.c +++ b/test/unit/hash.c @@ -39,24 +39,32 @@ typedef enum { static int hash_variant_bits(hash_variant_t variant) { switch (variant) { - case hash_variant_x86_32: return 32; - case hash_variant_x86_128: return 128; - case hash_variant_x64_128: return 128; - default: not_reached(); + case hash_variant_x86_32: + return 32; + case hash_variant_x86_128: + return 128; + case hash_variant_x64_128: + return 128; + default: + not_reached(); } } static const char * hash_variant_string(hash_variant_t variant) { switch (variant) { - case hash_variant_x86_32: return "hash_x86_32"; - case hash_variant_x86_128: return "hash_x86_128"; - case hash_variant_x64_128: return "hash_x64_128"; - default: not_reached(); + case hash_variant_x86_32: + return "hash_x86_32"; + case hash_variant_x86_128: + return "hash_x86_128"; + case hash_variant_x64_128: + return "hash_x64_128"; + default: + not_reached(); } } -#define KEY_SIZE 256 +#define KEY_SIZE 256 static void hash_variant_verify_key(hash_variant_t variant, uint8_t *key) { const int hashbytes = hash_variant_bits(variant) / 8; @@ -79,20 +87,24 @@ hash_variant_verify_key(hash_variant_t variant, uint8_t *key) { switch (variant) { case hash_variant_x86_32: { uint32_t out; - out = hash_x86_32(key, i, 256-i); - memcpy(&hashes[i*hashbytes], &out, hashbytes); + out = hash_x86_32(key, i, 256 - i); + memcpy(&hashes[i * hashbytes], &out, hashbytes); break; - } case hash_variant_x86_128: { + } + case hash_variant_x86_128: { uint64_t out[2]; - hash_x86_128(key, i, 256-i, out); - memcpy(&hashes[i*hashbytes], out, hashbytes); + hash_x86_128(key, i, 256 - i, out); + memcpy(&hashes[i * hashbytes], out, hashbytes); break; - } case hash_variant_x64_128: { + } + case hash_variant_x64_128: { uint64_t out[2]; - 
hash_x64_128(key, i, 256-i, out); - memcpy(&hashes[i*hashbytes], out, hashbytes); + hash_x64_128(key, i, 256 - i, out); + memcpy(&hashes[i * hashbytes], out, hashbytes); break; - } default: not_reached(); + } + default: + not_reached(); } } @@ -102,36 +114,50 @@ hash_variant_verify_key(hash_variant_t variant, uint8_t *key) { uint32_t out = hash_x86_32(hashes, hashes_size, 0); memcpy(final, &out, sizeof(out)); break; - } case hash_variant_x86_128: { + } + case hash_variant_x86_128: { uint64_t out[2]; hash_x86_128(hashes, hashes_size, 0, out); memcpy(final, out, sizeof(out)); break; - } case hash_variant_x64_128: { + } + case hash_variant_x64_128: { uint64_t out[2]; hash_x64_128(hashes, hashes_size, 0, out); memcpy(final, out, sizeof(out)); break; - } default: not_reached(); + } + default: + not_reached(); } - computed = - ((uint32_t)final[0] << 0) | - ((uint32_t)final[1] << 8) | - ((uint32_t)final[2] << 16) | - ((uint32_t)final[3] << 24); + computed = ((uint32_t) final[0] << 0) | ((uint32_t) final[1] << 8) + | ((uint32_t) final[2] << 16) | ((uint32_t) final[3] << 24); switch (variant) { #ifdef JEMALLOC_BIG_ENDIAN - case hash_variant_x86_32: expected = 0x6213303eU; break; - case hash_variant_x86_128: expected = 0x266820caU; break; - case hash_variant_x64_128: expected = 0xcc622b6fU; break; + case hash_variant_x86_32: + expected = 0x6213303eU; + break; + case hash_variant_x86_128: + expected = 0x266820caU; + break; + case hash_variant_x64_128: + expected = 0xcc622b6fU; + break; #else - case hash_variant_x86_32: expected = 0xb0f57ee3U; break; - case hash_variant_x86_128: expected = 0xb3ece62aU; break; - case hash_variant_x64_128: expected = 0x6384ba69U; break; + case hash_variant_x86_32: + expected = 0xb0f57ee3U; + break; + case hash_variant_x86_128: + expected = 0xb3ece62aU; + break; + case hash_variant_x64_128: + expected = 0x6384ba69U; + break; #endif - default: not_reached(); + default: + not_reached(); } expect_u32_eq(computed, expected, @@ -141,8 +167,8 @@ 
hash_variant_verify_key(hash_variant_t variant, uint8_t *key) { static void hash_variant_verify(hash_variant_t variant) { -#define MAX_ALIGN 16 - uint8_t key[KEY_SIZE + (MAX_ALIGN - 1)]; +#define MAX_ALIGN 16 + uint8_t key[KEY_SIZE + (MAX_ALIGN - 1)]; unsigned i; for (i = 0; i < MAX_ALIGN; i++) { @@ -169,8 +195,5 @@ TEST_END int main(void) { - return test( - test_hash_x86_32, - test_hash_x86_128, - test_hash_x64_128); + return test(test_hash_x86_32, test_hash_x86_128, test_hash_x64_128); } diff --git a/test/unit/hook.c b/test/unit/hook.c index f2a7f190..3a6b3c13 100644 --- a/test/unit/hook.c +++ b/test/unit/hook.c @@ -2,12 +2,12 @@ #include "jemalloc/internal/hook.h" -static void *arg_extra; -static int arg_type; -static void *arg_result; -static void *arg_address; -static size_t arg_old_usize; -static size_t arg_new_usize; +static void *arg_extra; +static int arg_type; +static void *arg_result; +static void *arg_address; +static size_t arg_old_usize; +static size_t arg_new_usize; static uintptr_t arg_result_raw; static uintptr_t arg_args_raw[4]; @@ -71,8 +71,8 @@ set_args_raw(uintptr_t *args_raw, int nargs) { static void expect_args_raw(uintptr_t *args_raw_expected, int nargs) { - int cmp = memcmp(args_raw_expected, arg_args_raw, - sizeof(uintptr_t) * nargs); + int cmp = memcmp( + args_raw_expected, arg_args_raw, sizeof(uintptr_t) * nargs); expect_d_eq(cmp, 0, "Raw args mismatch"); } @@ -95,8 +95,8 @@ test_alloc_hook(void *extra, hook_alloc_t type, void *result, } static void -test_dalloc_hook(void *extra, hook_dalloc_t type, void *address, - uintptr_t args_raw[3]) { +test_dalloc_hook( + void *extra, hook_dalloc_t type, void *address, uintptr_t args_raw[3]) { call_count++; arg_extra = extra; arg_type = (int)type; @@ -122,16 +122,15 @@ test_expand_hook(void *extra, hook_expand_t type, void *address, TEST_BEGIN(test_hooks_basic) { /* Just verify that the record their arguments correctly. 
*/ - hooks_t hooks = { - &test_alloc_hook, &test_dalloc_hook, &test_expand_hook, - (void *)111}; - void *handle = hook_install(TSDN_NULL, &hooks); + hooks_t hooks = {&test_alloc_hook, &test_dalloc_hook, &test_expand_hook, + (void *)111}; + void *handle = hook_install(TSDN_NULL, &hooks); uintptr_t args_raw[4] = {10, 20, 30, 40}; /* Alloc */ reset_args(); - hook_invoke_alloc(hook_alloc_posix_memalign, (void *)222, 333, - args_raw); + hook_invoke_alloc( + hook_alloc_posix_memalign, (void *)222, 333, args_raw); expect_ptr_eq(arg_extra, (void *)111, "Passed wrong user pointer"); expect_d_eq((int)hook_alloc_posix_memalign, arg_type, "Passed wrong alloc type"); @@ -142,18 +141,18 @@ TEST_BEGIN(test_hooks_basic) { /* Dalloc */ reset_args(); hook_invoke_dalloc(hook_dalloc_sdallocx, (void *)222, args_raw); - expect_d_eq((int)hook_dalloc_sdallocx, arg_type, - "Passed wrong dalloc type"); + expect_d_eq( + (int)hook_dalloc_sdallocx, arg_type, "Passed wrong dalloc type"); expect_ptr_eq((void *)111, arg_extra, "Passed wrong user pointer"); expect_ptr_eq((void *)222, arg_address, "Passed wrong address"); expect_args_raw(args_raw, 3); /* Expand */ reset_args(); - hook_invoke_expand(hook_expand_xallocx, (void *)222, 333, 444, 555, - args_raw); - expect_d_eq((int)hook_expand_xallocx, arg_type, - "Passed wrong expand type"); + hook_invoke_expand( + hook_expand_xallocx, (void *)222, 333, 444, 555, args_raw); + expect_d_eq( + (int)hook_expand_xallocx, arg_type, "Passed wrong expand type"); expect_ptr_eq((void *)111, arg_extra, "Passed wrong user pointer"); expect_ptr_eq((void *)222, arg_address, "Passed wrong address"); expect_zu_eq(333, arg_old_usize, "Passed wrong old usize"); @@ -205,7 +204,7 @@ TEST_END TEST_BEGIN(test_hooks_remove) { hooks_t hooks = {&test_alloc_hook, NULL, NULL, NULL}; - void *handle = hook_install(TSDN_NULL, &hooks); + void *handle = hook_install(TSDN_NULL, &hooks); expect_ptr_ne(handle, NULL, "Hook installation failed"); call_count = 0; uintptr_t args_raw[4] = 
{10, 20, 30, 40}; @@ -216,14 +215,13 @@ TEST_BEGIN(test_hooks_remove) { hook_remove(TSDN_NULL, handle); hook_invoke_alloc(hook_alloc_malloc, NULL, 0, NULL); expect_d_eq(call_count, 0, "Hook invoked after removal"); - } TEST_END TEST_BEGIN(test_hooks_alloc_simple) { /* "Simple" in the sense that we're not in a realloc variant. */ hooks_t hooks = {&test_alloc_hook, NULL, NULL, (void *)123}; - void *handle = hook_install(TSDN_NULL, &hooks); + void *handle = hook_install(TSDN_NULL, &hooks); expect_ptr_ne(handle, NULL, "Hook installation failed"); /* Stop malloc from being optimized away. */ @@ -237,8 +235,8 @@ TEST_BEGIN(test_hooks_alloc_simple) { expect_ptr_eq(arg_extra, (void *)123, "Wrong extra"); expect_d_eq(arg_type, (int)hook_alloc_malloc, "Wrong hook type"); expect_ptr_eq(ptr, arg_result, "Wrong result"); - expect_u64_eq((uintptr_t)ptr, (uintptr_t)arg_result_raw, - "Wrong raw result"); + expect_u64_eq( + (uintptr_t)ptr, (uintptr_t)arg_result_raw, "Wrong raw result"); expect_u64_eq((uintptr_t)1, arg_args_raw[0], "Wrong argument"); free(ptr); @@ -247,11 +245,11 @@ TEST_BEGIN(test_hooks_alloc_simple) { err = posix_memalign((void **)&ptr, 1024, 1); expect_d_eq(call_count, 1, "Hook not called"); expect_ptr_eq(arg_extra, (void *)123, "Wrong extra"); - expect_d_eq(arg_type, (int)hook_alloc_posix_memalign, - "Wrong hook type"); + expect_d_eq( + arg_type, (int)hook_alloc_posix_memalign, "Wrong hook type"); expect_ptr_eq(ptr, arg_result, "Wrong result"); - expect_u64_eq((uintptr_t)err, (uintptr_t)arg_result_raw, - "Wrong raw result"); + expect_u64_eq( + (uintptr_t)err, (uintptr_t)arg_result_raw, "Wrong raw result"); expect_u64_eq((uintptr_t)&ptr, arg_args_raw[0], "Wrong argument"); expect_u64_eq((uintptr_t)1024, arg_args_raw[1], "Wrong argument"); expect_u64_eq((uintptr_t)1, arg_args_raw[2], "Wrong argument"); @@ -262,11 +260,10 @@ TEST_BEGIN(test_hooks_alloc_simple) { ptr = aligned_alloc(1024, 1); expect_d_eq(call_count, 1, "Hook not called"); expect_ptr_eq(arg_extra, 
(void *)123, "Wrong extra"); - expect_d_eq(arg_type, (int)hook_alloc_aligned_alloc, - "Wrong hook type"); + expect_d_eq(arg_type, (int)hook_alloc_aligned_alloc, "Wrong hook type"); expect_ptr_eq(ptr, arg_result, "Wrong result"); - expect_u64_eq((uintptr_t)ptr, (uintptr_t)arg_result_raw, - "Wrong raw result"); + expect_u64_eq( + (uintptr_t)ptr, (uintptr_t)arg_result_raw, "Wrong raw result"); expect_u64_eq((uintptr_t)1024, arg_args_raw[0], "Wrong argument"); expect_u64_eq((uintptr_t)1, arg_args_raw[1], "Wrong argument"); free(ptr); @@ -278,8 +275,8 @@ TEST_BEGIN(test_hooks_alloc_simple) { expect_ptr_eq(arg_extra, (void *)123, "Wrong extra"); expect_d_eq(arg_type, (int)hook_alloc_calloc, "Wrong hook type"); expect_ptr_eq(ptr, arg_result, "Wrong result"); - expect_u64_eq((uintptr_t)ptr, (uintptr_t)arg_result_raw, - "Wrong raw result"); + expect_u64_eq( + (uintptr_t)ptr, (uintptr_t)arg_result_raw, "Wrong raw result"); expect_u64_eq((uintptr_t)11, arg_args_raw[0], "Wrong argument"); expect_u64_eq((uintptr_t)13, arg_args_raw[1], "Wrong argument"); free(ptr); @@ -292,8 +289,8 @@ TEST_BEGIN(test_hooks_alloc_simple) { expect_ptr_eq(arg_extra, (void *)123, "Wrong extra"); expect_d_eq(arg_type, (int)hook_alloc_memalign, "Wrong hook type"); expect_ptr_eq(ptr, arg_result, "Wrong result"); - expect_u64_eq((uintptr_t)ptr, (uintptr_t)arg_result_raw, - "Wrong raw result"); + expect_u64_eq( + (uintptr_t)ptr, (uintptr_t)arg_result_raw, "Wrong raw result"); expect_u64_eq((uintptr_t)1024, arg_args_raw[0], "Wrong argument"); expect_u64_eq((uintptr_t)1, arg_args_raw[1], "Wrong argument"); free(ptr); @@ -307,8 +304,8 @@ TEST_BEGIN(test_hooks_alloc_simple) { expect_ptr_eq(arg_extra, (void *)123, "Wrong extra"); expect_d_eq(arg_type, (int)hook_alloc_valloc, "Wrong hook type"); expect_ptr_eq(ptr, arg_result, "Wrong result"); - expect_u64_eq((uintptr_t)ptr, (uintptr_t)arg_result_raw, - "Wrong raw result"); + expect_u64_eq( + (uintptr_t)ptr, (uintptr_t)arg_result_raw, "Wrong raw result"); 
expect_u64_eq((uintptr_t)1, arg_args_raw[0], "Wrong argument"); free(ptr); #endif /* JEMALLOC_OVERRIDE_VALLOC */ @@ -321,8 +318,8 @@ TEST_BEGIN(test_hooks_alloc_simple) { expect_ptr_eq(arg_extra, (void *)123, "Wrong extra"); expect_d_eq(arg_type, (int)hook_alloc_pvalloc, "Wrong hook type"); expect_ptr_eq(ptr, arg_result, "Wrong result"); - expect_u64_eq((uintptr_t)ptr, (uintptr_t)arg_result_raw, - "Wrong raw result"); + expect_u64_eq( + (uintptr_t)ptr, (uintptr_t)arg_result_raw, "Wrong raw result"); expect_u64_eq((uintptr_t)1, arg_args_raw[0], "Wrong argument"); free(ptr); #endif /* JEMALLOC_OVERRIDE_PVALLOC */ @@ -334,11 +331,11 @@ TEST_BEGIN(test_hooks_alloc_simple) { expect_ptr_eq(arg_extra, (void *)123, "Wrong extra"); expect_d_eq(arg_type, (int)hook_alloc_mallocx, "Wrong hook type"); expect_ptr_eq(ptr, arg_result, "Wrong result"); - expect_u64_eq((uintptr_t)ptr, (uintptr_t)arg_result_raw, - "Wrong raw result"); + expect_u64_eq( + (uintptr_t)ptr, (uintptr_t)arg_result_raw, "Wrong raw result"); expect_u64_eq((uintptr_t)1, arg_args_raw[0], "Wrong argument"); - expect_u64_eq((uintptr_t)MALLOCX_LG_ALIGN(10), arg_args_raw[1], - "Wrong flags"); + expect_u64_eq( + (uintptr_t)MALLOCX_LG_ALIGN(10), arg_args_raw[1], "Wrong flags"); free(ptr); hook_remove(TSDN_NULL, handle); @@ -348,7 +345,7 @@ TEST_END TEST_BEGIN(test_hooks_dalloc_simple) { /* "Simple" in the sense that we're not in a realloc variant. 
*/ hooks_t hooks = {NULL, &test_dalloc_hook, NULL, (void *)123}; - void *handle = hook_install(TSDN_NULL, &hooks); + void *handle = hook_install(TSDN_NULL, &hooks); expect_ptr_ne(handle, NULL, "Hook installation failed"); void *volatile ptr; @@ -372,8 +369,8 @@ TEST_BEGIN(test_hooks_dalloc_simple) { expect_d_eq(arg_type, (int)hook_dalloc_dallocx, "Wrong hook type"); expect_ptr_eq(ptr, arg_address, "Wrong pointer freed"); expect_u64_eq((uintptr_t)ptr, arg_args_raw[0], "Wrong raw arg"); - expect_u64_eq((uintptr_t)MALLOCX_TCACHE_NONE, arg_args_raw[1], - "Wrong raw arg"); + expect_u64_eq( + (uintptr_t)MALLOCX_TCACHE_NONE, arg_args_raw[1], "Wrong raw arg"); /* sdallocx() */ reset(); @@ -385,8 +382,8 @@ TEST_BEGIN(test_hooks_dalloc_simple) { expect_ptr_eq(ptr, arg_address, "Wrong pointer freed"); expect_u64_eq((uintptr_t)ptr, arg_args_raw[0], "Wrong raw arg"); expect_u64_eq((uintptr_t)1, arg_args_raw[1], "Wrong raw arg"); - expect_u64_eq((uintptr_t)MALLOCX_TCACHE_NONE, arg_args_raw[2], - "Wrong raw arg"); + expect_u64_eq( + (uintptr_t)MALLOCX_TCACHE_NONE, arg_args_raw[2], "Wrong raw arg"); hook_remove(TSDN_NULL, handle); } @@ -395,7 +392,7 @@ TEST_END TEST_BEGIN(test_hooks_expand_simple) { /* "Simple" in the sense that we're not in a realloc variant. 
*/ hooks_t hooks = {NULL, NULL, &test_expand_hook, (void *)123}; - void *handle = hook_install(TSDN_NULL, &hooks); + void *handle = hook_install(TSDN_NULL, &hooks); expect_ptr_ne(handle, NULL, "Hook installation failed"); void *volatile ptr; @@ -421,9 +418,9 @@ TEST_BEGIN(test_hooks_expand_simple) { TEST_END TEST_BEGIN(test_hooks_realloc_as_malloc_or_free) { - hooks_t hooks = {&test_alloc_hook, &test_dalloc_hook, - &test_expand_hook, (void *)123}; - void *handle = hook_install(TSDN_NULL, &hooks); + hooks_t hooks = {&test_alloc_hook, &test_dalloc_hook, &test_expand_hook, + (void *)123}; + void *handle = hook_install(TSDN_NULL, &hooks); expect_ptr_ne(handle, NULL, "Hook installation failed"); void *volatile ptr; @@ -435,8 +432,8 @@ TEST_BEGIN(test_hooks_realloc_as_malloc_or_free) { expect_ptr_eq(arg_extra, (void *)123, "Wrong extra"); expect_d_eq(arg_type, (int)hook_alloc_realloc, "Wrong hook type"); expect_ptr_eq(ptr, arg_result, "Wrong result"); - expect_u64_eq((uintptr_t)ptr, (uintptr_t)arg_result_raw, - "Wrong raw result"); + expect_u64_eq( + (uintptr_t)ptr, (uintptr_t)arg_result_raw, "Wrong raw result"); expect_u64_eq((uintptr_t)NULL, arg_args_raw[0], "Wrong argument"); expect_u64_eq((uintptr_t)1, arg_args_raw[1], "Wrong argument"); free(ptr); @@ -448,14 +445,11 @@ TEST_BEGIN(test_hooks_realloc_as_malloc_or_free) { realloc(ptr, 0); expect_d_eq(call_count, 1, "Hook not called"); expect_ptr_eq(arg_extra, (void *)123, "Wrong extra"); - expect_d_eq(arg_type, (int)hook_dalloc_realloc, - "Wrong hook type"); - expect_ptr_eq(ptr, arg_address, - "Wrong pointer freed"); - expect_u64_eq((uintptr_t)ptr, arg_args_raw[0], - "Wrong raw arg"); - expect_u64_eq((uintptr_t)0, arg_args_raw[1], - "Wrong raw arg"); + expect_d_eq( + arg_type, (int)hook_dalloc_realloc, "Wrong hook type"); + expect_ptr_eq(ptr, arg_address, "Wrong pointer freed"); + expect_u64_eq((uintptr_t)ptr, arg_args_raw[0], "Wrong raw arg"); + expect_u64_eq((uintptr_t)0, arg_args_raw[1], "Wrong raw arg"); } /* 
realloc(NULL, 0) as malloc(0) */ @@ -465,8 +459,8 @@ TEST_BEGIN(test_hooks_realloc_as_malloc_or_free) { expect_ptr_eq(arg_extra, (void *)123, "Wrong extra"); expect_d_eq(arg_type, (int)hook_alloc_realloc, "Wrong hook type"); expect_ptr_eq(ptr, arg_result, "Wrong result"); - expect_u64_eq((uintptr_t)ptr, (uintptr_t)arg_result_raw, - "Wrong raw result"); + expect_u64_eq( + (uintptr_t)ptr, (uintptr_t)arg_result_raw, "Wrong raw result"); expect_u64_eq((uintptr_t)NULL, arg_args_raw[0], "Wrong argument"); expect_u64_eq((uintptr_t)0, arg_args_raw[1], "Wrong argument"); free(ptr); @@ -478,9 +472,9 @@ TEST_END static void do_realloc_test(void *(*ralloc)(void *, size_t, int), int flags, int expand_type, int dalloc_type) { - hooks_t hooks = {&test_alloc_hook, &test_dalloc_hook, - &test_expand_hook, (void *)123}; - void *handle = hook_install(TSDN_NULL, &hooks); + hooks_t hooks = {&test_alloc_hook, &test_dalloc_hook, &test_expand_hook, + (void *)123}; + void *handle = hook_install(TSDN_NULL, &hooks); expect_ptr_ne(handle, NULL, "Hook installation failed"); void *volatile ptr; @@ -496,8 +490,8 @@ do_realloc_test(void *(*ralloc)(void *, size_t, int), int flags, expect_ptr_eq(arg_extra, (void *)123, "Wrong extra"); expect_d_eq(arg_type, expand_type, "Wrong hook type"); expect_ptr_eq(ptr, arg_address, "Wrong address"); - expect_u64_eq((uintptr_t)ptr, (uintptr_t)arg_result_raw, - "Wrong raw result"); + expect_u64_eq( + (uintptr_t)ptr, (uintptr_t)arg_result_raw, "Wrong raw result"); expect_u64_eq((uintptr_t)ptr, arg_args_raw[0], "Wrong argument"); expect_u64_eq((uintptr_t)130, arg_args_raw[1], "Wrong argument"); free(ptr); @@ -522,11 +516,11 @@ do_realloc_test(void *(*ralloc)(void *, size_t, int), int flags, } expect_ptr_eq(arg_extra, (void *)123, "Wrong extra"); expect_ptr_eq(ptr2, arg_address, "Wrong address"); - expect_u64_eq((uintptr_t)ptr, (uintptr_t)arg_result_raw, - "Wrong raw result"); + expect_u64_eq( + (uintptr_t)ptr, (uintptr_t)arg_result_raw, "Wrong raw result"); 
expect_u64_eq((uintptr_t)ptr2, arg_args_raw[0], "Wrong argument"); - expect_u64_eq((uintptr_t)2 * 1024 * 1024, arg_args_raw[1], - "Wrong argument"); + expect_u64_eq( + (uintptr_t)2 * 1024 * 1024, arg_args_raw[1], "Wrong argument"); free(ptr); /* Realloc with move, small. */ @@ -540,8 +534,8 @@ do_realloc_test(void *(*ralloc)(void *, size_t, int), int flags, expect_d_eq(arg_type, dalloc_type, "Wrong hook type"); expect_ptr_eq(ptr, arg_address, "Wrong address"); expect_ptr_eq(ptr2, arg_result, "Wrong address"); - expect_u64_eq((uintptr_t)ptr2, (uintptr_t)arg_result_raw, - "Wrong raw result"); + expect_u64_eq( + (uintptr_t)ptr2, (uintptr_t)arg_result_raw, "Wrong raw result"); expect_u64_eq((uintptr_t)ptr, arg_args_raw[0], "Wrong argument"); expect_u64_eq((uintptr_t)128, arg_args_raw[1], "Wrong argument"); free(ptr2); @@ -557,11 +551,11 @@ do_realloc_test(void *(*ralloc)(void *, size_t, int), int flags, expect_d_eq(arg_type, dalloc_type, "Wrong hook type"); expect_ptr_eq(ptr, arg_address, "Wrong address"); expect_ptr_eq(ptr2, arg_result, "Wrong address"); - expect_u64_eq((uintptr_t)ptr2, (uintptr_t)arg_result_raw, - "Wrong raw result"); + expect_u64_eq( + (uintptr_t)ptr2, (uintptr_t)arg_result_raw, "Wrong raw result"); expect_u64_eq((uintptr_t)ptr, arg_args_raw[0], "Wrong argument"); - expect_u64_eq((uintptr_t)2 * 1024 * 1024, arg_args_raw[1], - "Wrong argument"); + expect_u64_eq( + (uintptr_t)2 * 1024 * 1024, arg_args_raw[1], "Wrong argument"); free(ptr2); hook_remove(TSDN_NULL, handle); @@ -573,8 +567,8 @@ realloc_wrapper(void *ptr, size_t size, UNUSED int flags) { } TEST_BEGIN(test_hooks_realloc) { - do_realloc_test(&realloc_wrapper, 0, hook_expand_realloc, - hook_dalloc_realloc); + do_realloc_test( + &realloc_wrapper, 0, hook_expand_realloc, hook_dalloc_realloc); } TEST_END @@ -587,14 +581,9 @@ TEST_END int main(void) { /* We assert on call counts. 
*/ - return test_no_reentrancy( - test_hooks_basic, - test_hooks_null, - test_hooks_remove, - test_hooks_alloc_simple, - test_hooks_dalloc_simple, - test_hooks_expand_simple, - test_hooks_realloc_as_malloc_or_free, - test_hooks_realloc, + return test_no_reentrancy(test_hooks_basic, test_hooks_null, + test_hooks_remove, test_hooks_alloc_simple, + test_hooks_dalloc_simple, test_hooks_expand_simple, + test_hooks_realloc_as_malloc_or_free, test_hooks_realloc, test_hooks_rallocx); } diff --git a/test/unit/hpa.c b/test/unit/hpa.c index 47fa25f2..1fed8a80 100644 --- a/test/unit/hpa.c +++ b/test/unit/hpa.c @@ -13,55 +13,53 @@ struct test_data_s { * Must be the first member -- we convert back and forth between the * test_data_t and the hpa_shard_t; */ - hpa_shard_t shard; + hpa_shard_t shard; hpa_central_t central; - base_t *base; + base_t *base; edata_cache_t shard_edata_cache; emap_t emap; }; static hpa_shard_opts_t test_hpa_shard_opts_default = { - /* slab_max_alloc */ - ALLOC_MAX, - /* hugification_threshold */ - HUGEPAGE, - /* dirty_mult */ - FXP_INIT_PERCENT(25), - /* deferral_allowed */ - false, - /* hugify_delay_ms */ - 10 * 1000, - /* hugify_sync */ - false, - /* min_purge_interval_ms */ - 5 * 1000, - /* experimental_max_purge_nhp */ - -1 -}; + /* slab_max_alloc */ + ALLOC_MAX, + /* hugification_threshold */ + HUGEPAGE, + /* dirty_mult */ + FXP_INIT_PERCENT(25), + /* deferral_allowed */ + false, + /* hugify_delay_ms */ + 10 * 1000, + /* hugify_sync */ + false, + /* min_purge_interval_ms */ + 5 * 1000, + /* experimental_max_purge_nhp */ + -1}; static hpa_shard_opts_t test_hpa_shard_opts_purge = { - /* slab_max_alloc */ - HUGEPAGE, - /* hugification_threshold */ - 0.9 * HUGEPAGE, - /* dirty_mult */ - FXP_INIT_PERCENT(11), - /* deferral_allowed */ - true, - /* hugify_delay_ms */ - 0, - /* hugify_sync */ - false, - /* min_purge_interval_ms */ - 5 * 1000, - /* experimental_max_purge_nhp */ - -1 -}; + /* slab_max_alloc */ + HUGEPAGE, + /* hugification_threshold */ + 0.9 
* HUGEPAGE, + /* dirty_mult */ + FXP_INIT_PERCENT(11), + /* deferral_allowed */ + true, + /* hugify_delay_ms */ + 0, + /* hugify_sync */ + false, + /* min_purge_interval_ms */ + 5 * 1000, + /* experimental_max_purge_nhp */ + -1}; static hpa_shard_t * create_test_data(const hpa_hooks_t *hooks, hpa_shard_opts_t *opts) { - bool err; + bool err; base_t *base = base_new(TSDN_NULL, /* ind */ SHARD_IND, &ehooks_default_extent_hooks, /* metadata_use_hooks */ true); assert_ptr_not_null(base, ""); @@ -98,8 +96,8 @@ destroy_test_data(hpa_shard_t *shard) { TEST_BEGIN(test_alloc_max) { test_skip_if(!hpa_supported()); - hpa_shard_t *shard = create_test_data(&hpa_hooks_default, - &test_hpa_shard_opts_default); + hpa_shard_t *shard = create_test_data( + &hpa_hooks_default, &test_hpa_shard_opts_default); tsdn_t *tsdn = tsd_tsdn(tsd_fetch()); edata_t *edata; @@ -107,19 +105,19 @@ TEST_BEGIN(test_alloc_max) { /* Small max */ bool deferred_work_generated = false; edata = pai_alloc(tsdn, &shard->pai, ALLOC_MAX, PAGE, false, false, - /* frequent_reuse */ false, &deferred_work_generated); + /* frequent_reuse */ false, &deferred_work_generated); expect_ptr_not_null(edata, "Allocation of small max failed"); edata = pai_alloc(tsdn, &shard->pai, ALLOC_MAX + PAGE, PAGE, false, false, /* frequent_reuse */ false, &deferred_work_generated); expect_ptr_null(edata, "Allocation of larger than small max succeeded"); - edata = pai_alloc(tsdn, &shard->pai, ALLOC_MAX, PAGE, false, - false, /* frequent_reuse */ true, &deferred_work_generated); + edata = pai_alloc(tsdn, &shard->pai, ALLOC_MAX, PAGE, false, false, + /* frequent_reuse */ true, &deferred_work_generated); expect_ptr_not_null(edata, "Allocation of frequent reused failed"); - edata = pai_alloc(tsdn, &shard->pai, HUGEPAGE, PAGE, false, - false, /* frequent_reuse */ true, &deferred_work_generated); + edata = pai_alloc(tsdn, &shard->pai, HUGEPAGE, PAGE, false, false, + /* frequent_reuse */ true, &deferred_work_generated); 
expect_ptr_not_null(edata, "Allocation of frequent reused failed"); edata = pai_alloc(tsdn, &shard->pai, HUGEPAGE + PAGE, PAGE, false, @@ -133,8 +131,8 @@ TEST_END typedef struct mem_contents_s mem_contents_t; struct mem_contents_s { uintptr_t my_addr; - size_t size; - edata_t *my_edata; + size_t size; + edata_t *my_edata; rb_node(mem_contents_t) link; }; @@ -144,8 +142,7 @@ mem_contents_cmp(const mem_contents_t *a, const mem_contents_t *b) { } typedef rb_tree(mem_contents_t) mem_tree_t; -rb_gen(static, mem_tree_, mem_tree_t, mem_contents_t, link, - mem_contents_cmp); +rb_gen(static, mem_tree_, mem_tree_t, mem_contents_t, link, mem_contents_cmp); static void node_assert_ordered(mem_contents_t *a, mem_contents_t *b) { @@ -191,14 +188,14 @@ node_remove(mem_tree_t *tree, edata_t *edata) { TEST_BEGIN(test_stress) { test_skip_if(!hpa_supported()); - hpa_shard_t *shard = create_test_data(&hpa_hooks_default, - &test_hpa_shard_opts_default); + hpa_shard_t *shard = create_test_data( + &hpa_hooks_default, &test_hpa_shard_opts_default); tsdn_t *tsdn = tsd_tsdn(tsd_fetch()); const size_t nlive_edatas_max = 500; - size_t nlive_edatas = 0; - edata_t **live_edatas = calloc(nlive_edatas_max, sizeof(edata_t *)); + size_t nlive_edatas = 0; + edata_t **live_edatas = calloc(nlive_edatas_max, sizeof(edata_t *)); /* * Nothing special about this constant; we're only fixing it for * consistency across runs. 
@@ -224,13 +221,14 @@ TEST_BEGIN(test_stress) { */ size_t npages_min = 1; size_t npages_max = ALLOC_MAX / PAGE; - size_t npages = npages_min + prng_range_zu(&prng_state, - npages_max - npages_min); + size_t npages = npages_min + + prng_range_zu( + &prng_state, npages_max - npages_min); edata_t *edata = pai_alloc(tsdn, &shard->pai, npages * PAGE, PAGE, false, false, false, &deferred_work_generated); - assert_ptr_not_null(edata, - "Unexpected allocation failure"); + assert_ptr_not_null( + edata, "Unexpected allocation failure"); live_edatas[nlive_edatas] = edata; nlive_edatas++; node_insert(&tree, edata, npages); @@ -239,7 +237,8 @@ TEST_BEGIN(test_stress) { if (nlive_edatas == 0) { continue; } - size_t victim = prng_range_zu(&prng_state, nlive_edatas); + size_t victim = prng_range_zu( + &prng_state, nlive_edatas); edata_t *to_free = live_edatas[victim]; live_edatas[victim] = live_edatas[nlive_edatas - 1]; nlive_edatas--; @@ -251,7 +250,7 @@ TEST_BEGIN(test_stress) { size_t ntreenodes = 0; for (mem_contents_t *contents = mem_tree_first(&tree); contents != NULL; - contents = mem_tree_next(&tree, contents)) { + contents = mem_tree_next(&tree, contents)) { ntreenodes++; node_check(&tree, contents); } @@ -264,8 +263,8 @@ TEST_BEGIN(test_stress) { for (size_t i = 0; i < nlive_edatas; i++) { edata_t *to_free = live_edatas[i]; node_remove(&tree, to_free); - pai_dalloc(tsdn, &shard->pai, to_free, - &deferred_work_generated); + pai_dalloc( + tsdn, &shard->pai, to_free, &deferred_work_generated); } hpa_shard_destroy(tsdn, shard); @@ -277,8 +276,7 @@ TEST_END static void expect_contiguous(edata_t **edatas, size_t nedatas) { for (size_t i = 0; i < nedatas; i++) { - size_t expected = (size_t)edata_base_get(edatas[0]) - + i * PAGE; + size_t expected = (size_t)edata_base_get(edatas[0]) + i * PAGE; expect_zu_eq(expected, (size_t)edata_base_get(edatas[i]), "Mismatch at index %zu", i); } @@ -287,13 +285,13 @@ expect_contiguous(edata_t **edatas, size_t nedatas) { 
TEST_BEGIN(test_alloc_dalloc_batch) { test_skip_if(!hpa_supported()); - hpa_shard_t *shard = create_test_data(&hpa_hooks_default, - &test_hpa_shard_opts_default); + hpa_shard_t *shard = create_test_data( + &hpa_hooks_default, &test_hpa_shard_opts_default); tsdn_t *tsdn = tsd_tsdn(tsd_fetch()); bool deferred_work_generated = false; - enum {NALLOCS = 8}; + enum { NALLOCS = 8 }; edata_t *allocs[NALLOCS]; /* @@ -329,11 +327,11 @@ TEST_BEGIN(test_alloc_dalloc_batch) { for (size_t i = 0; i < NALLOCS / 2; i++) { edata_list_active_append(&allocs_list, allocs[i]); } - pai_dalloc_batch(tsdn, &shard->pai, &allocs_list, - &deferred_work_generated); + pai_dalloc_batch( + tsdn, &shard->pai, &allocs_list, &deferred_work_generated); for (size_t i = NALLOCS / 2; i < NALLOCS; i++) { - pai_dalloc(tsdn, &shard->pai, allocs[i], - &deferred_work_generated); + pai_dalloc( + tsdn, &shard->pai, allocs[i], &deferred_work_generated); } /* Reallocate (individually), and ensure reuse and contiguity. */ @@ -344,8 +342,8 @@ TEST_BEGIN(test_alloc_dalloc_batch) { expect_ptr_not_null(allocs[i], "Unexpected alloc failure."); } void *new_base = edata_base_get(allocs[0]); - expect_ptr_eq(orig_base, new_base, - "Failed to reuse the allocated memory."); + expect_ptr_eq( + orig_base, new_base, "Failed to reuse the allocated memory."); expect_contiguous(allocs, NALLOCS); destroy_test_data(shard); @@ -429,7 +427,7 @@ TEST_BEGIN(test_defer_time) { bool deferred_work_generated = false; nstime_init(&defer_curtime, 0); - tsdn_t *tsdn = tsd_tsdn(tsd_fetch()); + tsdn_t *tsdn = tsd_tsdn(tsd_fetch()); edata_t *edatas[HUGEPAGE_PAGES]; for (int i = 0; i < (int)HUGEPAGE_PAGES; i++) { edatas[i] = pai_alloc(tsdn, &shard->pai, PAGE, PAGE, false, @@ -448,8 +446,8 @@ TEST_BEGIN(test_defer_time) { /* Purge. Recall that dirty_mult is .25. 
*/ for (int i = 0; i < (int)HUGEPAGE_PAGES / 2; i++) { - pai_dalloc(tsdn, &shard->pai, edatas[i], - &deferred_work_generated); + pai_dalloc( + tsdn, &shard->pai, edatas[i], &deferred_work_generated); } hpa_shard_do_deferred_work(tsdn, shard); @@ -474,8 +472,7 @@ TEST_BEGIN(test_defer_time) { * We would be ineligible for hugification, had we not already met the * threshold before dipping below it. */ - pai_dalloc(tsdn, &shard->pai, edatas[0], - &deferred_work_generated); + pai_dalloc(tsdn, &shard->pai, edatas[0], &deferred_work_generated); /* Wait for the threshold again. */ nstime_init2(&defer_curtime, 22, 0); hpa_shard_do_deferred_work(tsdn, shard); @@ -491,8 +488,8 @@ TEST_END TEST_BEGIN(test_purge_no_infinite_loop) { test_skip_if(!hpa_supported()); - hpa_shard_t *shard = create_test_data(&hpa_hooks_default, - &test_hpa_shard_opts_purge); + hpa_shard_t *shard = create_test_data( + &hpa_hooks_default, &test_hpa_shard_opts_purge); tsdn_t *tsdn = tsd_tsdn(tsd_fetch()); /* @@ -500,14 +497,15 @@ TEST_BEGIN(test_purge_no_infinite_loop) { * criteria for huge page and at the same time do not allow hugify page * without triggering a purge. 
*/ - const size_t npages = - test_hpa_shard_opts_purge.hugification_threshold / PAGE + 1; + const size_t npages = test_hpa_shard_opts_purge.hugification_threshold + / PAGE + + 1; const size_t size = npages * PAGE; - bool deferred_work_generated = false; + bool deferred_work_generated = false; edata_t *edata = pai_alloc(tsdn, &shard->pai, size, PAGE, - /* zero */ false, /* guarded */ false, /* frequent_reuse */ false, - &deferred_work_generated); + /* zero */ false, /* guarded */ false, /* frequent_reuse */ false, + &deferred_work_generated); expect_ptr_not_null(edata, "Unexpected alloc failure"); hpa_shard_do_deferred_work(tsdn, shard); @@ -542,8 +540,8 @@ TEST_BEGIN(test_no_min_purge_interval) { nstime_init(&defer_curtime, 0); tsdn_t *tsdn = tsd_tsdn(tsd_fetch()); - edata_t *edata = pai_alloc(tsdn, &shard->pai, PAGE, PAGE, false, - false, false, &deferred_work_generated); + edata_t *edata = pai_alloc(tsdn, &shard->pai, PAGE, PAGE, false, false, + false, &deferred_work_generated); expect_ptr_not_null(edata, "Unexpected null edata"); pai_dalloc(tsdn, &shard->pai, edata, &deferred_work_generated); hpa_shard_do_deferred_work(tsdn, shard); @@ -584,8 +582,8 @@ TEST_BEGIN(test_min_purge_interval) { nstime_init(&defer_curtime, 0); tsdn_t *tsdn = tsd_tsdn(tsd_fetch()); - edata_t *edata = pai_alloc(tsdn, &shard->pai, PAGE, PAGE, false, - false, false, &deferred_work_generated); + edata_t *edata = pai_alloc(tsdn, &shard->pai, PAGE, PAGE, false, false, + false, &deferred_work_generated); expect_ptr_not_null(edata, "Unexpected null edata"); pai_dalloc(tsdn, &shard->pai, edata, &deferred_work_generated); hpa_shard_do_deferred_work(tsdn, shard); @@ -634,7 +632,7 @@ TEST_BEGIN(test_purge) { nstime_init(&defer_curtime, 0); tsdn_t *tsdn = tsd_tsdn(tsd_fetch()); - enum {NALLOCS = 8 * HUGEPAGE_PAGES}; + enum { NALLOCS = 8 * HUGEPAGE_PAGES }; edata_t *edatas[NALLOCS]; for (int i = 0; i < NALLOCS; i++) { edatas[i] = pai_alloc(tsdn, &shard->pai, PAGE, PAGE, false, @@ -643,8 +641,8 @@ 
TEST_BEGIN(test_purge) { } /* Deallocate 3 hugepages out of 8. */ for (int i = 0; i < 3 * (int)HUGEPAGE_PAGES; i++) { - pai_dalloc(tsdn, &shard->pai, edatas[i], - &deferred_work_generated); + pai_dalloc( + tsdn, &shard->pai, edatas[i], &deferred_work_generated); } nstime_init2(&defer_curtime, 6, 0); hpa_shard_do_deferred_work(tsdn, shard); @@ -702,7 +700,7 @@ TEST_BEGIN(test_experimental_max_purge_nhp) { nstime_init(&defer_curtime, 0); tsdn_t *tsdn = tsd_tsdn(tsd_fetch()); - enum {NALLOCS = 8 * HUGEPAGE_PAGES}; + enum { NALLOCS = 8 * HUGEPAGE_PAGES }; edata_t *edatas[NALLOCS]; for (int i = 0; i < NALLOCS; i++) { edatas[i] = pai_alloc(tsdn, &shard->pai, PAGE, PAGE, false, @@ -711,8 +709,8 @@ TEST_BEGIN(test_experimental_max_purge_nhp) { } /* Deallocate 3 hugepages out of 8. */ for (int i = 0; i < 3 * (int)HUGEPAGE_PAGES; i++) { - pai_dalloc(tsdn, &shard->pai, edatas[i], - &deferred_work_generated); + pai_dalloc( + tsdn, &shard->pai, edatas[i], &deferred_work_generated); } nstime_init2(&defer_curtime, 6, 0); hpa_shard_do_deferred_work(tsdn, shard); @@ -749,8 +747,7 @@ TEST_BEGIN(test_experimental_max_purge_nhp) { TEST_END TEST_BEGIN(test_vectorized_opt_eq_zero) { - test_skip_if(!hpa_supported() || - (opt_process_madvise_max_batch != 0)); + test_skip_if(!hpa_supported() || (opt_process_madvise_max_batch != 0)); hpa_hooks_t hooks; hooks.map = &defer_test_map; @@ -770,11 +767,11 @@ TEST_BEGIN(test_vectorized_opt_eq_zero) { ndefer_purge_calls = 0; hpa_shard_t *shard = create_test_data(&hooks, &opts); - bool deferred_work_generated = false; + bool deferred_work_generated = false; nstime_init(&defer_curtime, 0); - tsdn_t *tsdn = tsd_tsdn(tsd_fetch()); - edata_t *edata = pai_alloc(tsdn, &shard->pai, PAGE, PAGE, false, - false, false, &deferred_work_generated); + tsdn_t *tsdn = tsd_tsdn(tsd_fetch()); + edata_t *edata = pai_alloc(tsdn, &shard->pai, PAGE, PAGE, false, false, + false, &deferred_work_generated); expect_ptr_not_null(edata, "Unexpected null edata"); 
pai_dalloc(tsdn, &shard->pai, edata, &deferred_work_generated); hpa_shard_do_deferred_work(tsdn, shard); @@ -800,15 +797,9 @@ main(void) { (void)mem_tree_iter; (void)mem_tree_reverse_iter; (void)mem_tree_destroy; - return test_no_reentrancy( - test_alloc_max, - test_stress, - test_alloc_dalloc_batch, - test_defer_time, - test_purge_no_infinite_loop, - test_no_min_purge_interval, - test_min_purge_interval, - test_purge, - test_experimental_max_purge_nhp, - test_vectorized_opt_eq_zero); + return test_no_reentrancy(test_alloc_max, test_stress, + test_alloc_dalloc_batch, test_defer_time, + test_purge_no_infinite_loop, test_no_min_purge_interval, + test_min_purge_interval, test_purge, + test_experimental_max_purge_nhp, test_vectorized_opt_eq_zero); } diff --git a/test/unit/hpa_background_thread.c b/test/unit/hpa_background_thread.c index 93f046b5..80cf2fed 100644 --- a/test/unit/hpa_background_thread.c +++ b/test/unit/hpa_background_thread.c @@ -12,7 +12,7 @@ TEST_BEGIN(test_hpa_background_thread_a0_initialized) { test_skip_if(!have_background_thread); test_skip_if(san_guard_enabled()); - bool enabled = false; + bool enabled = false; size_t sz = sizeof(enabled); int err = mallctl("background_thread", (void *)&enabled, &sz, NULL, 0); expect_d_eq(err, 0, "Unexpected mallctl() failure"); @@ -38,7 +38,7 @@ sleep_for_background_thread_interval(void) { static unsigned create_arena(void) { unsigned arena_ind; - size_t sz; + size_t sz; sz = sizeof(unsigned); expect_d_eq(mallctl("arenas.create", (void *)&arena_ind, &sz, NULL, 2), @@ -48,17 +48,17 @@ create_arena(void) { static size_t get_empty_ndirty(unsigned arena_ind) { - int err; - size_t ndirty_huge; - size_t ndirty_nonhuge; + int err; + size_t ndirty_huge; + size_t ndirty_nonhuge; uint64_t epoch = 1; - size_t sz = sizeof(epoch); - err = je_mallctl("epoch", (void *)&epoch, &sz, (void *)&epoch, - sizeof(epoch)); + size_t sz = sizeof(epoch); + err = je_mallctl( + "epoch", (void *)&epoch, &sz, (void *)&epoch, sizeof(epoch)); 
expect_d_eq(0, err, "Unexpected mallctl() failure"); size_t mib[6]; - size_t miblen = sizeof(mib)/sizeof(mib[0]); + size_t miblen = sizeof(mib) / sizeof(mib[0]); err = mallctlnametomib( "stats.arenas.0.hpa_shard.empty_slabs.ndirty_nonhuge", mib, &miblen); @@ -70,8 +70,7 @@ get_empty_ndirty(unsigned arena_ind) { expect_d_eq(0, err, "Unexpected mallctlbymib() failure"); err = mallctlnametomib( - "stats.arenas.0.hpa_shard.empty_slabs.ndirty_huge", mib, - &miblen); + "stats.arenas.0.hpa_shard.empty_slabs.ndirty_huge", mib, &miblen); expect_d_eq(0, err, "Unexpected mallctlnametomib() failure"); sz = sizeof(ndirty_huge); @@ -85,20 +84,20 @@ get_empty_ndirty(unsigned arena_ind) { static void set_background_thread_enabled(bool enabled) { int err; - err = je_mallctl("background_thread", NULL, NULL, &enabled, - sizeof(enabled)); + err = je_mallctl( + "background_thread", NULL, NULL, &enabled, sizeof(enabled)); expect_d_eq(0, err, "Unexpected mallctl failure"); } static void wait_until_thread_is_enabled(unsigned arena_id) { - tsd_t* tsd = tsd_fetch(); + tsd_t *tsd = tsd_fetch(); bool sleeping = false; - int iterations = 0; + int iterations = 0; do { - background_thread_info_t *info = - background_thread_info_get(arena_id); + background_thread_info_t *info = background_thread_info_get( + arena_id); malloc_mutex_lock(tsd_tsdn(tsd), &info->mtx); malloc_mutex_unlock(tsd_tsdn(tsd), &info->mtx); sleeping = background_thread_indefinite_sleep(info); @@ -113,10 +112,8 @@ expect_purging(unsigned arena_ind) { expect_zu_eq(0, empty_ndirty, "Expected arena to start unused."); void *ptrs[2]; - ptrs[0] = mallocx(PAGE, - MALLOCX_TCACHE_NONE | MALLOCX_ARENA(arena_ind)); - ptrs[1] = mallocx(PAGE, - MALLOCX_TCACHE_NONE | MALLOCX_ARENA(arena_ind)); + ptrs[0] = mallocx(PAGE, MALLOCX_TCACHE_NONE | MALLOCX_ARENA(arena_ind)); + ptrs[1] = mallocx(PAGE, MALLOCX_TCACHE_NONE | MALLOCX_ARENA(arena_ind)); empty_ndirty = get_empty_ndirty(arena_ind); expect_zu_eq(0, empty_ndirty, "All pages should be 
active"); @@ -151,15 +148,14 @@ expect_deferred_purging(unsigned arena_ind) { */ bool observed_dirty_page = false; for (int i = 0; i < 10; i++) { - void *ptr = mallocx(PAGE, - MALLOCX_TCACHE_NONE | MALLOCX_ARENA(arena_ind)); + void *ptr = mallocx( + PAGE, MALLOCX_TCACHE_NONE | MALLOCX_ARENA(arena_ind)); empty_ndirty = get_empty_ndirty(arena_ind); expect_zu_eq(0, empty_ndirty, "All pages should be active"); dallocx(ptr, MALLOCX_TCACHE_NONE); empty_ndirty = get_empty_ndirty(arena_ind); - expect_true(empty_ndirty == 0 || empty_ndirty == 1 || - opt_prof, "Unexpected extra dirty page count: %zu", - empty_ndirty); + expect_true(empty_ndirty == 0 || empty_ndirty == 1 || opt_prof, + "Unexpected extra dirty page count: %zu", empty_ndirty); if (empty_ndirty > 0) { observed_dirty_page = true; break; @@ -173,8 +169,8 @@ expect_deferred_purging(unsigned arena_ind) { * time. Retry 100 times max before bailing out. */ unsigned retry = 0; - while ((empty_ndirty = get_empty_ndirty(arena_ind)) > 0 && - (retry++ < 100)) { + while ((empty_ndirty = get_empty_ndirty(arena_ind)) > 0 + && (retry++ < 100)) { sleep_for_background_thread_interval(); } diff --git a/test/unit/hpa_vectorized_madvise.c b/test/unit/hpa_vectorized_madvise.c index 6770a9fa..8df54d06 100644 --- a/test/unit/hpa_vectorized_madvise.c +++ b/test/unit/hpa_vectorized_madvise.c @@ -13,36 +13,35 @@ struct test_data_s { * Must be the first member -- we convert back and forth between the * test_data_t and the hpa_shard_t; */ - hpa_shard_t shard; + hpa_shard_t shard; hpa_central_t central; - base_t *base; + base_t *base; edata_cache_t shard_edata_cache; emap_t emap; }; static hpa_shard_opts_t test_hpa_shard_opts_default = { - /* slab_max_alloc */ - ALLOC_MAX, - /* hugification_threshold */ - HUGEPAGE, - /* dirty_mult */ - FXP_INIT_PERCENT(25), - /* deferral_allowed */ - false, - /* hugify_delay_ms */ - 10 * 1000, - /* hugify_sync */ - false, - /* min_purge_interval_ms */ - 5 * 1000, - /* experimental_max_purge_nhp */ - -1 -}; 
+ /* slab_max_alloc */ + ALLOC_MAX, + /* hugification_threshold */ + HUGEPAGE, + /* dirty_mult */ + FXP_INIT_PERCENT(25), + /* deferral_allowed */ + false, + /* hugify_delay_ms */ + 10 * 1000, + /* hugify_sync */ + false, + /* min_purge_interval_ms */ + 5 * 1000, + /* experimental_max_purge_nhp */ + -1}; static hpa_shard_t * create_test_data(const hpa_hooks_t *hooks, hpa_shard_opts_t *opts) { - bool err; + bool err; base_t *base = base_new(TSDN_NULL, /* ind */ SHARD_IND, &ehooks_default_extent_hooks, /* metadata_use_hooks */ true); assert_ptr_not_null(base, ""); @@ -108,7 +107,8 @@ defer_vectorized_purge(void *vec, size_t vlen, size_t nbytes) { } static bool defer_vec_purge_didfail = false; -static bool defer_vectorized_purge_fail(void *vec, size_t vlen, size_t nbytes) { +static bool +defer_vectorized_purge_fail(void *vec, size_t vlen, size_t nbytes) { (void)vec; (void)vlen; (void)nbytes; @@ -141,8 +141,7 @@ defer_test_ms_since(nstime_t *past_time) { } TEST_BEGIN(test_vectorized_failure_fallback) { - test_skip_if(!hpa_supported() || - (opt_process_madvise_max_batch == 0)); + test_skip_if(!hpa_supported() || (opt_process_madvise_max_batch == 0)); hpa_hooks_t hooks; hooks.map = &defer_test_map; @@ -166,8 +165,8 @@ TEST_BEGIN(test_vectorized_failure_fallback) { nstime_init(&defer_curtime, 0); tsdn_t *tsdn = tsd_tsdn(tsd_fetch()); - edata_t *edata = pai_alloc(tsdn, &shard->pai, PAGE, PAGE, false, - false, false, &deferred_work_generated); + edata_t *edata = pai_alloc(tsdn, &shard->pai, PAGE, PAGE, false, false, + false, &deferred_work_generated); expect_ptr_not_null(edata, "Unexpected null edata"); pai_dalloc(tsdn, &shard->pai, edata, &deferred_work_generated); hpa_shard_do_deferred_work(tsdn, shard); @@ -181,9 +180,8 @@ TEST_BEGIN(test_vectorized_failure_fallback) { TEST_END TEST_BEGIN(test_more_regions_purged_from_one_page) { - test_skip_if(!hpa_supported() || - (opt_process_madvise_max_batch == 0) || - HUGEPAGE_PAGES <= 4); + test_skip_if(!hpa_supported() || 
(opt_process_madvise_max_batch == 0) + || HUGEPAGE_PAGES <= 4); hpa_hooks_t hooks; hooks.map = &defer_test_map; @@ -208,7 +206,7 @@ TEST_BEGIN(test_more_regions_purged_from_one_page) { nstime_init(&defer_curtime, 0); tsdn_t *tsdn = tsd_tsdn(tsd_fetch()); - enum {NALLOCS = 8 * HUGEPAGE_PAGES}; + enum { NALLOCS = 8 * HUGEPAGE_PAGES }; edata_t *edatas[NALLOCS]; for (int i = 0; i < NALLOCS; i++) { edatas[i] = pai_alloc(tsdn, &shard->pai, PAGE, PAGE, false, @@ -249,12 +247,10 @@ TEST_BEGIN(test_more_regions_purged_from_one_page) { } TEST_END -size_t -hpa_purge_max_batch_size_for_test_set(size_t new_size); +size_t hpa_purge_max_batch_size_for_test_set(size_t new_size); TEST_BEGIN(test_more_pages_than_batch_page_size) { - test_skip_if(!hpa_supported() || - (opt_process_madvise_max_batch == 0) || - HUGEPAGE_PAGES <= 4); + test_skip_if(!hpa_supported() || (opt_process_madvise_max_batch == 0) + || HUGEPAGE_PAGES <= 4); size_t old_page_batch = hpa_purge_max_batch_size_for_test_set(1); @@ -281,7 +277,7 @@ TEST_BEGIN(test_more_pages_than_batch_page_size) { nstime_init(&defer_curtime, 0); tsdn_t *tsdn = tsd_tsdn(tsd_fetch()); - enum {NALLOCS = 8 * HUGEPAGE_PAGES}; + enum { NALLOCS = 8 * HUGEPAGE_PAGES }; edata_t *edatas[NALLOCS]; for (int i = 0; i < NALLOCS; i++) { edatas[i] = pai_alloc(tsdn, &shard->pai, PAGE, PAGE, false, @@ -289,8 +285,8 @@ TEST_BEGIN(test_more_pages_than_batch_page_size) { expect_ptr_not_null(edatas[i], "Unexpected null edata"); } for (int i = 0; i < 3 * (int)HUGEPAGE_PAGES; i++) { - pai_dalloc(tsdn, &shard->pai, edatas[i], - &deferred_work_generated); + pai_dalloc( + tsdn, &shard->pai, edatas[i], &deferred_work_generated); } hpa_shard_do_deferred_work(tsdn, shard); @@ -321,8 +317,7 @@ TEST_END int main(void) { - return test_no_reentrancy( - test_vectorized_failure_fallback, + return test_no_reentrancy(test_vectorized_failure_fallback, test_more_regions_purged_from_one_page, test_more_pages_than_batch_page_size); } diff --git 
a/test/unit/hpa_vectorized_madvise_large_batch.c b/test/unit/hpa_vectorized_madvise_large_batch.c index 561da7a2..a5766620 100644 --- a/test/unit/hpa_vectorized_madvise_large_batch.c +++ b/test/unit/hpa_vectorized_madvise_large_batch.c @@ -13,36 +13,35 @@ struct test_data_s { * Must be the first member -- we convert back and forth between the * test_data_t and the hpa_shard_t; */ - hpa_shard_t shard; + hpa_shard_t shard; hpa_central_t central; - base_t *base; + base_t *base; edata_cache_t shard_edata_cache; emap_t emap; }; static hpa_shard_opts_t test_hpa_shard_opts_default = { - /* slab_max_alloc */ - ALLOC_MAX, - /* hugification_threshold */ - HUGEPAGE, - /* dirty_mult */ - FXP_INIT_PERCENT(25), - /* deferral_allowed */ - false, - /* hugify_delay_ms */ - 10 * 1000, - /* hugify_sync */ - false, - /* min_purge_interval_ms */ - 5 * 1000, - /* experimental_max_purge_nhp */ - -1 -}; + /* slab_max_alloc */ + ALLOC_MAX, + /* hugification_threshold */ + HUGEPAGE, + /* dirty_mult */ + FXP_INIT_PERCENT(25), + /* deferral_allowed */ + false, + /* hugify_delay_ms */ + 10 * 1000, + /* hugify_sync */ + false, + /* min_purge_interval_ms */ + 5 * 1000, + /* experimental_max_purge_nhp */ + -1}; static hpa_shard_t * create_test_data(const hpa_hooks_t *hooks, hpa_shard_opts_t *opts) { - bool err; + bool err; base_t *base = base_new(TSDN_NULL, /* ind */ SHARD_IND, &ehooks_default_extent_hooks, /* metadata_use_hooks */ true); assert_ptr_not_null(base, ""); @@ -132,8 +131,8 @@ defer_test_ms_since(nstime_t *past_time) { } TEST_BEGIN(test_vectorized_purge) { - test_skip_if(!hpa_supported() || - opt_process_madvise_max_batch == 0 || HUGEPAGE_PAGES <= 4); + test_skip_if(!hpa_supported() || opt_process_madvise_max_batch == 0 + || HUGEPAGE_PAGES <= 4); assert(opt_process_madvise_max_batch == 64); hpa_hooks_t hooks; @@ -159,7 +158,7 @@ TEST_BEGIN(test_vectorized_purge) { nstime_init(&defer_curtime, 0); tsdn_t *tsdn = tsd_tsdn(tsd_fetch()); - enum {NALLOCS = 8 * HUGEPAGE_PAGES}; + enum { 
NALLOCS = 8 * HUGEPAGE_PAGES }; edata_t *edatas[NALLOCS]; for (int i = 0; i < NALLOCS; i++) { edatas[i] = pai_alloc(tsdn, &shard->pai, PAGE, PAGE, false, @@ -192,6 +191,5 @@ TEST_END int main(void) { - return test_no_reentrancy( - test_vectorized_purge); + return test_no_reentrancy(test_vectorized_purge); } diff --git a/test/unit/hpdata.c b/test/unit/hpdata.c index 995ab77b..2329f065 100644 --- a/test/unit/hpdata.c +++ b/test/unit/hpdata.c @@ -69,23 +69,25 @@ TEST_BEGIN(test_purge_simple) { hpdata_alloc_allowed_set(&hpdata, false); hpdata_purge_state_t purge_state; - size_t nranges; + size_t nranges; size_t to_purge = hpdata_purge_begin(&hpdata, &purge_state, &nranges); expect_zu_eq(HUGEPAGE_PAGES / 4, to_purge, ""); expect_zu_eq(1, nranges, "All dirty pages in a single range"); - void *purge_addr; + void *purge_addr; size_t purge_size; - bool got_result = hpdata_purge_next(&hpdata, &purge_state, &purge_addr, - &purge_size); + bool got_result = hpdata_purge_next( + &hpdata, &purge_state, &purge_addr, &purge_size); expect_true(got_result, ""); expect_ptr_eq(HPDATA_ADDR, purge_addr, ""); expect_zu_eq(HUGEPAGE_PAGES / 4 * PAGE, purge_size, ""); - got_result = hpdata_purge_next(&hpdata, &purge_state, &purge_addr, - &purge_size); - expect_false(got_result, "Unexpected additional purge range: " - "extent at %p of size %zu", purge_addr, purge_size); + got_result = hpdata_purge_next( + &hpdata, &purge_state, &purge_addr, &purge_size); + expect_false(got_result, + "Unexpected additional purge range: " + "extent at %p of size %zu", + purge_addr, purge_size); hpdata_purge_end(&hpdata, &purge_state); expect_zu_eq(hpdata_ntouched_get(&hpdata), HUGEPAGE_PAGES / 4, ""); @@ -102,7 +104,8 @@ TEST_BEGIN(test_purge_intervening_dalloc) { hpdata_init(&hpdata, HPDATA_ADDR, HPDATA_AGE); /* Allocate the first 3/4 of the pages. 
*/ - void *alloc = hpdata_reserve_alloc(&hpdata, 3 * HUGEPAGE_PAGES / 4 * PAGE); + void *alloc = hpdata_reserve_alloc( + &hpdata, 3 * HUGEPAGE_PAGES / 4 * PAGE); expect_ptr_eq(alloc, HPDATA_ADDR, ""); /* Free the first 1/4 and the third 1/4 of the pages. */ @@ -115,16 +118,16 @@ TEST_BEGIN(test_purge_intervening_dalloc) { hpdata_alloc_allowed_set(&hpdata, false); hpdata_purge_state_t purge_state; - size_t nranges; + size_t nranges; size_t to_purge = hpdata_purge_begin(&hpdata, &purge_state, &nranges); expect_zu_eq(HUGEPAGE_PAGES / 2, to_purge, ""); expect_zu_eq(2, nranges, "First quarter and last half"); - void *purge_addr; + void *purge_addr; size_t purge_size; /* First purge. */ - bool got_result = hpdata_purge_next(&hpdata, &purge_state, &purge_addr, - &purge_size); + bool got_result = hpdata_purge_next( + &hpdata, &purge_state, &purge_addr, &purge_size); expect_true(got_result, ""); expect_ptr_eq(HPDATA_ADDR, purge_addr, ""); expect_zu_eq(HUGEPAGE_PAGES / 4 * PAGE, purge_size, ""); @@ -135,18 +138,20 @@ TEST_BEGIN(test_purge_intervening_dalloc) { HUGEPAGE_PAGES / 4 * PAGE); /* Now continue purging. 
*/ - got_result = hpdata_purge_next(&hpdata, &purge_state, &purge_addr, - &purge_size); + got_result = hpdata_purge_next( + &hpdata, &purge_state, &purge_addr, &purge_size); expect_true(got_result, ""); expect_ptr_eq( (void *)((uintptr_t)alloc + 2 * HUGEPAGE_PAGES / 4 * PAGE), purge_addr, ""); expect_zu_ge(HUGEPAGE_PAGES / 4 * PAGE, purge_size, ""); - got_result = hpdata_purge_next(&hpdata, &purge_state, &purge_addr, - &purge_size); - expect_false(got_result, "Unexpected additional purge range: " - "extent at %p of size %zu", purge_addr, purge_size); + got_result = hpdata_purge_next( + &hpdata, &purge_state, &purge_addr, &purge_size); + expect_false(got_result, + "Unexpected additional purge range: " + "extent at %p of size %zu", + purge_addr, purge_size); hpdata_purge_end(&hpdata, &purge_state); @@ -155,19 +160,20 @@ TEST_BEGIN(test_purge_intervening_dalloc) { TEST_END TEST_BEGIN(test_purge_over_retained) { - void *purge_addr; + void *purge_addr; size_t purge_size; hpdata_t hpdata; hpdata_init(&hpdata, HPDATA_ADDR, HPDATA_AGE); /* Allocate the first 3/4 of the pages. */ - void *alloc = hpdata_reserve_alloc(&hpdata, 3 * HUGEPAGE_PAGES / 4 * PAGE); + void *alloc = hpdata_reserve_alloc( + &hpdata, 3 * HUGEPAGE_PAGES / 4 * PAGE); expect_ptr_eq(alloc, HPDATA_ADDR, ""); /* Free the second quarter. */ - void *second_quarter = - (void *)((uintptr_t)alloc + HUGEPAGE_PAGES / 4 * PAGE); + void *second_quarter = (void *)((uintptr_t)alloc + + HUGEPAGE_PAGES / 4 * PAGE); hpdata_unreserve(&hpdata, second_quarter, HUGEPAGE_PAGES / 4 * PAGE); expect_zu_eq(hpdata_ntouched_get(&hpdata), 3 * HUGEPAGE_PAGES / 4, ""); @@ -175,21 +181,24 @@ TEST_BEGIN(test_purge_over_retained) { /* Purge the second quarter. 
*/ hpdata_alloc_allowed_set(&hpdata, false); hpdata_purge_state_t purge_state; - size_t nranges; - size_t to_purge_dirty = hpdata_purge_begin(&hpdata, &purge_state, &nranges); + size_t nranges; + size_t to_purge_dirty = hpdata_purge_begin( + &hpdata, &purge_state, &nranges); expect_zu_eq(HUGEPAGE_PAGES / 4, to_purge_dirty, ""); expect_zu_eq(1, nranges, "Second quarter only"); - bool got_result = hpdata_purge_next(&hpdata, &purge_state, &purge_addr, - &purge_size); + bool got_result = hpdata_purge_next( + &hpdata, &purge_state, &purge_addr, &purge_size); expect_true(got_result, ""); expect_ptr_eq(second_quarter, purge_addr, ""); expect_zu_eq(HUGEPAGE_PAGES / 4 * PAGE, purge_size, ""); - got_result = hpdata_purge_next(&hpdata, &purge_state, &purge_addr, - &purge_size); - expect_false(got_result, "Unexpected additional purge range: " - "extent at %p of size %zu", purge_addr, purge_size); + got_result = hpdata_purge_next( + &hpdata, &purge_state, &purge_addr, &purge_size); + expect_false(got_result, + "Unexpected additional purge range: " + "extent at %p of size %zu", + purge_addr, purge_size); hpdata_purge_end(&hpdata, &purge_state); expect_zu_eq(hpdata_ntouched_get(&hpdata), HUGEPAGE_PAGES / 2, ""); @@ -209,16 +218,18 @@ TEST_BEGIN(test_purge_over_retained) { expect_zu_eq(HUGEPAGE_PAGES / 2, to_purge_dirty, ""); expect_zu_eq(1, nranges, "Single range expected"); - got_result = hpdata_purge_next(&hpdata, &purge_state, &purge_addr, - &purge_size); + got_result = hpdata_purge_next( + &hpdata, &purge_state, &purge_addr, &purge_size); expect_true(got_result, ""); expect_ptr_eq(HPDATA_ADDR, purge_addr, ""); expect_zu_eq(3 * HUGEPAGE_PAGES / 4 * PAGE, purge_size, ""); - got_result = hpdata_purge_next(&hpdata, &purge_state, &purge_addr, - &purge_size); - expect_false(got_result, "Unexpected additional purge range: " - "extent at %p of size %zu", purge_addr, purge_size); + got_result = hpdata_purge_next( + &hpdata, &purge_state, &purge_addr, &purge_size); + 
expect_false(got_result, + "Unexpected additional purge range: " + "extent at %p of size %zu", + purge_addr, purge_size); hpdata_purge_end(&hpdata, &purge_state); expect_zu_eq(hpdata_ntouched_get(&hpdata), 0, ""); @@ -241,11 +252,9 @@ TEST_BEGIN(test_hugify) { } TEST_END -int main(void) { - return test_no_reentrancy( - test_reserve_alloc, - test_purge_simple, - test_purge_intervening_dalloc, - test_purge_over_retained, +int +main(void) { + return test_no_reentrancy(test_reserve_alloc, test_purge_simple, + test_purge_intervening_dalloc, test_purge_over_retained, test_hugify); } diff --git a/test/unit/huge.c b/test/unit/huge.c index 53f6577b..70abe4ac 100644 --- a/test/unit/huge.c +++ b/test/unit/huge.c @@ -8,38 +8,40 @@ const char *malloc_conf = "oversize_threshold:2097152"; TEST_BEGIN(huge_bind_thread) { unsigned arena1, arena2; - size_t sz = sizeof(unsigned); + size_t sz = sizeof(unsigned); /* Bind to a manual arena. */ expect_d_eq(mallctl("arenas.create", &arena1, &sz, NULL, 0), 0, "Failed to create arena"); - expect_d_eq(mallctl("thread.arena", NULL, NULL, &arena1, - sizeof(arena1)), 0, "Fail to bind thread"); + expect_d_eq( + mallctl("thread.arena", NULL, NULL, &arena1, sizeof(arena1)), 0, + "Fail to bind thread"); void *ptr = mallocx(HUGE_SZ, 0); expect_ptr_not_null(ptr, "Fail to allocate huge size"); - expect_d_eq(mallctl("arenas.lookup", &arena2, &sz, &ptr, - sizeof(ptr)), 0, "Unexpected mallctl() failure"); + expect_d_eq(mallctl("arenas.lookup", &arena2, &sz, &ptr, sizeof(ptr)), + 0, "Unexpected mallctl() failure"); expect_u_eq(arena1, arena2, "Wrong arena used after binding"); dallocx(ptr, 0); /* Switch back to arena 0. 
*/ - test_skip_if(have_percpu_arena && - PERCPU_ARENA_ENABLED(opt_percpu_arena)); + test_skip_if( + have_percpu_arena && PERCPU_ARENA_ENABLED(opt_percpu_arena)); arena2 = 0; - expect_d_eq(mallctl("thread.arena", NULL, NULL, &arena2, - sizeof(arena2)), 0, "Fail to bind thread"); + expect_d_eq( + mallctl("thread.arena", NULL, NULL, &arena2, sizeof(arena2)), 0, + "Fail to bind thread"); ptr = mallocx(SMALL_SZ, MALLOCX_TCACHE_NONE); - expect_d_eq(mallctl("arenas.lookup", &arena2, &sz, &ptr, - sizeof(ptr)), 0, "Unexpected mallctl() failure"); + expect_d_eq(mallctl("arenas.lookup", &arena2, &sz, &ptr, sizeof(ptr)), + 0, "Unexpected mallctl() failure"); expect_u_eq(arena2, 0, "Wrong arena used after binding"); dallocx(ptr, MALLOCX_TCACHE_NONE); /* Then huge allocation should use the huge arena. */ ptr = mallocx(HUGE_SZ, 0); expect_ptr_not_null(ptr, "Fail to allocate huge size"); - expect_d_eq(mallctl("arenas.lookup", &arena2, &sz, &ptr, - sizeof(ptr)), 0, "Unexpected mallctl() failure"); + expect_d_eq(mallctl("arenas.lookup", &arena2, &sz, &ptr, sizeof(ptr)), + 0, "Unexpected mallctl() failure"); expect_u_ne(arena2, 0, "Wrong arena used after binding"); expect_u_ne(arena1, arena2, "Wrong arena used after binding"); dallocx(ptr, 0); @@ -48,25 +50,26 @@ TEST_END TEST_BEGIN(huge_mallocx) { unsigned arena1, arena2; - size_t sz = sizeof(unsigned); + size_t sz = sizeof(unsigned); expect_d_eq(mallctl("arenas.create", &arena1, &sz, NULL, 0), 0, "Failed to create arena"); void *huge = mallocx(HUGE_SZ, MALLOCX_ARENA(arena1)); expect_ptr_not_null(huge, "Fail to allocate huge size"); - expect_d_eq(mallctl("arenas.lookup", &arena2, &sz, &huge, - sizeof(huge)), 0, "Unexpected mallctl() failure"); + expect_d_eq(mallctl("arenas.lookup", &arena2, &sz, &huge, sizeof(huge)), + 0, "Unexpected mallctl() failure"); expect_u_eq(arena1, arena2, "Wrong arena used for mallocx"); dallocx(huge, MALLOCX_ARENA(arena1)); void *huge2 = mallocx(HUGE_SZ, 0); expect_ptr_not_null(huge, "Fail to allocate 
huge size"); - expect_d_eq(mallctl("arenas.lookup", &arena2, &sz, &huge2, - sizeof(huge2)), 0, "Unexpected mallctl() failure"); + expect_d_eq( + mallctl("arenas.lookup", &arena2, &sz, &huge2, sizeof(huge2)), 0, + "Unexpected mallctl() failure"); expect_u_ne(arena1, arena2, "Huge allocation should not come from the manual arena."); - expect_u_ne(arena2, 0, - "Huge allocation should not come from the arena 0."); + expect_u_ne( + arena2, 0, "Huge allocation should not come from the arena 0."); dallocx(huge2, 0); } TEST_END @@ -82,30 +85,27 @@ TEST_BEGIN(huge_allocation) { expect_u_gt(arena1, 0, "Huge allocation should not come from arena 0"); dallocx(ptr, 0); - test_skip_if(have_percpu_arena && - PERCPU_ARENA_ENABLED(opt_percpu_arena)); + test_skip_if( + have_percpu_arena && PERCPU_ARENA_ENABLED(opt_percpu_arena)); ptr = mallocx(HUGE_SZ >> 1, 0); expect_ptr_not_null(ptr, "Fail to allocate half huge size"); - expect_d_eq(mallctl("arenas.lookup", &arena2, &sz, &ptr, - sizeof(ptr)), 0, "Unexpected mallctl() failure"); + expect_d_eq(mallctl("arenas.lookup", &arena2, &sz, &ptr, sizeof(ptr)), + 0, "Unexpected mallctl() failure"); expect_u_ne(arena1, arena2, "Wrong arena used for half huge"); dallocx(ptr, 0); ptr = mallocx(SMALL_SZ, MALLOCX_TCACHE_NONE); expect_ptr_not_null(ptr, "Fail to allocate small size"); - expect_d_eq(mallctl("arenas.lookup", &arena2, &sz, &ptr, - sizeof(ptr)), 0, "Unexpected mallctl() failure"); - expect_u_ne(arena1, arena2, - "Huge and small should be from different arenas"); + expect_d_eq(mallctl("arenas.lookup", &arena2, &sz, &ptr, sizeof(ptr)), + 0, "Unexpected mallctl() failure"); + expect_u_ne( + arena1, arena2, "Huge and small should be from different arenas"); dallocx(ptr, 0); } TEST_END int main(void) { - return test( - huge_allocation, - huge_mallocx, - huge_bind_thread); + return test(huge_allocation, huge_mallocx, huge_bind_thread); } diff --git a/test/unit/inspect.c b/test/unit/inspect.c index fe59e597..8111e4a5 100644 --- 
a/test/unit/inspect.c +++ b/test/unit/inspect.c @@ -1,27 +1,30 @@ #include "test/jemalloc_test.h" -#define TEST_UTIL_EINVAL(node, a, b, c, d, why_inval) do { \ - assert_d_eq(mallctl("experimental.utilization." node, \ - a, b, c, d), EINVAL, "Should fail when " why_inval); \ - assert_zu_eq(out_sz, out_sz_ref, \ - "Output size touched when given invalid arguments"); \ - assert_d_eq(memcmp(out, out_ref, out_sz_ref), 0, \ - "Output content touched when given invalid arguments"); \ -} while (0) +#define TEST_UTIL_EINVAL(node, a, b, c, d, why_inval) \ + do { \ + assert_d_eq( \ + mallctl("experimental.utilization." node, a, b, c, d), \ + EINVAL, "Should fail when " why_inval); \ + assert_zu_eq(out_sz, out_sz_ref, \ + "Output size touched when given invalid arguments"); \ + assert_d_eq(memcmp(out, out_ref, out_sz_ref), 0, \ + "Output content touched when given invalid arguments"); \ + } while (0) -#define TEST_UTIL_QUERY_EINVAL(a, b, c, d, why_inval) \ +#define TEST_UTIL_QUERY_EINVAL(a, b, c, d, why_inval) \ TEST_UTIL_EINVAL("query", a, b, c, d, why_inval) -#define TEST_UTIL_BATCH_EINVAL(a, b, c, d, why_inval) \ +#define TEST_UTIL_BATCH_EINVAL(a, b, c, d, why_inval) \ TEST_UTIL_EINVAL("batch_query", a, b, c, d, why_inval) -#define TEST_UTIL_VALID(node) do { \ - assert_d_eq(mallctl("experimental.utilization." node, \ - out, &out_sz, in, in_sz), 0, \ - "Should return 0 on correct arguments"); \ - expect_zu_eq(out_sz, out_sz_ref, "incorrect output size"); \ - expect_d_ne(memcmp(out, out_ref, out_sz_ref), 0, \ - "Output content should be changed"); \ -} while (0) +#define TEST_UTIL_VALID(node) \ + do { \ + assert_d_eq(mallctl("experimental.utilization." 
node, out, \ + &out_sz, in, in_sz), \ + 0, "Should return 0 on correct arguments"); \ + expect_zu_eq(out_sz, out_sz_ref, "incorrect output size"); \ + expect_d_ne(memcmp(out, out_ref, out_sz_ref), 0, \ + "Output content should be changed"); \ + } while (0) #define TEST_UTIL_BATCH_VALID TEST_UTIL_VALID("batch_query") @@ -34,21 +37,19 @@ TEST_BEGIN(test_query) { * numerically unrelated to any size boundaries. */ for (sz = 7; sz <= TEST_MAX_SIZE && sz <= SC_LARGE_MAXCLASS; - sz += (sz <= SC_SMALL_MAXCLASS ? 1009 : 99989)) { - void *p = mallocx(sz, 0); + sz += (sz <= SC_SMALL_MAXCLASS ? 1009 : 99989)) { + void *p = mallocx(sz, 0); void **in = &p; size_t in_sz = sizeof(const void *); size_t out_sz = sizeof(void *) + sizeof(size_t) * 5; - void *out = mallocx(out_sz, 0); - void *out_ref = mallocx(out_sz, 0); + void *out = mallocx(out_sz, 0); + void *out_ref = mallocx(out_sz, 0); size_t out_sz_ref = out_sz; - assert_ptr_not_null(p, - "test pointer allocation failed"); - assert_ptr_not_null(out, - "test output allocation failed"); - assert_ptr_not_null(out_ref, - "test reference output allocation failed"); + assert_ptr_not_null(p, "test pointer allocation failed"); + assert_ptr_not_null(out, "test output allocation failed"); + assert_ptr_not_null( + out_ref, "test reference output allocation failed"); #define SLABCUR_READ(out) (*(void **)out) #define COUNTS(out) ((size_t *)((void **)out + 1)) @@ -64,21 +65,18 @@ TEST_BEGIN(test_query) { memcpy(out_ref, out, out_sz); /* Test invalid argument(s) errors */ - TEST_UTIL_QUERY_EINVAL(NULL, &out_sz, in, in_sz, - "old is NULL"); - TEST_UTIL_QUERY_EINVAL(out, NULL, in, in_sz, - "oldlenp is NULL"); - TEST_UTIL_QUERY_EINVAL(out, &out_sz, NULL, in_sz, - "newp is NULL"); - TEST_UTIL_QUERY_EINVAL(out, &out_sz, in, 0, - "newlen is zero"); + TEST_UTIL_QUERY_EINVAL(NULL, &out_sz, in, in_sz, "old is NULL"); + TEST_UTIL_QUERY_EINVAL(out, NULL, in, in_sz, "oldlenp is NULL"); + TEST_UTIL_QUERY_EINVAL( + out, &out_sz, NULL, in_sz, "newp is 
NULL"); + TEST_UTIL_QUERY_EINVAL(out, &out_sz, in, 0, "newlen is zero"); in_sz -= 1; - TEST_UTIL_QUERY_EINVAL(out, &out_sz, in, in_sz, - "invalid newlen"); + TEST_UTIL_QUERY_EINVAL( + out, &out_sz, in, in_sz, "invalid newlen"); in_sz += 1; out_sz_ref = out_sz -= 2 * sizeof(size_t); - TEST_UTIL_QUERY_EINVAL(out, &out_sz, in, in_sz, - "invalid *oldlenp"); + TEST_UTIL_QUERY_EINVAL( + out, &out_sz, in, in_sz, "invalid *oldlenp"); out_sz_ref = out_sz += 2 * sizeof(size_t); /* Examine output for valid call */ @@ -100,8 +98,9 @@ TEST_BEGIN(test_query) { "Extent region count exceeded size"); expect_zu_ne(NREGS_READ(out), 0, "Extent region count must be positive"); - expect_true(NFREE_READ(out) == 0 || (SLABCUR_READ(out) - != NULL && SLABCUR_READ(out) <= p), + expect_true(NFREE_READ(out) == 0 + || (SLABCUR_READ(out) != NULL + && SLABCUR_READ(out) <= p), "Allocation should follow first fit principle"); if (config_stats) { @@ -117,8 +116,8 @@ TEST_BEGIN(test_query) { BIN_NREGS_READ(out), "Extent region count exceeded " "bin region count"); - expect_zu_eq(BIN_NREGS_READ(out) - % NREGS_READ(out), 0, + expect_zu_eq( + BIN_NREGS_READ(out) % NREGS_READ(out), 0, "Bin region count isn't a multiple of " "extent region count"); expect_zu_le( @@ -171,10 +170,10 @@ TEST_BEGIN(test_batch) { * numerically unrelated to any size boundaries. */ for (sz = 17; sz <= TEST_MAX_SIZE && sz <= SC_LARGE_MAXCLASS; - sz += (sz <= SC_SMALL_MAXCLASS ? 1019 : 99991)) { - void *p = mallocx(sz, 0); - void *q = mallocx(sz, 0); - void *in[] = {p, q}; + sz += (sz <= SC_SMALL_MAXCLASS ? 
1019 : 99991)) { + void *p = mallocx(sz, 0); + void *q = mallocx(sz, 0); + void *in[] = {p, q}; size_t in_sz = sizeof(const void *) * 2; size_t out[] = {-1, -1, -1, -1, -1, -1}; size_t out_sz = sizeof(size_t) * 6; @@ -185,17 +184,14 @@ TEST_BEGIN(test_batch) { assert_ptr_not_null(q, "test pointer allocation failed"); /* Test invalid argument(s) errors */ - TEST_UTIL_BATCH_EINVAL(NULL, &out_sz, in, in_sz, - "old is NULL"); - TEST_UTIL_BATCH_EINVAL(out, NULL, in, in_sz, - "oldlenp is NULL"); - TEST_UTIL_BATCH_EINVAL(out, &out_sz, NULL, in_sz, - "newp is NULL"); - TEST_UTIL_BATCH_EINVAL(out, &out_sz, in, 0, - "newlen is zero"); + TEST_UTIL_BATCH_EINVAL(NULL, &out_sz, in, in_sz, "old is NULL"); + TEST_UTIL_BATCH_EINVAL(out, NULL, in, in_sz, "oldlenp is NULL"); + TEST_UTIL_BATCH_EINVAL( + out, &out_sz, NULL, in_sz, "newp is NULL"); + TEST_UTIL_BATCH_EINVAL(out, &out_sz, in, 0, "newlen is zero"); in_sz -= 1; - TEST_UTIL_BATCH_EINVAL(out, &out_sz, in, in_sz, - "newlen is not an exact multiple"); + TEST_UTIL_BATCH_EINVAL( + out, &out_sz, in, in_sz, "newlen is not an exact multiple"); in_sz += 1; out_sz_ref = out_sz -= 2 * sizeof(size_t); TEST_UTIL_BATCH_EINVAL(out, &out_sz, in, in_sz, @@ -206,8 +202,8 @@ TEST_BEGIN(test_batch) { "*oldlenp and newlen do not match"); in_sz += sizeof(const void *); - /* Examine output for valid calls */ -#define TEST_EQUAL_REF(i, message) \ + /* Examine output for valid calls */ +#define TEST_EQUAL_REF(i, message) \ assert_d_eq(memcmp(out + (i) * 3, out_ref + (i) * 3, 3), 0, message) #define NFREE_READ(out, i) out[(i) * 3] @@ -238,8 +234,8 @@ TEST_BEGIN(test_batch) { expect_zu_eq(NREGS_READ(out, 0), 1, "Extent region count should be one"); } - TEST_EQUAL_REF(1, - "Should not overwrite content beyond what's needed"); + TEST_EQUAL_REF( + 1, "Should not overwrite content beyond what's needed"); in_sz *= 2; out_sz_ref = out_sz *= 2; diff --git a/test/unit/junk.c b/test/unit/junk.c index 6c5b8beb..80f51e15 100644 --- a/test/unit/junk.c +++ 
b/test/unit/junk.c @@ -1,9 +1,9 @@ #include "test/jemalloc_test.h" -#define arraylen(arr) (sizeof(arr)/sizeof(arr[0])) +#define arraylen(arr) (sizeof(arr) / sizeof(arr[0])) static size_t ptr_ind; static void *volatile ptrs[100]; -static void *last_junked_ptr; +static void *last_junked_ptr; static size_t last_junked_usize; static void @@ -21,17 +21,17 @@ test_junk(void *ptr, size_t usize) { static void do_allocs(size_t size, bool zero, size_t lg_align) { -#define JUNK_ALLOC(...) \ - do { \ - assert(ptr_ind + 1 < arraylen(ptrs)); \ - void *ptr = __VA_ARGS__; \ - assert_ptr_not_null(ptr, ""); \ - ptrs[ptr_ind++] = ptr; \ - if (opt_junk_alloc && !zero) { \ - expect_ptr_eq(ptr, last_junked_ptr, ""); \ - expect_zu_eq(last_junked_usize, \ - TEST_MALLOC_SIZE(ptr), ""); \ - } \ +#define JUNK_ALLOC(...) \ + do { \ + assert(ptr_ind + 1 < arraylen(ptrs)); \ + void *ptr = __VA_ARGS__; \ + assert_ptr_not_null(ptr, ""); \ + ptrs[ptr_ind++] = ptr; \ + if (opt_junk_alloc && !zero) { \ + expect_ptr_eq(ptr, last_junked_ptr, ""); \ + expect_zu_eq( \ + last_junked_usize, TEST_MALLOC_SIZE(ptr), ""); \ + } \ } while (0) if (!zero && lg_align == 0) { JUNK_ALLOC(malloc(size)); @@ -51,21 +51,20 @@ do_allocs(size_t size, bool zero, size_t lg_align) { #endif int zero_flag = zero ? 
MALLOCX_ZERO : 0; JUNK_ALLOC(mallocx(size, zero_flag | MALLOCX_LG_ALIGN(lg_align))); - JUNK_ALLOC(mallocx(size, zero_flag | MALLOCX_LG_ALIGN(lg_align) - | MALLOCX_TCACHE_NONE)); + JUNK_ALLOC(mallocx(size, + zero_flag | MALLOCX_LG_ALIGN(lg_align) | MALLOCX_TCACHE_NONE)); if (lg_align >= LG_SIZEOF_PTR) { void *memalign_result; - int err = posix_memalign(&memalign_result, (1 << lg_align), - size); + int err = posix_memalign( + &memalign_result, (1 << lg_align), size); assert_d_eq(err, 0, ""); JUNK_ALLOC(memalign_result); } } TEST_BEGIN(test_junk_alloc_free) { - bool zerovals[] = {false, true}; - size_t sizevals[] = { - 1, 8, 100, 1000, 100*1000 + bool zerovals[] = {false, true}; + size_t sizevals[] = {1, 8, 100, 1000, 100 * 1000 /* * Memory allocation failure is a real possibility in 32-bit mode. * Rather than try to check in the face of resource exhaustion, we just @@ -75,49 +74,49 @@ TEST_BEGIN(test_junk_alloc_free) { * mechanisms; but this is in fact the case. */ #if LG_SIZEOF_PTR == 3 - , 10 * 1000 * 1000 + , + 10 * 1000 * 1000 #endif }; - size_t lg_alignvals[] = { - 0, 4, 10, 15, 16, LG_PAGE + size_t lg_alignvals[] = {0, 4, 10, 15, 16, LG_PAGE #if LG_SIZEOF_PTR == 3 - , 20, 24 + , + 20, 24 #endif }; -#define JUNK_FREE(...) \ - do { \ - do_allocs(size, zero, lg_align); \ - for (size_t n = 0; n < ptr_ind; n++) { \ - void *ptr = ptrs[n]; \ - __VA_ARGS__; \ - if (opt_junk_free) { \ - assert_ptr_eq(ptr, last_junked_ptr, \ - ""); \ - assert_zu_eq(usize, last_junked_usize, \ - ""); \ - } \ - reset(); \ - } \ +#define JUNK_FREE(...) 
\ + do { \ + do_allocs(size, zero, lg_align); \ + for (size_t n = 0; n < ptr_ind; n++) { \ + void *ptr = ptrs[n]; \ + __VA_ARGS__; \ + if (opt_junk_free) { \ + assert_ptr_eq(ptr, last_junked_ptr, ""); \ + assert_zu_eq(usize, last_junked_usize, ""); \ + } \ + reset(); \ + } \ } while (0) for (size_t i = 0; i < arraylen(zerovals); i++) { for (size_t j = 0; j < arraylen(sizevals); j++) { for (size_t k = 0; k < arraylen(lg_alignvals); k++) { - bool zero = zerovals[i]; + bool zero = zerovals[i]; size_t size = sizevals[j]; size_t lg_align = lg_alignvals[k]; - size_t usize = nallocx(size, - MALLOCX_LG_ALIGN(lg_align)); + size_t usize = nallocx( + size, MALLOCX_LG_ALIGN(lg_align)); JUNK_FREE(free(ptr)); JUNK_FREE(dallocx(ptr, 0)); JUNK_FREE(dallocx(ptr, MALLOCX_TCACHE_NONE)); - JUNK_FREE(dallocx(ptr, MALLOCX_LG_ALIGN( - lg_align))); - JUNK_FREE(sdallocx(ptr, usize, MALLOCX_LG_ALIGN( - lg_align))); + JUNK_FREE( + dallocx(ptr, MALLOCX_LG_ALIGN(lg_align))); + JUNK_FREE(sdallocx( + ptr, usize, MALLOCX_LG_ALIGN(lg_align))); JUNK_FREE(sdallocx(ptr, usize, - MALLOCX_TCACHE_NONE | MALLOCX_LG_ALIGN(lg_align))); + MALLOCX_TCACHE_NONE + | MALLOCX_LG_ALIGN(lg_align))); if (opt_zero_realloc_action == zero_realloc_action_free) { JUNK_FREE(realloc(ptr, 0)); @@ -138,24 +137,24 @@ TEST_BEGIN(test_realloc_expand) { ptr = malloc(SC_SMALL_MAXCLASS); expanded = realloc(ptr, SC_LARGE_MINCLASS); expect_ptr_eq(last_junked_ptr, &expanded[SC_SMALL_MAXCLASS], ""); - expect_zu_eq(last_junked_usize, - SC_LARGE_MINCLASS - SC_SMALL_MAXCLASS, ""); + expect_zu_eq( + last_junked_usize, SC_LARGE_MINCLASS - SC_SMALL_MAXCLASS, ""); free(expanded); /* rallocx(..., 0) */ ptr = malloc(SC_SMALL_MAXCLASS); expanded = rallocx(ptr, SC_LARGE_MINCLASS, 0); expect_ptr_eq(last_junked_ptr, &expanded[SC_SMALL_MAXCLASS], ""); - expect_zu_eq(last_junked_usize, - SC_LARGE_MINCLASS - SC_SMALL_MAXCLASS, ""); + expect_zu_eq( + last_junked_usize, SC_LARGE_MINCLASS - SC_SMALL_MAXCLASS, ""); free(expanded); /* rallocx(..., 
nonzero) */ ptr = malloc(SC_SMALL_MAXCLASS); expanded = rallocx(ptr, SC_LARGE_MINCLASS, MALLOCX_TCACHE_NONE); expect_ptr_eq(last_junked_ptr, &expanded[SC_SMALL_MAXCLASS], ""); - expect_zu_eq(last_junked_usize, - SC_LARGE_MINCLASS - SC_SMALL_MAXCLASS, ""); + expect_zu_eq( + last_junked_usize, SC_LARGE_MINCLASS - SC_SMALL_MAXCLASS, ""); free(expanded); /* rallocx(..., MALLOCX_ZERO) */ @@ -189,7 +188,5 @@ main(void) { * We check the last pointer junked. If a reentrant call happens, that * might be an internal allocation. */ - return test_no_reentrancy( - test_junk_alloc_free, - test_realloc_expand); + return test_no_reentrancy(test_junk_alloc_free, test_realloc_expand); } diff --git a/test/unit/log.c b/test/unit/log.c index c09b5896..bf4ee1ff 100644 --- a/test/unit/log.c +++ b/test/unit/log.c @@ -18,16 +18,13 @@ expect_no_logging(const char *names) { int count = 0; for (int i = 0; i < 10; i++) { - log_do_begin(log_l1) - count++; + log_do_begin(log_l1) count++; log_do_end(log_l1) - log_do_begin(log_l2) - count++; + log_do_begin(log_l2) count++; log_do_end(log_l2) - log_do_begin(log_l2_a) - count++; + log_do_begin(log_l2_a) count++; log_do_end(log_l2_a) } expect_d_eq(count, 0, "Disabled logging not ignored!"); @@ -57,8 +54,7 @@ TEST_BEGIN(test_log_enabled_direct) { count = 0; update_log_var_names("l1"); for (int i = 0; i < 10; i++) { - log_do_begin(log_l1) - count++; + log_do_begin(log_l1) count++; log_do_end(log_l1) } expect_d_eq(count, 10, "Mis-logged!"); @@ -66,8 +62,7 @@ TEST_BEGIN(test_log_enabled_direct) { count = 0; update_log_var_names("l1.a"); for (int i = 0; i < 10; i++) { - log_do_begin(log_l1_a) - count++; + log_do_begin(log_l1_a) count++; log_do_end(log_l1_a) } expect_d_eq(count, 10, "Mis-logged!"); @@ -75,12 +70,10 @@ TEST_BEGIN(test_log_enabled_direct) { count = 0; update_log_var_names("l1.a|abc|l2|def"); for (int i = 0; i < 10; i++) { - log_do_begin(log_l1_a) - count++; + log_do_begin(log_l1_a) count++; log_do_end(log_l1_a) - log_do_begin(log_l2) - 
count++; + log_do_begin(log_l2) count++; log_do_end(log_l2) } expect_d_eq(count, 20, "Mis-logged!"); @@ -108,28 +101,22 @@ TEST_BEGIN(test_log_enabled_indirect) { /* 4 are on total, so should sum to 40. */ int count = 0; for (int i = 0; i < 10; i++) { - log_do_begin(log_l1) - count++; + log_do_begin(log_l1) count++; log_do_end(log_l1) - log_do_begin(log_l1a) - count++; + log_do_begin(log_l1a) count++; log_do_end(log_l1a) - log_do_begin(log_l1_a) - count++; + log_do_begin(log_l1_a) count++; log_do_end(log_l1_a) - log_do_begin(log_l2_a) - count++; + log_do_begin(log_l2_a) count++; log_do_end(log_l2_a) - log_do_begin(log_l2_b_a) - count++; + log_do_begin(log_l2_b_a) count++; log_do_end(log_l2_b_a) - log_do_begin(log_l2_b_b) - count++; + log_do_begin(log_l2_b_b) count++; log_do_end(log_l2_b_b) } @@ -147,12 +134,10 @@ TEST_BEGIN(test_log_enabled_global) { int count = 0; for (int i = 0; i < 10; i++) { - log_do_begin(log_l1) - count++; + log_do_begin(log_l1) count++; log_do_end(log_l1) - log_do_begin(log_l2_a_a) - count++; + log_do_begin(log_l2_a_a) count++; log_do_end(log_l2_a_a) } expect_d_eq(count, 20, "Mis-logged!"); @@ -167,8 +152,7 @@ TEST_BEGIN(test_logs_if_no_init) { int count = 0; for (int i = 0; i < 10; i++) { - log_do_begin(l) - count++; + log_do_begin(l) count++; log_do_end(l) } expect_d_eq(count, 0, "Logging shouldn't happen if not initialized."); @@ -188,11 +172,7 @@ TEST_END int main(void) { - return test( - test_log_disabled, - test_log_enabled_direct, - test_log_enabled_indirect, - test_log_enabled_global, - test_logs_if_no_init, - test_log_only_format_string); + return test(test_log_disabled, test_log_enabled_direct, + test_log_enabled_indirect, test_log_enabled_global, + test_logs_if_no_init, test_log_only_format_string); } diff --git a/test/unit/mallctl.c b/test/unit/mallctl.c index 838a4445..ac7506cf 100644 --- a/test/unit/mallctl.c +++ b/test/unit/mallctl.c @@ -6,26 +6,27 @@ TEST_BEGIN(test_mallctl_errors) { uint64_t epoch; - size_t sz; + size_t sz; 
expect_d_eq(mallctl("no_such_name", NULL, NULL, NULL, 0), ENOENT, "mallctl() should return ENOENT for non-existent names"); expect_d_eq(mallctl("version", NULL, NULL, "0.0.0", strlen("0.0.0")), - EPERM, "mallctl() should return EPERM on attempt to write " + EPERM, + "mallctl() should return EPERM on attempt to write " "read-only value"); - expect_d_eq(mallctl("epoch", NULL, NULL, (void *)&epoch, - sizeof(epoch)-1), EINVAL, - "mallctl() should return EINVAL for input size mismatch"); - expect_d_eq(mallctl("epoch", NULL, NULL, (void *)&epoch, - sizeof(epoch)+1), EINVAL, - "mallctl() should return EINVAL for input size mismatch"); + expect_d_eq( + mallctl("epoch", NULL, NULL, (void *)&epoch, sizeof(epoch) - 1), + EINVAL, "mallctl() should return EINVAL for input size mismatch"); + expect_d_eq( + mallctl("epoch", NULL, NULL, (void *)&epoch, sizeof(epoch) + 1), + EINVAL, "mallctl() should return EINVAL for input size mismatch"); - sz = sizeof(epoch)-1; + sz = sizeof(epoch) - 1; expect_d_eq(mallctl("epoch", (void *)&epoch, &sz, NULL, 0), EINVAL, "mallctl() should return EINVAL for output size mismatch"); - sz = sizeof(epoch)+1; + sz = sizeof(epoch) + 1; expect_d_eq(mallctl("epoch", (void *)&epoch, &sz, NULL, 0), EINVAL, "mallctl() should return EINVAL for output size mismatch"); } @@ -35,7 +36,7 @@ TEST_BEGIN(test_mallctlnametomib_errors) { size_t mib[1]; size_t miblen; - miblen = sizeof(mib)/sizeof(size_t); + miblen = sizeof(mib) / sizeof(size_t); expect_d_eq(mallctlnametomib("no_such_name", mib, &miblen), ENOENT, "mallctlnametomib() should return ENOENT for non-existent names"); } @@ -43,34 +44,38 @@ TEST_END TEST_BEGIN(test_mallctlbymib_errors) { uint64_t epoch; - size_t sz; - size_t mib[1]; - size_t miblen; + size_t sz; + size_t mib[1]; + size_t miblen; - miblen = sizeof(mib)/sizeof(size_t); + miblen = sizeof(mib) / sizeof(size_t); expect_d_eq(mallctlnametomib("version", mib, &miblen), 0, "Unexpected mallctlnametomib() failure"); - expect_d_eq(mallctlbymib(mib, 
miblen, NULL, NULL, "0.0.0", - strlen("0.0.0")), EPERM, "mallctl() should return EPERM on " + expect_d_eq( + mallctlbymib(mib, miblen, NULL, NULL, "0.0.0", strlen("0.0.0")), + EPERM, + "mallctl() should return EPERM on " "attempt to write read-only value"); - miblen = sizeof(mib)/sizeof(size_t); + miblen = sizeof(mib) / sizeof(size_t); expect_d_eq(mallctlnametomib("epoch", mib, &miblen), 0, "Unexpected mallctlnametomib() failure"); expect_d_eq(mallctlbymib(mib, miblen, NULL, NULL, (void *)&epoch, - sizeof(epoch)-1), EINVAL, + sizeof(epoch) - 1), + EINVAL, "mallctlbymib() should return EINVAL for input size mismatch"); expect_d_eq(mallctlbymib(mib, miblen, NULL, NULL, (void *)&epoch, - sizeof(epoch)+1), EINVAL, + sizeof(epoch) + 1), + EINVAL, "mallctlbymib() should return EINVAL for input size mismatch"); - sz = sizeof(epoch)-1; + sz = sizeof(epoch) - 1; expect_d_eq(mallctlbymib(mib, miblen, (void *)&epoch, &sz, NULL, 0), EINVAL, "mallctlbymib() should return EINVAL for output size mismatch"); - sz = sizeof(epoch)+1; + sz = sizeof(epoch) + 1; expect_d_eq(mallctlbymib(mib, miblen, (void *)&epoch, &sz, NULL, 0), EINVAL, "mallctlbymib() should return EINVAL for output size mismatch"); @@ -79,7 +84,7 @@ TEST_END TEST_BEGIN(test_mallctl_read_write) { uint64_t old_epoch, new_epoch; - size_t sz = sizeof(old_epoch); + size_t sz = sizeof(old_epoch); /* Blind. */ expect_d_eq(mallctl("epoch", NULL, NULL, NULL, 0), 0, @@ -92,14 +97,15 @@ TEST_BEGIN(test_mallctl_read_write) { expect_zu_eq(sz, sizeof(old_epoch), "Unexpected output size"); /* Write. */ - expect_d_eq(mallctl("epoch", NULL, NULL, (void *)&new_epoch, - sizeof(new_epoch)), 0, "Unexpected mallctl() failure"); + expect_d_eq( + mallctl("epoch", NULL, NULL, (void *)&new_epoch, sizeof(new_epoch)), + 0, "Unexpected mallctl() failure"); expect_zu_eq(sz, sizeof(old_epoch), "Unexpected output size"); /* Read+write. 
*/ expect_d_eq(mallctl("epoch", (void *)&old_epoch, &sz, - (void *)&new_epoch, sizeof(new_epoch)), 0, - "Unexpected mallctl() failure"); + (void *)&new_epoch, sizeof(new_epoch)), + 0, "Unexpected mallctl() failure"); expect_zu_eq(sz, sizeof(old_epoch), "Unexpected output size"); } TEST_END @@ -133,10 +139,10 @@ TEST_BEGIN(test_mallctlnametomib_short_name) { TEST_END TEST_BEGIN(test_mallctlmibnametomib) { - size_t mib[4]; - size_t miblen = 4; + size_t mib[4]; + size_t miblen = 4; uint32_t result, result_ref; - size_t len_result = sizeof(uint32_t); + size_t len_result = sizeof(uint32_t); tsd_t *tsd = tsd_fetch(); @@ -178,20 +184,21 @@ TEST_BEGIN(test_mallctlmibnametomib) { /* Valid case. */ assert_d_eq(ctl_mibnametomib(tsd, mib, 3, "nregs", &miblen), 0, ""); assert_zu_eq(miblen, 4, ""); - assert_d_eq(mallctlbymib(mib, miblen, &result, &len_result, NULL, 0), - 0, "Unexpected mallctlbymib() failure"); - assert_d_eq(mallctl("arenas.bin.0.nregs", &result_ref, &len_result, - NULL, 0), 0, "Unexpected mallctl() failure"); + assert_d_eq(mallctlbymib(mib, miblen, &result, &len_result, NULL, 0), 0, + "Unexpected mallctlbymib() failure"); + assert_d_eq( + mallctl("arenas.bin.0.nregs", &result_ref, &len_result, NULL, 0), 0, + "Unexpected mallctl() failure"); expect_zu_eq(result, result_ref, "mallctlbymib() and mallctl() returned different result"); } TEST_END TEST_BEGIN(test_mallctlbymibname) { - size_t mib[4]; - size_t miblen = 4; + size_t mib[4]; + size_t miblen = 4; uint32_t result, result_ref; - size_t len_result = sizeof(uint32_t); + size_t len_result = sizeof(uint32_t); tsd_t *tsd = tsd_fetch(); @@ -202,50 +209,60 @@ TEST_BEGIN(test_mallctlbymibname) { assert_zu_eq(miblen, 1, ""); miblen = 4; - assert_d_eq(ctl_bymibname(tsd, mib, 1, "bin.0", &miblen, - &result, &len_result, NULL, 0), ENOENT, ""); + assert_d_eq(ctl_bymibname(tsd, mib, 1, "bin.0", &miblen, &result, + &len_result, NULL, 0), + ENOENT, ""); miblen = 4; - assert_d_eq(ctl_bymibname(tsd, mib, 1, "bin.0.bob", 
&miblen, - &result, &len_result, NULL, 0), ENOENT, ""); + assert_d_eq(ctl_bymibname(tsd, mib, 1, "bin.0.bob", &miblen, &result, + &len_result, NULL, 0), + ENOENT, ""); assert_zu_eq(miblen, 4, ""); /* Valid cases. */ - assert_d_eq(mallctl("arenas.bin.0.nregs", &result_ref, &len_result, - NULL, 0), 0, "Unexpected mallctl() failure"); + assert_d_eq( + mallctl("arenas.bin.0.nregs", &result_ref, &len_result, NULL, 0), 0, + "Unexpected mallctl() failure"); miblen = 4; assert_d_eq(ctl_bymibname(tsd, mib, 0, "arenas.bin.0.nregs", &miblen, - &result, &len_result, NULL, 0), 0, ""); + &result, &len_result, NULL, 0), + 0, ""); assert_zu_eq(miblen, 4, ""); expect_zu_eq(result, result_ref, "Unexpected result"); assert_d_eq(ctl_bymibname(tsd, mib, 1, "bin.0.nregs", &miblen, &result, - &len_result, NULL, 0), 0, ""); + &len_result, NULL, 0), + 0, ""); assert_zu_eq(miblen, 4, ""); expect_zu_eq(result, result_ref, "Unexpected result"); assert_d_eq(ctl_bymibname(tsd, mib, 2, "0.nregs", &miblen, &result, - &len_result, NULL, 0), 0, ""); + &len_result, NULL, 0), + 0, ""); assert_zu_eq(miblen, 4, ""); expect_zu_eq(result, result_ref, "Unexpected result"); assert_d_eq(ctl_bymibname(tsd, mib, 3, "nregs", &miblen, &result, - &len_result, NULL, 0), 0, ""); + &len_result, NULL, 0), + 0, ""); assert_zu_eq(miblen, 4, ""); expect_zu_eq(result, result_ref, "Unexpected result"); } TEST_END TEST_BEGIN(test_mallctl_config) { -#define TEST_MALLCTL_CONFIG(config, t) do { \ - t oldval; \ - size_t sz = sizeof(oldval); \ - expect_d_eq(mallctl("config."#config, (void *)&oldval, &sz, \ - NULL, 0), 0, "Unexpected mallctl() failure"); \ - expect_b_eq(oldval, config_##config, "Incorrect config value"); \ - expect_zu_eq(sz, sizeof(oldval), "Unexpected output size"); \ -} while (0) +#define TEST_MALLCTL_CONFIG(config, t) \ + do { \ + t oldval; \ + size_t sz = sizeof(oldval); \ + expect_d_eq( \ + mallctl("config." 
#config, (void *)&oldval, &sz, NULL, 0), \ + 0, "Unexpected mallctl() failure"); \ + expect_b_eq( \ + oldval, config_##config, "Incorrect config value"); \ + expect_zu_eq(sz, sizeof(oldval), "Unexpected output size"); \ + } while (0) TEST_MALLCTL_CONFIG(cache_oblivious, bool); TEST_MALLCTL_CONFIG(debug, bool); @@ -267,16 +284,17 @@ TEST_END TEST_BEGIN(test_mallctl_opt) { bool config_always = true; -#define TEST_MALLCTL_OPT(t, opt, config) do { \ - t oldval; \ - size_t sz = sizeof(oldval); \ - int expected = config_##config ? 0 : ENOENT; \ - int result = mallctl("opt."#opt, (void *)&oldval, &sz, NULL, \ - 0); \ - expect_d_eq(result, expected, \ - "Unexpected mallctl() result for opt."#opt); \ - expect_zu_eq(sz, sizeof(oldval), "Unexpected output size"); \ -} while (0) +#define TEST_MALLCTL_OPT(t, opt, config) \ + do { \ + t oldval; \ + size_t sz = sizeof(oldval); \ + int expected = config_##config ? 0 : ENOENT; \ + int result = mallctl( \ + "opt." #opt, (void *)&oldval, &sz, NULL, 0); \ + expect_d_eq(result, expected, \ + "Unexpected mallctl() result for opt." #opt); \ + expect_zu_eq(sz, sizeof(oldval), "Unexpected output size"); \ + } while (0) TEST_MALLCTL_OPT(bool, abort, always); TEST_MALLCTL_OPT(bool, abort_conf, always); @@ -341,8 +359,8 @@ TEST_END TEST_BEGIN(test_manpage_example) { unsigned nbins, i; - size_t mib[4]; - size_t len, miblen; + size_t mib[4]; + size_t len, miblen; len = sizeof(nbins); expect_d_eq(mallctl("arenas.nbins", (void *)&nbins, &len, NULL, 0), 0, @@ -356,8 +374,9 @@ TEST_BEGIN(test_manpage_example) { mib[2] = i; len = sizeof(bin_size); - expect_d_eq(mallctlbymib(mib, miblen, (void *)&bin_size, &len, - NULL, 0), 0, "Unexpected mallctlbymib() failure"); + expect_d_eq( + mallctlbymib(mib, miblen, (void *)&bin_size, &len, NULL, 0), + 0, "Unexpected mallctlbymib() failure"); /* Do something with bin_size... 
*/ } } @@ -380,8 +399,8 @@ TEST_BEGIN(test_tcache_none) { void *p1 = mallocx(42, 0); expect_ptr_not_null(p1, "Unexpected mallocx() failure"); if (!opt_prof && !san_uaf_detection_enabled()) { - expect_ptr_eq(p0, p1, - "Expected tcache to allocate cached region"); + expect_ptr_eq( + p0, p1, "Expected tcache to allocate cached region"); } /* Clean up. */ @@ -390,12 +409,12 @@ TEST_BEGIN(test_tcache_none) { TEST_END TEST_BEGIN(test_tcache) { -#define NTCACHES 10 +#define NTCACHES 10 unsigned tis[NTCACHES]; - void *ps[NTCACHES]; - void *qs[NTCACHES]; + void *ps[NTCACHES]; + void *qs[NTCACHES]; unsigned i; - size_t sz, psz, qsz; + size_t sz, psz, qsz; psz = 42; qsz = nallocx(psz, 0) + 1; @@ -403,39 +422,41 @@ TEST_BEGIN(test_tcache) { /* Create tcaches. */ for (i = 0; i < NTCACHES; i++) { sz = sizeof(unsigned); - expect_d_eq(mallctl("tcache.create", (void *)&tis[i], &sz, NULL, - 0), 0, "Unexpected mallctl() failure, i=%u", i); + expect_d_eq( + mallctl("tcache.create", (void *)&tis[i], &sz, NULL, 0), 0, + "Unexpected mallctl() failure, i=%u", i); } /* Exercise tcache ID recycling. */ for (i = 0; i < NTCACHES; i++) { expect_d_eq(mallctl("tcache.destroy", NULL, NULL, - (void *)&tis[i], sizeof(unsigned)), 0, - "Unexpected mallctl() failure, i=%u", i); + (void *)&tis[i], sizeof(unsigned)), + 0, "Unexpected mallctl() failure, i=%u", i); } for (i = 0; i < NTCACHES; i++) { sz = sizeof(unsigned); - expect_d_eq(mallctl("tcache.create", (void *)&tis[i], &sz, NULL, - 0), 0, "Unexpected mallctl() failure, i=%u", i); + expect_d_eq( + mallctl("tcache.create", (void *)&tis[i], &sz, NULL, 0), 0, + "Unexpected mallctl() failure, i=%u", i); } /* Flush empty tcaches. */ for (i = 0; i < NTCACHES; i++) { expect_d_eq(mallctl("tcache.flush", NULL, NULL, (void *)&tis[i], - sizeof(unsigned)), 0, "Unexpected mallctl() failure, i=%u", - i); + sizeof(unsigned)), + 0, "Unexpected mallctl() failure, i=%u", i); } /* Cache some allocations. 
*/ for (i = 0; i < NTCACHES; i++) { ps[i] = mallocx(psz, MALLOCX_TCACHE(tis[i])); - expect_ptr_not_null(ps[i], "Unexpected mallocx() failure, i=%u", - i); + expect_ptr_not_null( + ps[i], "Unexpected mallocx() failure, i=%u", i); dallocx(ps[i], MALLOCX_TCACHE(tis[i])); qs[i] = mallocx(qsz, MALLOCX_TCACHE(tis[i])); - expect_ptr_not_null(qs[i], "Unexpected mallocx() failure, i=%u", - i); + expect_ptr_not_null( + qs[i], "Unexpected mallocx() failure, i=%u", i); dallocx(qs[i], MALLOCX_TCACHE(tis[i])); } @@ -443,11 +464,13 @@ TEST_BEGIN(test_tcache) { for (i = 0; i < NTCACHES; i++) { void *p0 = ps[i]; ps[i] = mallocx(psz, MALLOCX_TCACHE(tis[i])); - expect_ptr_not_null(ps[i], "Unexpected mallocx() failure, i=%u", - i); + expect_ptr_not_null( + ps[i], "Unexpected mallocx() failure, i=%u", i); if (!san_uaf_detection_enabled()) { - expect_ptr_eq(ps[i], p0, "Expected mallocx() to " - "allocate cached region, i=%u", i); + expect_ptr_eq(ps[i], p0, + "Expected mallocx() to " + "allocate cached region, i=%u", + i); } } @@ -455,11 +478,13 @@ TEST_BEGIN(test_tcache) { for (i = 0; i < NTCACHES; i++) { void *q0 = qs[i]; qs[i] = rallocx(ps[i], qsz, MALLOCX_TCACHE(tis[i])); - expect_ptr_not_null(qs[i], "Unexpected rallocx() failure, i=%u", - i); + expect_ptr_not_null( + qs[i], "Unexpected rallocx() failure, i=%u", i); if (!san_uaf_detection_enabled()) { - expect_ptr_eq(qs[i], q0, "Expected rallocx() to " - "allocate cached region, i=%u", i); + expect_ptr_eq(qs[i], q0, + "Expected rallocx() to " + "allocate cached region, i=%u", + i); } /* Avoid undefined behavior in case of test failure. */ if (qs[i] == NULL) { @@ -471,17 +496,17 @@ TEST_BEGIN(test_tcache) { } /* Flush some non-empty tcaches. 
*/ - for (i = 0; i < NTCACHES/2; i++) { + for (i = 0; i < NTCACHES / 2; i++) { expect_d_eq(mallctl("tcache.flush", NULL, NULL, (void *)&tis[i], - sizeof(unsigned)), 0, "Unexpected mallctl() failure, i=%u", - i); + sizeof(unsigned)), + 0, "Unexpected mallctl() failure, i=%u", i); } /* Destroy tcaches. */ for (i = 0; i < NTCACHES; i++) { expect_d_eq(mallctl("tcache.destroy", NULL, NULL, - (void *)&tis[i], sizeof(unsigned)), 0, - "Unexpected mallctl() failure, i=%u", i); + (void *)&tis[i], sizeof(unsigned)), + 0, "Unexpected mallctl() failure, i=%u", i); } } TEST_END @@ -490,7 +515,7 @@ TEST_BEGIN(test_thread_arena) { unsigned old_arena_ind, new_arena_ind, narenas; const char *opa; - size_t sz = sizeof(opa); + size_t sz = sizeof(opa); expect_d_eq(mallctl("opt.percpu_arena", (void *)&opa, &sz, NULL, 0), 0, "Unexpected mallctl() failure"); @@ -505,20 +530,23 @@ TEST_BEGIN(test_thread_arena) { if (strcmp(opa, "disabled") == 0) { new_arena_ind = narenas - 1; expect_d_eq(mallctl("thread.arena", (void *)&old_arena_ind, &sz, - (void *)&new_arena_ind, sizeof(unsigned)), 0, - "Unexpected mallctl() failure"); + (void *)&new_arena_ind, sizeof(unsigned)), + 0, "Unexpected mallctl() failure"); new_arena_ind = 0; expect_d_eq(mallctl("thread.arena", (void *)&old_arena_ind, &sz, - (void *)&new_arena_ind, sizeof(unsigned)), 0, - "Unexpected mallctl() failure"); + (void *)&new_arena_ind, sizeof(unsigned)), + 0, "Unexpected mallctl() failure"); } else { expect_d_eq(mallctl("thread.arena", (void *)&old_arena_ind, &sz, - NULL, 0), 0, "Unexpected mallctl() failure"); + NULL, 0), + 0, "Unexpected mallctl() failure"); new_arena_ind = percpu_arena_ind_limit(opt_percpu_arena) - 1; if (old_arena_ind != new_arena_ind) { - expect_d_eq(mallctl("thread.arena", - (void *)&old_arena_ind, &sz, (void *)&new_arena_ind, - sizeof(unsigned)), EPERM, "thread.arena ctl " + expect_d_eq( + mallctl("thread.arena", (void *)&old_arena_ind, &sz, + (void *)&new_arena_ind, sizeof(unsigned)), + EPERM, + "thread.arena 
ctl " "should not be allowed with percpu arena"); } } @@ -527,10 +555,10 @@ TEST_END TEST_BEGIN(test_arena_i_initialized) { unsigned narenas, i; - size_t sz; - size_t mib[3]; - size_t miblen = sizeof(mib) / sizeof(size_t); - bool initialized; + size_t sz; + size_t mib[3]; + size_t miblen = sizeof(mib) / sizeof(size_t); + bool initialized; sz = sizeof(narenas); expect_d_eq(mallctl("arenas.narenas", (void *)&narenas, &sz, NULL, 0), @@ -541,8 +569,9 @@ TEST_BEGIN(test_arena_i_initialized) { for (i = 0; i < narenas; i++) { mib[1] = i; sz = sizeof(initialized); - expect_d_eq(mallctlbymib(mib, miblen, &initialized, &sz, NULL, - 0), 0, "Unexpected mallctl() failure"); + expect_d_eq( + mallctlbymib(mib, miblen, &initialized, &sz, NULL, 0), 0, + "Unexpected mallctl() failure"); } mib[1] = MALLCTL_ARENAS_ALL; @@ -554,10 +583,10 @@ TEST_BEGIN(test_arena_i_initialized) { /* Equivalent to the above but using mallctl() directly. */ sz = sizeof(initialized); - expect_d_eq(mallctl( - "arena." STRINGIFY(MALLCTL_ARENAS_ALL) ".initialized", - (void *)&initialized, &sz, NULL, 0), 0, - "Unexpected mallctl() failure"); + expect_d_eq( + mallctl("arena." 
STRINGIFY(MALLCTL_ARENAS_ALL) ".initialized", + (void *)&initialized, &sz, NULL, 0), + 0, "Unexpected mallctl() failure"); expect_true(initialized, "Merged arena statistics should always be initialized"); } @@ -565,30 +594,31 @@ TEST_END TEST_BEGIN(test_arena_i_dirty_decay_ms) { ssize_t dirty_decay_ms, orig_dirty_decay_ms, prev_dirty_decay_ms; - size_t sz = sizeof(ssize_t); + size_t sz = sizeof(ssize_t); expect_d_eq(mallctl("arena.0.dirty_decay_ms", - (void *)&orig_dirty_decay_ms, &sz, NULL, 0), 0, - "Unexpected mallctl() failure"); + (void *)&orig_dirty_decay_ms, &sz, NULL, 0), + 0, "Unexpected mallctl() failure"); dirty_decay_ms = -2; expect_d_eq(mallctl("arena.0.dirty_decay_ms", NULL, NULL, - (void *)&dirty_decay_ms, sizeof(ssize_t)), EFAULT, - "Unexpected mallctl() success"); + (void *)&dirty_decay_ms, sizeof(ssize_t)), + EFAULT, "Unexpected mallctl() success"); dirty_decay_ms = 0x7fffffff; expect_d_eq(mallctl("arena.0.dirty_decay_ms", NULL, NULL, - (void *)&dirty_decay_ms, sizeof(ssize_t)), 0, - "Unexpected mallctl() failure"); + (void *)&dirty_decay_ms, sizeof(ssize_t)), + 0, "Unexpected mallctl() failure"); for (prev_dirty_decay_ms = dirty_decay_ms, dirty_decay_ms = -1; - dirty_decay_ms < 20; prev_dirty_decay_ms = dirty_decay_ms, - dirty_decay_ms++) { + dirty_decay_ms < 20; + prev_dirty_decay_ms = dirty_decay_ms, dirty_decay_ms++) { ssize_t old_dirty_decay_ms; expect_d_eq(mallctl("arena.0.dirty_decay_ms", - (void *)&old_dirty_decay_ms, &sz, (void *)&dirty_decay_ms, - sizeof(ssize_t)), 0, "Unexpected mallctl() failure"); + (void *)&old_dirty_decay_ms, &sz, + (void *)&dirty_decay_ms, sizeof(ssize_t)), + 0, "Unexpected mallctl() failure"); expect_zd_eq(old_dirty_decay_ms, prev_dirty_decay_ms, "Unexpected old arena.0.dirty_decay_ms"); } @@ -597,30 +627,31 @@ TEST_END TEST_BEGIN(test_arena_i_muzzy_decay_ms) { ssize_t muzzy_decay_ms, orig_muzzy_decay_ms, prev_muzzy_decay_ms; - size_t sz = sizeof(ssize_t); + size_t sz = sizeof(ssize_t); 
expect_d_eq(mallctl("arena.0.muzzy_decay_ms", - (void *)&orig_muzzy_decay_ms, &sz, NULL, 0), 0, - "Unexpected mallctl() failure"); + (void *)&orig_muzzy_decay_ms, &sz, NULL, 0), + 0, "Unexpected mallctl() failure"); muzzy_decay_ms = -2; expect_d_eq(mallctl("arena.0.muzzy_decay_ms", NULL, NULL, - (void *)&muzzy_decay_ms, sizeof(ssize_t)), EFAULT, - "Unexpected mallctl() success"); + (void *)&muzzy_decay_ms, sizeof(ssize_t)), + EFAULT, "Unexpected mallctl() success"); muzzy_decay_ms = 0x7fffffff; expect_d_eq(mallctl("arena.0.muzzy_decay_ms", NULL, NULL, - (void *)&muzzy_decay_ms, sizeof(ssize_t)), 0, - "Unexpected mallctl() failure"); + (void *)&muzzy_decay_ms, sizeof(ssize_t)), + 0, "Unexpected mallctl() failure"); for (prev_muzzy_decay_ms = muzzy_decay_ms, muzzy_decay_ms = -1; - muzzy_decay_ms < 20; prev_muzzy_decay_ms = muzzy_decay_ms, - muzzy_decay_ms++) { + muzzy_decay_ms < 20; + prev_muzzy_decay_ms = muzzy_decay_ms, muzzy_decay_ms++) { ssize_t old_muzzy_decay_ms; expect_d_eq(mallctl("arena.0.muzzy_decay_ms", - (void *)&old_muzzy_decay_ms, &sz, (void *)&muzzy_decay_ms, - sizeof(ssize_t)), 0, "Unexpected mallctl() failure"); + (void *)&old_muzzy_decay_ms, &sz, + (void *)&muzzy_decay_ms, sizeof(ssize_t)), + 0, "Unexpected mallctl() failure"); expect_zd_eq(old_muzzy_decay_ms, prev_muzzy_decay_ms, "Unexpected old arena.0.muzzy_decay_ms"); } @@ -629,9 +660,9 @@ TEST_END TEST_BEGIN(test_arena_i_purge) { unsigned narenas; - size_t sz = sizeof(unsigned); - size_t mib[3]; - size_t miblen = 3; + size_t sz = sizeof(unsigned); + size_t mib[3]; + size_t miblen = 3; expect_d_eq(mallctl("arena.0.purge", NULL, NULL, NULL, 0), 0, "Unexpected mallctl() failure"); @@ -652,9 +683,9 @@ TEST_END TEST_BEGIN(test_arena_i_decay) { unsigned narenas; - size_t sz = sizeof(unsigned); - size_t mib[3]; - size_t miblen = 3; + size_t sz = sizeof(unsigned); + size_t mib[3]; + size_t miblen = 3; expect_d_eq(mallctl("arena.0.decay", NULL, NULL, NULL, 0), 0, "Unexpected mallctl() failure"); @@ 
-675,86 +706,89 @@ TEST_END TEST_BEGIN(test_arena_i_dss) { const char *dss_prec_old, *dss_prec_new; - size_t sz = sizeof(dss_prec_old); - size_t mib[3]; - size_t miblen; + size_t sz = sizeof(dss_prec_old); + size_t mib[3]; + size_t miblen; - miblen = sizeof(mib)/sizeof(size_t); + miblen = sizeof(mib) / sizeof(size_t); expect_d_eq(mallctlnametomib("arena.0.dss", mib, &miblen), 0, "Unexpected mallctlnametomib() error"); dss_prec_new = "disabled"; expect_d_eq(mallctlbymib(mib, miblen, (void *)&dss_prec_old, &sz, - (void *)&dss_prec_new, sizeof(dss_prec_new)), 0, - "Unexpected mallctl() failure"); - expect_str_ne(dss_prec_old, "primary", - "Unexpected default for dss precedence"); + (void *)&dss_prec_new, sizeof(dss_prec_new)), + 0, "Unexpected mallctl() failure"); + expect_str_ne( + dss_prec_old, "primary", "Unexpected default for dss precedence"); expect_d_eq(mallctlbymib(mib, miblen, (void *)&dss_prec_new, &sz, - (void *)&dss_prec_old, sizeof(dss_prec_old)), 0, - "Unexpected mallctl() failure"); + (void *)&dss_prec_old, sizeof(dss_prec_old)), + 0, "Unexpected mallctl() failure"); - expect_d_eq(mallctlbymib(mib, miblen, (void *)&dss_prec_old, &sz, NULL, - 0), 0, "Unexpected mallctl() failure"); - expect_str_ne(dss_prec_old, "primary", - "Unexpected value for dss precedence"); + expect_d_eq( + mallctlbymib(mib, miblen, (void *)&dss_prec_old, &sz, NULL, 0), 0, + "Unexpected mallctl() failure"); + expect_str_ne( + dss_prec_old, "primary", "Unexpected value for dss precedence"); mib[1] = narenas_total_get(); dss_prec_new = "disabled"; expect_d_eq(mallctlbymib(mib, miblen, (void *)&dss_prec_old, &sz, - (void *)&dss_prec_new, sizeof(dss_prec_new)), 0, - "Unexpected mallctl() failure"); - expect_str_ne(dss_prec_old, "primary", - "Unexpected default for dss precedence"); + (void *)&dss_prec_new, sizeof(dss_prec_new)), + 0, "Unexpected mallctl() failure"); + expect_str_ne( + dss_prec_old, "primary", "Unexpected default for dss precedence"); expect_d_eq(mallctlbymib(mib, 
miblen, (void *)&dss_prec_new, &sz, - (void *)&dss_prec_old, sizeof(dss_prec_new)), 0, - "Unexpected mallctl() failure"); + (void *)&dss_prec_old, sizeof(dss_prec_new)), + 0, "Unexpected mallctl() failure"); - expect_d_eq(mallctlbymib(mib, miblen, (void *)&dss_prec_old, &sz, NULL, - 0), 0, "Unexpected mallctl() failure"); - expect_str_ne(dss_prec_old, "primary", - "Unexpected value for dss precedence"); + expect_d_eq( + mallctlbymib(mib, miblen, (void *)&dss_prec_old, &sz, NULL, 0), 0, + "Unexpected mallctl() failure"); + expect_str_ne( + dss_prec_old, "primary", "Unexpected value for dss precedence"); } TEST_END TEST_BEGIN(test_arena_i_name) { - unsigned arena_ind; - size_t ind_sz = sizeof(arena_ind); - size_t mib[3]; - size_t miblen; - char name_old[ARENA_NAME_LEN]; - char *name_oldp = name_old; - size_t sz = sizeof(name_oldp); - char default_name[ARENA_NAME_LEN]; + unsigned arena_ind; + size_t ind_sz = sizeof(arena_ind); + size_t mib[3]; + size_t miblen; + char name_old[ARENA_NAME_LEN]; + char *name_oldp = name_old; + size_t sz = sizeof(name_oldp); + char default_name[ARENA_NAME_LEN]; const char *name_new = "test name"; const char *super_long_name = "A name longer than ARENA_NAME_LEN"; - size_t super_long_name_len = strlen(super_long_name); + size_t super_long_name_len = strlen(super_long_name); assert(super_long_name_len > ARENA_NAME_LEN); - miblen = sizeof(mib)/sizeof(size_t); + miblen = sizeof(mib) / sizeof(size_t); expect_d_eq(mallctlnametomib("arena.0.name", mib, &miblen), 0, "Unexpected mallctlnametomib() error"); - expect_d_eq(mallctl("arenas.create", (void *)&arena_ind, &ind_sz, NULL, - 0), 0, "Unexpected mallctl() failure"); + expect_d_eq( + mallctl("arenas.create", (void *)&arena_ind, &ind_sz, NULL, 0), 0, + "Unexpected mallctl() failure"); mib[1] = arena_ind; - malloc_snprintf(default_name, sizeof(default_name), "manual_%u", - arena_ind); + malloc_snprintf( + default_name, sizeof(default_name), "manual_%u", arena_ind); expect_d_eq(mallctlbymib(mib, 
miblen, (void *)&name_oldp, &sz, - (void *)&name_new, sizeof(name_new)), 0, - "Unexpected mallctl() failure"); - expect_str_eq(name_old, default_name, - "Unexpected default value for arena name"); + (void *)&name_new, sizeof(name_new)), + 0, "Unexpected mallctl() failure"); + expect_str_eq( + name_old, default_name, "Unexpected default value for arena name"); expect_d_eq(mallctlbymib(mib, miblen, (void *)&name_oldp, &sz, - (void *)&super_long_name, sizeof(super_long_name)), 0, - "Unexpected mallctl() failure"); + (void *)&super_long_name, sizeof(super_long_name)), + 0, "Unexpected mallctl() failure"); expect_str_eq(name_old, name_new, "Unexpected value for arena name"); - expect_d_eq(mallctlbymib(mib, miblen, (void *)&name_oldp, &sz, - NULL, 0), 0, "Unexpected mallctl() failure"); + expect_d_eq(mallctlbymib(mib, miblen, (void *)&name_oldp, &sz, NULL, 0), + 0, "Unexpected mallctl() failure"); int cmp = strncmp(name_old, super_long_name, ARENA_NAME_LEN - 1); expect_true(cmp == 0, "Unexpected value for long arena name "); } @@ -765,14 +799,14 @@ TEST_BEGIN(test_arena_i_retain_grow_limit) { size_t mib[3]; size_t miblen; - bool retain_enabled; + bool retain_enabled; size_t sz = sizeof(retain_enabled); - expect_d_eq(mallctl("opt.retain", &retain_enabled, &sz, NULL, 0), - 0, "Unexpected mallctl() failure"); + expect_d_eq(mallctl("opt.retain", &retain_enabled, &sz, NULL, 0), 0, + "Unexpected mallctl() failure"); test_skip_if(!retain_enabled); sz = sizeof(default_limit); - miblen = sizeof(mib)/sizeof(size_t); + miblen = sizeof(mib) / sizeof(size_t); expect_d_eq(mallctlnametomib("arena.0.retain_grow_limit", mib, &miblen), 0, "Unexpected mallctlnametomib() error"); @@ -782,58 +816,62 @@ TEST_BEGIN(test_arena_i_retain_grow_limit) { "Unexpected default for retain_grow_limit"); new_limit = PAGE - 1; - expect_d_eq(mallctlbymib(mib, miblen, NULL, NULL, &new_limit, - sizeof(new_limit)), EFAULT, "Unexpected mallctl() success"); + expect_d_eq(mallctlbymib( + mib, miblen, NULL, NULL, 
&new_limit, sizeof(new_limit)), + EFAULT, "Unexpected mallctl() success"); new_limit = PAGE + 1; - expect_d_eq(mallctlbymib(mib, miblen, NULL, NULL, &new_limit, - sizeof(new_limit)), 0, "Unexpected mallctl() failure"); + expect_d_eq(mallctlbymib( + mib, miblen, NULL, NULL, &new_limit, sizeof(new_limit)), + 0, "Unexpected mallctl() failure"); expect_d_eq(mallctlbymib(mib, miblen, &old_limit, &sz, NULL, 0), 0, "Unexpected mallctl() failure"); - expect_zu_eq(old_limit, PAGE, - "Unexpected value for retain_grow_limit"); + expect_zu_eq(old_limit, PAGE, "Unexpected value for retain_grow_limit"); /* Expect grow less than psize class 10. */ new_limit = sz_pind2sz(10) - 1; - expect_d_eq(mallctlbymib(mib, miblen, NULL, NULL, &new_limit, - sizeof(new_limit)), 0, "Unexpected mallctl() failure"); + expect_d_eq(mallctlbymib( + mib, miblen, NULL, NULL, &new_limit, sizeof(new_limit)), + 0, "Unexpected mallctl() failure"); expect_d_eq(mallctlbymib(mib, miblen, &old_limit, &sz, NULL, 0), 0, "Unexpected mallctl() failure"); - expect_zu_eq(old_limit, sz_pind2sz(9), - "Unexpected value for retain_grow_limit"); + expect_zu_eq( + old_limit, sz_pind2sz(9), "Unexpected value for retain_grow_limit"); /* Restore to default. 
*/ expect_d_eq(mallctlbymib(mib, miblen, NULL, NULL, &default_limit, - sizeof(default_limit)), 0, "Unexpected mallctl() failure"); + sizeof(default_limit)), + 0, "Unexpected mallctl() failure"); } TEST_END TEST_BEGIN(test_arenas_dirty_decay_ms) { ssize_t dirty_decay_ms, orig_dirty_decay_ms, prev_dirty_decay_ms; - size_t sz = sizeof(ssize_t); + size_t sz = sizeof(ssize_t); expect_d_eq(mallctl("arenas.dirty_decay_ms", - (void *)&orig_dirty_decay_ms, &sz, NULL, 0), 0, - "Unexpected mallctl() failure"); + (void *)&orig_dirty_decay_ms, &sz, NULL, 0), + 0, "Unexpected mallctl() failure"); dirty_decay_ms = -2; expect_d_eq(mallctl("arenas.dirty_decay_ms", NULL, NULL, - (void *)&dirty_decay_ms, sizeof(ssize_t)), EFAULT, - "Unexpected mallctl() success"); + (void *)&dirty_decay_ms, sizeof(ssize_t)), + EFAULT, "Unexpected mallctl() success"); dirty_decay_ms = 0x7fffffff; expect_d_eq(mallctl("arenas.dirty_decay_ms", NULL, NULL, - (void *)&dirty_decay_ms, sizeof(ssize_t)), 0, - "Expected mallctl() failure"); + (void *)&dirty_decay_ms, sizeof(ssize_t)), + 0, "Expected mallctl() failure"); for (prev_dirty_decay_ms = dirty_decay_ms, dirty_decay_ms = -1; - dirty_decay_ms < 20; prev_dirty_decay_ms = dirty_decay_ms, - dirty_decay_ms++) { + dirty_decay_ms < 20; + prev_dirty_decay_ms = dirty_decay_ms, dirty_decay_ms++) { ssize_t old_dirty_decay_ms; expect_d_eq(mallctl("arenas.dirty_decay_ms", - (void *)&old_dirty_decay_ms, &sz, (void *)&dirty_decay_ms, - sizeof(ssize_t)), 0, "Unexpected mallctl() failure"); + (void *)&old_dirty_decay_ms, &sz, + (void *)&dirty_decay_ms, sizeof(ssize_t)), + 0, "Unexpected mallctl() failure"); expect_zd_eq(old_dirty_decay_ms, prev_dirty_decay_ms, "Unexpected old arenas.dirty_decay_ms"); } @@ -842,30 +880,31 @@ TEST_END TEST_BEGIN(test_arenas_muzzy_decay_ms) { ssize_t muzzy_decay_ms, orig_muzzy_decay_ms, prev_muzzy_decay_ms; - size_t sz = sizeof(ssize_t); + size_t sz = sizeof(ssize_t); expect_d_eq(mallctl("arenas.muzzy_decay_ms", - (void 
*)&orig_muzzy_decay_ms, &sz, NULL, 0), 0, - "Unexpected mallctl() failure"); + (void *)&orig_muzzy_decay_ms, &sz, NULL, 0), + 0, "Unexpected mallctl() failure"); muzzy_decay_ms = -2; expect_d_eq(mallctl("arenas.muzzy_decay_ms", NULL, NULL, - (void *)&muzzy_decay_ms, sizeof(ssize_t)), EFAULT, - "Unexpected mallctl() success"); + (void *)&muzzy_decay_ms, sizeof(ssize_t)), + EFAULT, "Unexpected mallctl() success"); muzzy_decay_ms = 0x7fffffff; expect_d_eq(mallctl("arenas.muzzy_decay_ms", NULL, NULL, - (void *)&muzzy_decay_ms, sizeof(ssize_t)), 0, - "Expected mallctl() failure"); + (void *)&muzzy_decay_ms, sizeof(ssize_t)), + 0, "Expected mallctl() failure"); for (prev_muzzy_decay_ms = muzzy_decay_ms, muzzy_decay_ms = -1; - muzzy_decay_ms < 20; prev_muzzy_decay_ms = muzzy_decay_ms, - muzzy_decay_ms++) { + muzzy_decay_ms < 20; + prev_muzzy_decay_ms = muzzy_decay_ms, muzzy_decay_ms++) { ssize_t old_muzzy_decay_ms; expect_d_eq(mallctl("arenas.muzzy_decay_ms", - (void *)&old_muzzy_decay_ms, &sz, (void *)&muzzy_decay_ms, - sizeof(ssize_t)), 0, "Unexpected mallctl() failure"); + (void *)&old_muzzy_decay_ms, &sz, + (void *)&muzzy_decay_ms, sizeof(ssize_t)), + 0, "Unexpected mallctl() failure"); expect_zd_eq(old_muzzy_decay_ms, prev_muzzy_decay_ms, "Unexpected old arenas.muzzy_decay_ms"); } @@ -873,13 +912,15 @@ TEST_BEGIN(test_arenas_muzzy_decay_ms) { TEST_END TEST_BEGIN(test_arenas_constants) { -#define TEST_ARENAS_CONSTANT(t, name, expected) do { \ - t name; \ - size_t sz = sizeof(t); \ - expect_d_eq(mallctl("arenas."#name, (void *)&name, &sz, NULL, \ - 0), 0, "Unexpected mallctl() failure"); \ - expect_zu_eq(name, expected, "Incorrect "#name" size"); \ -} while (0) +#define TEST_ARENAS_CONSTANT(t, name, expected) \ + do { \ + t name; \ + size_t sz = sizeof(t); \ + expect_d_eq( \ + mallctl("arenas." 
#name, (void *)&name, &sz, NULL, 0), 0, \ + "Unexpected mallctl() failure"); \ + expect_zu_eq(name, expected, "Incorrect " #name " size"); \ + } while (0) TEST_ARENAS_CONSTANT(size_t, quantum, QUANTUM); TEST_ARENAS_CONSTANT(size_t, page, PAGE); @@ -892,18 +933,19 @@ TEST_BEGIN(test_arenas_constants) { TEST_END TEST_BEGIN(test_arenas_bin_constants) { -#define TEST_ARENAS_BIN_CONSTANT(t, name, expected) do { \ - t name; \ - size_t sz = sizeof(t); \ - expect_d_eq(mallctl("arenas.bin.0."#name, (void *)&name, &sz, \ - NULL, 0), 0, "Unexpected mallctl() failure"); \ - expect_zu_eq(name, expected, "Incorrect "#name" size"); \ -} while (0) +#define TEST_ARENAS_BIN_CONSTANT(t, name, expected) \ + do { \ + t name; \ + size_t sz = sizeof(t); \ + expect_d_eq(mallctl("arenas.bin.0." #name, (void *)&name, &sz, \ + NULL, 0), \ + 0, "Unexpected mallctl() failure"); \ + expect_zu_eq(name, expected, "Incorrect " #name " size"); \ + } while (0) TEST_ARENAS_BIN_CONSTANT(size_t, size, bin_infos[0].reg_size); TEST_ARENAS_BIN_CONSTANT(uint32_t, nregs, bin_infos[0].nregs); - TEST_ARENAS_BIN_CONSTANT(size_t, slab_size, - bin_infos[0].slab_size); + TEST_ARENAS_BIN_CONSTANT(size_t, slab_size, bin_infos[0].slab_size); TEST_ARENAS_BIN_CONSTANT(uint32_t, nshards, bin_infos[0].n_shards); #undef TEST_ARENAS_BIN_CONSTANT @@ -911,16 +953,17 @@ TEST_BEGIN(test_arenas_bin_constants) { TEST_END TEST_BEGIN(test_arenas_lextent_constants) { -#define TEST_ARENAS_LEXTENT_CONSTANT(t, name, expected) do { \ - t name; \ - size_t sz = sizeof(t); \ - expect_d_eq(mallctl("arenas.lextent.0."#name, (void *)&name, \ - &sz, NULL, 0), 0, "Unexpected mallctl() failure"); \ - expect_zu_eq(name, expected, "Incorrect "#name" size"); \ -} while (0) +#define TEST_ARENAS_LEXTENT_CONSTANT(t, name, expected) \ + do { \ + t name; \ + size_t sz = sizeof(t); \ + expect_d_eq(mallctl("arenas.lextent.0." 
#name, (void *)&name, \ + &sz, NULL, 0), \ + 0, "Unexpected mallctl() failure"); \ + expect_zu_eq(name, expected, "Incorrect " #name " size"); \ + } while (0) - TEST_ARENAS_LEXTENT_CONSTANT(size_t, size, - SC_LARGE_MINCLASS); + TEST_ARENAS_LEXTENT_CONSTANT(size_t, size, SC_LARGE_MINCLASS); #undef TEST_ARENAS_LEXTENT_CONSTANT } @@ -928,25 +971,27 @@ TEST_END TEST_BEGIN(test_arenas_create) { unsigned narenas_before, arena, narenas_after; - size_t sz = sizeof(unsigned); + size_t sz = sizeof(unsigned); - expect_d_eq(mallctl("arenas.narenas", (void *)&narenas_before, &sz, - NULL, 0), 0, "Unexpected mallctl() failure"); + expect_d_eq( + mallctl("arenas.narenas", (void *)&narenas_before, &sz, NULL, 0), 0, + "Unexpected mallctl() failure"); expect_d_eq(mallctl("arenas.create", (void *)&arena, &sz, NULL, 0), 0, "Unexpected mallctl() failure"); - expect_d_eq(mallctl("arenas.narenas", (void *)&narenas_after, &sz, NULL, - 0), 0, "Unexpected mallctl() failure"); + expect_d_eq( + mallctl("arenas.narenas", (void *)&narenas_after, &sz, NULL, 0), 0, + "Unexpected mallctl() failure"); - expect_u_eq(narenas_before+1, narenas_after, + expect_u_eq(narenas_before + 1, narenas_after, "Unexpected number of arenas before versus after extension"); - expect_u_eq(arena, narenas_after-1, "Unexpected arena index"); + expect_u_eq(arena, narenas_after - 1, "Unexpected arena index"); } TEST_END TEST_BEGIN(test_arenas_lookup) { unsigned arena, arena1; - void *ptr; - size_t sz = sizeof(unsigned); + void *ptr; + size_t sz = sizeof(unsigned); expect_d_eq(mallctl("arenas.create", (void *)&arena, &sz, NULL, 0), 0, "Unexpected mallctl() failure"); @@ -967,7 +1012,7 @@ TEST_BEGIN(test_prof_active) { test_skip_if(!config_prof); test_skip_if(opt_prof); - bool active, old; + bool active, old; size_t len = sizeof(bool); active = true; @@ -987,12 +1032,14 @@ TEST_BEGIN(test_prof_active) { TEST_END TEST_BEGIN(test_stats_arenas) { -#define TEST_STATS_ARENAS(t, name) do { \ - t name; \ - size_t sz = sizeof(t); \ 
- expect_d_eq(mallctl("stats.arenas.0."#name, (void *)&name, &sz, \ - NULL, 0), 0, "Unexpected mallctl() failure"); \ -} while (0) +#define TEST_STATS_ARENAS(t, name) \ + do { \ + t name; \ + size_t sz = sizeof(t); \ + expect_d_eq(mallctl("stats.arenas.0." #name, (void *)&name, \ + &sz, NULL, 0), \ + 0, "Unexpected mallctl() failure"); \ + } while (0) TEST_STATS_ARENAS(unsigned, nthreads); TEST_STATS_ARENAS(const char *, dss); @@ -1008,13 +1055,14 @@ TEST_END TEST_BEGIN(test_stats_arenas_hpa_shard_counters) { test_skip_if(!config_stats); -#define TEST_STATS_ARENAS_HPA_SHARD_COUNTERS(t, name) do { \ - t name; \ - size_t sz = sizeof(t); \ - expect_d_eq(mallctl("stats.arenas.0.hpa_shard."#name, \ - (void *)&name, &sz, \ - NULL, 0), 0, "Unexpected mallctl() failure"); \ -} while (0) +#define TEST_STATS_ARENAS_HPA_SHARD_COUNTERS(t, name) \ + do { \ + t name; \ + size_t sz = sizeof(t); \ + expect_d_eq(mallctl("stats.arenas.0.hpa_shard." #name, \ + (void *)&name, &sz, NULL, 0), \ + 0, "Unexpected mallctl() failure"); \ + } while (0) TEST_STATS_ARENAS_HPA_SHARD_COUNTERS(size_t, npageslabs); TEST_STATS_ARENAS_HPA_SHARD_COUNTERS(size_t, nactive); @@ -1031,19 +1079,22 @@ TEST_END TEST_BEGIN(test_stats_arenas_hpa_shard_slabs) { test_skip_if(!config_stats); -#define TEST_STATS_ARENAS_HPA_SHARD_SLABS_GEN(t, slab, name) do { \ - t slab##_##name; \ - size_t sz = sizeof(t); \ - expect_d_eq(mallctl("stats.arenas.0.hpa_shard."#slab"."#name, \ - (void *)&slab##_##name, &sz, \ - NULL, 0), 0, "Unexpected mallctl() failure"); \ -} while (0) +#define TEST_STATS_ARENAS_HPA_SHARD_SLABS_GEN(t, slab, name) \ + do { \ + t slab##_##name; \ + size_t sz = sizeof(t); \ + expect_d_eq( \ + mallctl("stats.arenas.0.hpa_shard." #slab "." 
#name, \ + (void *)&slab##_##name, &sz, NULL, 0), \ + 0, "Unexpected mallctl() failure"); \ + } while (0) -#define TEST_STATS_ARENAS_HPA_SHARD_SLABS(t, slab, name) do { \ - TEST_STATS_ARENAS_HPA_SHARD_SLABS_GEN(t, slab, \ - name##_##nonhuge); \ - TEST_STATS_ARENAS_HPA_SHARD_SLABS_GEN(t, slab, name##_##huge); \ -} while (0) +#define TEST_STATS_ARENAS_HPA_SHARD_SLABS(t, slab, name) \ + do { \ + TEST_STATS_ARENAS_HPA_SHARD_SLABS_GEN( \ + t, slab, name##_##nonhuge); \ + TEST_STATS_ARENAS_HPA_SHARD_SLABS_GEN(t, slab, name##_##huge); \ + } while (0) TEST_STATS_ARENAS_HPA_SHARD_SLABS(size_t, slabs, npageslabs); TEST_STATS_ARENAS_HPA_SHARD_SLABS(size_t, slabs, nactive); @@ -1069,18 +1120,18 @@ alloc_hook(void *extra, UNUSED hook_alloc_t type, UNUSED void *result, } static void -dalloc_hook(void *extra, UNUSED hook_dalloc_t type, - UNUSED void *address, UNUSED uintptr_t args_raw[3]) { +dalloc_hook(void *extra, UNUSED hook_dalloc_t type, UNUSED void *address, + UNUSED uintptr_t args_raw[3]) { *(bool *)extra = true; } TEST_BEGIN(test_hooks) { - bool hook_called = false; + bool hook_called = false; hooks_t hooks = {&alloc_hook, &dalloc_hook, NULL, &hook_called}; - void *handle = NULL; - size_t sz = sizeof(handle); - int err = mallctl("experimental.hooks.install", &handle, &sz, &hooks, - sizeof(hooks)); + void *handle = NULL; + size_t sz = sizeof(handle); + int err = mallctl( + "experimental.hooks.install", &handle, &sz, &hooks, sizeof(hooks)); expect_d_eq(err, 0, "Hook installation failed"); expect_ptr_ne(handle, NULL, "Hook installation gave null handle"); void *ptr = mallocx(1, 0); @@ -1089,8 +1140,8 @@ TEST_BEGIN(test_hooks) { free(ptr); expect_true(hook_called, "Free hook not called"); - err = mallctl("experimental.hooks.remove", NULL, NULL, &handle, - sizeof(handle)); + err = mallctl( + "experimental.hooks.remove", NULL, NULL, &handle, sizeof(handle)); expect_d_eq(err, 0, "Hook removal failed"); hook_called = false; ptr = mallocx(1, 0); @@ -1100,13 +1151,13 @@ 
TEST_BEGIN(test_hooks) { TEST_END TEST_BEGIN(test_hooks_exhaustion) { - bool hook_called = false; + bool hook_called = false; hooks_t hooks = {&alloc_hook, &dalloc_hook, NULL, &hook_called}; - void *handle; - void *handles[HOOK_MAX]; + void *handle; + void *handles[HOOK_MAX]; size_t sz = sizeof(handle); - int err; + int err; for (int i = 0; i < HOOK_MAX; i++) { handle = NULL; err = mallctl("experimental.hooks.install", &handle, &sz, @@ -1115,8 +1166,8 @@ TEST_BEGIN(test_hooks_exhaustion) { expect_ptr_ne(handle, NULL, "Got NULL handle"); handles[i] = handle; } - err = mallctl("experimental.hooks.install", &handle, &sz, &hooks, - sizeof(hooks)); + err = mallctl( + "experimental.hooks.install", &handle, &sz, &hooks, sizeof(hooks)); expect_d_eq(err, EAGAIN, "Should have failed hook installation"); for (int i = 0; i < HOOK_MAX; i++) { err = mallctl("experimental.hooks.remove", NULL, NULL, @@ -1125,12 +1176,12 @@ TEST_BEGIN(test_hooks_exhaustion) { } /* Insertion failed, but then we removed some; it should work now. 
*/ handle = NULL; - err = mallctl("experimental.hooks.install", &handle, &sz, &hooks, - sizeof(hooks)); + err = mallctl( + "experimental.hooks.install", &handle, &sz, &hooks, sizeof(hooks)); expect_d_eq(err, 0, "Hook insertion failed"); expect_ptr_ne(handle, NULL, "Got NULL handle"); - err = mallctl("experimental.hooks.remove", NULL, NULL, &handle, - sizeof(handle)); + err = mallctl( + "experimental.hooks.remove", NULL, NULL, &handle, sizeof(handle)); expect_d_eq(err, 0, "Hook removal failed"); } TEST_END @@ -1144,7 +1195,7 @@ TEST_BEGIN(test_thread_idle) { */ test_skip_if(!config_stats); - int err; + int err; size_t sz; size_t miblen; @@ -1164,14 +1215,15 @@ TEST_BEGIN(test_thread_idle) { sz = sizeof(arena_ind); expect_d_eq(mallctl("arenas.create", (void *)&arena_ind, &sz, NULL, 0), 0, "Unexpected mallctl() failure"); - err = mallctl("thread.arena", NULL, NULL, &arena_ind, sizeof(arena_ind)); + err = mallctl( + "thread.arena", NULL, NULL, &arena_ind, sizeof(arena_ind)); expect_d_eq(err, 0, "Unexpected mallctl() failure"); err = mallctl("thread.tcache.flush", NULL, NULL, NULL, 0); expect_d_eq(err, 0, "Unexpected mallctl() failure"); /* We're going to do an allocation of size 1, which we know is small. 
*/ size_t mib[5]; - miblen = sizeof(mib)/sizeof(mib[0]); + miblen = sizeof(mib) / sizeof(mib[0]); err = mallctlnametomib("stats.arenas.0.small.ndalloc", mib, &miblen); expect_d_eq(err, 0, ""); mib[2] = arena_ind; @@ -1220,9 +1272,9 @@ TEST_BEGIN(test_thread_peak) { size_t big_size = 10 * 1024 * 1024; size_t small_size = 256; - void *ptr; - int err; - size_t sz; + void *ptr; + int err; + size_t sz; uint64_t peak; sz = sizeof(uint64_t); @@ -1293,9 +1345,9 @@ TEST_BEGIN(test_thread_activity_callback) { test_skip_if(!config_stats); const size_t big_size = 10 * 1024 * 1024; - void *ptr; - int err; - size_t sz; + void *ptr; + int err; + size_t sz; uint64_t *allocatedp; uint64_t *deallocatedp; @@ -1305,12 +1357,12 @@ TEST_BEGIN(test_thread_activity_callback) { err = mallctl("thread.deallocatedp", &deallocatedp, &sz, NULL, 0); assert_d_eq(0, err, ""); - activity_callback_thunk_t old_thunk = {(activity_callback_t)111, - (void *)222}; + activity_callback_thunk_t old_thunk = { + (activity_callback_t)111, (void *)222}; - activity_test_data_t test_data = {333, 444}; - activity_callback_thunk_t new_thunk = - {&activity_test_callback, &test_data}; + activity_test_data_t test_data = {333, 444}; + activity_callback_thunk_t new_thunk = { + &activity_test_callback, &test_data}; sz = sizeof(old_thunk); err = mallctl("experimental.thread.activity_callback", &old_thunk, &sz, @@ -1329,7 +1381,7 @@ TEST_BEGIN(test_thread_activity_callback) { expect_u64_eq(test_data.obtained_dalloc, *deallocatedp, ""); sz = sizeof(old_thunk); - new_thunk = (activity_callback_thunk_t){ NULL, NULL }; + new_thunk = (activity_callback_thunk_t){NULL, NULL}; err = mallctl("experimental.thread.activity_callback", &old_thunk, &sz, &new_thunk, sizeof(new_thunk)); assert_d_eq(0, err, ""); @@ -1347,8 +1399,6 @@ TEST_BEGIN(test_thread_activity_callback) { } TEST_END - - static unsigned nuser_thread_event_cb_calls; static void user_thread_event_cb(bool is_alloc, uint64_t tallocated, uint64_t tdallocated) { @@ -1357,25 
+1407,25 @@ user_thread_event_cb(bool is_alloc, uint64_t tallocated, uint64_t tdallocated) { ++nuser_thread_event_cb_calls; } static user_hook_object_t user_te_obj = { - .callback = user_thread_event_cb, - .interval = 100, - .is_alloc_only = false, + .callback = user_thread_event_cb, + .interval = 100, + .is_alloc_only = false, }; TEST_BEGIN(test_thread_event_hook) { const size_t big_size = 10 * 1024 * 1024; - void *ptr; - int err; + void *ptr; + int err; unsigned current_calls = nuser_thread_event_cb_calls; - err = mallctl("experimental.hooks.thread_event", NULL, 0, - &user_te_obj, sizeof(user_te_obj)); + err = mallctl("experimental.hooks.thread_event", NULL, 0, &user_te_obj, + sizeof(user_te_obj)); assert_d_eq(0, err, ""); - err = mallctl("experimental.hooks.thread_event", NULL, 0, - &user_te_obj, sizeof(user_te_obj)); - assert_d_eq(0, err, "Not an error to provide object with same interval and cb"); - + err = mallctl("experimental.hooks.thread_event", NULL, 0, &user_te_obj, + sizeof(user_te_obj)); + assert_d_eq( + 0, err, "Not an error to provide object with same interval and cb"); ptr = mallocx(big_size, 0); free(ptr); @@ -1383,47 +1433,23 @@ TEST_BEGIN(test_thread_event_hook) { } TEST_END - int main(void) { - return test( - test_mallctl_errors, - test_mallctlnametomib_errors, - test_mallctlbymib_errors, - test_mallctl_read_write, - test_mallctlnametomib_short_mib, - test_mallctlnametomib_short_name, - test_mallctlmibnametomib, - test_mallctlbymibname, - test_mallctl_config, - test_mallctl_opt, - test_manpage_example, - test_tcache_none, - test_tcache, - test_thread_arena, - test_arena_i_initialized, - test_arena_i_dirty_decay_ms, - test_arena_i_muzzy_decay_ms, - test_arena_i_purge, - test_arena_i_decay, - test_arena_i_dss, - test_arena_i_name, - test_arena_i_retain_grow_limit, - test_arenas_dirty_decay_ms, - test_arenas_muzzy_decay_ms, - test_arenas_constants, - test_arenas_bin_constants, - test_arenas_lextent_constants, - test_arenas_create, - 
test_arenas_lookup, - test_prof_active, - test_stats_arenas, + return test(test_mallctl_errors, test_mallctlnametomib_errors, + test_mallctlbymib_errors, test_mallctl_read_write, + test_mallctlnametomib_short_mib, test_mallctlnametomib_short_name, + test_mallctlmibnametomib, test_mallctlbymibname, + test_mallctl_config, test_mallctl_opt, test_manpage_example, + test_tcache_none, test_tcache, test_thread_arena, + test_arena_i_initialized, test_arena_i_dirty_decay_ms, + test_arena_i_muzzy_decay_ms, test_arena_i_purge, test_arena_i_decay, + test_arena_i_dss, test_arena_i_name, test_arena_i_retain_grow_limit, + test_arenas_dirty_decay_ms, test_arenas_muzzy_decay_ms, + test_arenas_constants, test_arenas_bin_constants, + test_arenas_lextent_constants, test_arenas_create, + test_arenas_lookup, test_prof_active, test_stats_arenas, test_stats_arenas_hpa_shard_counters, - test_stats_arenas_hpa_shard_slabs, - test_hooks, - test_hooks_exhaustion, - test_thread_idle, - test_thread_peak, - test_thread_activity_callback, - test_thread_event_hook); + test_stats_arenas_hpa_shard_slabs, test_hooks, + test_hooks_exhaustion, test_thread_idle, test_thread_peak, + test_thread_activity_callback, test_thread_event_hook); } diff --git a/test/unit/malloc_conf_2.c b/test/unit/malloc_conf_2.c index 9d2c6077..023b7102 100644 --- a/test/unit/malloc_conf_2.c +++ b/test/unit/malloc_conf_2.c @@ -13,12 +13,12 @@ TEST_BEGIN(test_malloc_conf_2) { test_skip_if(windows); ssize_t dirty_decay_ms; - size_t sz = sizeof(dirty_decay_ms); + size_t sz = sizeof(dirty_decay_ms); int err = mallctl("opt.dirty_decay_ms", &dirty_decay_ms, &sz, NULL, 0); assert_d_eq(err, 0, "Unexpected mallctl failure"); - expect_zd_eq(dirty_decay_ms, 1234, - "malloc_conf_2 setting didn't take effect"); + expect_zd_eq( + dirty_decay_ms, 1234, "malloc_conf_2 setting didn't take effect"); } TEST_END @@ -32,22 +32,24 @@ TEST_BEGIN(test_mallctl_global_var) { test_skip_if(windows); const char *mc; - size_t sz = sizeof(mc); - 
expect_d_eq(mallctl("opt.malloc_conf.global_var", - (void *)&mc, &sz, NULL, 0), 0, "Unexpected mallctl() failure"); - expect_str_eq(mc, malloc_conf, "Unexpected value for the global variable " + size_t sz = sizeof(mc); + expect_d_eq( + mallctl("opt.malloc_conf.global_var", (void *)&mc, &sz, NULL, 0), 0, + "Unexpected mallctl() failure"); + expect_str_eq(mc, malloc_conf, + "Unexpected value for the global variable " "malloc_conf"); expect_d_eq(mallctl("opt.malloc_conf.global_var_2_conf_harder", - (void *)&mc, &sz, NULL, 0), 0, "Unexpected mallctl() failure"); - expect_str_eq(mc, malloc_conf_2_conf_harder, "Unexpected value for the " + (void *)&mc, &sz, NULL, 0), + 0, "Unexpected mallctl() failure"); + expect_str_eq(mc, malloc_conf_2_conf_harder, + "Unexpected value for the " "global variable malloc_conf_2_conf_harder"); } TEST_END int main(void) { - return test( - test_malloc_conf_2, - test_mallctl_global_var); + return test(test_malloc_conf_2, test_mallctl_global_var); } diff --git a/test/unit/malloc_io.c b/test/unit/malloc_io.c index 385f7450..f7895945 100644 --- a/test/unit/malloc_io.c +++ b/test/unit/malloc_io.c @@ -14,77 +14,68 @@ TEST_BEGIN(test_malloc_strtoumax) { struct test_s { const char *input; const char *expected_remainder; - int base; - int expected_errno; + int base; + int expected_errno; const char *expected_errno_name; - uintmax_t expected_x; + uintmax_t expected_x; }; -#define ERR(e) e, #e -#define KUMAX(x) ((uintmax_t)x##ULL) -#define KSMAX(x) ((uintmax_t)(intmax_t)x##LL) - struct test_s tests[] = { - {"0", "0", -1, ERR(EINVAL), UINTMAX_MAX}, - {"0", "0", 1, ERR(EINVAL), UINTMAX_MAX}, - {"0", "0", 37, ERR(EINVAL), UINTMAX_MAX}, +#define ERR(e) e, #e +#define KUMAX(x) ((uintmax_t)x##ULL) +#define KSMAX(x) ((uintmax_t)(intmax_t)x##LL) + struct test_s tests[] = {{"0", "0", -1, ERR(EINVAL), UINTMAX_MAX}, + {"0", "0", 1, ERR(EINVAL), UINTMAX_MAX}, + {"0", "0", 37, ERR(EINVAL), UINTMAX_MAX}, - {"", "", 0, ERR(EINVAL), UINTMAX_MAX}, - {"+", "+", 0, 
ERR(EINVAL), UINTMAX_MAX}, - {"++3", "++3", 0, ERR(EINVAL), UINTMAX_MAX}, - {"-", "-", 0, ERR(EINVAL), UINTMAX_MAX}, + {"", "", 0, ERR(EINVAL), UINTMAX_MAX}, + {"+", "+", 0, ERR(EINVAL), UINTMAX_MAX}, + {"++3", "++3", 0, ERR(EINVAL), UINTMAX_MAX}, + {"-", "-", 0, ERR(EINVAL), UINTMAX_MAX}, - {"42", "", 0, ERR(0), KUMAX(42)}, - {"+42", "", 0, ERR(0), KUMAX(42)}, - {"-42", "", 0, ERR(0), KSMAX(-42)}, - {"042", "", 0, ERR(0), KUMAX(042)}, - {"+042", "", 0, ERR(0), KUMAX(042)}, - {"-042", "", 0, ERR(0), KSMAX(-042)}, - {"0x42", "", 0, ERR(0), KUMAX(0x42)}, - {"+0x42", "", 0, ERR(0), KUMAX(0x42)}, - {"-0x42", "", 0, ERR(0), KSMAX(-0x42)}, + {"42", "", 0, ERR(0), KUMAX(42)}, {"+42", "", 0, ERR(0), KUMAX(42)}, + {"-42", "", 0, ERR(0), KSMAX(-42)}, + {"042", "", 0, ERR(0), KUMAX(042)}, + {"+042", "", 0, ERR(0), KUMAX(042)}, + {"-042", "", 0, ERR(0), KSMAX(-042)}, + {"0x42", "", 0, ERR(0), KUMAX(0x42)}, + {"+0x42", "", 0, ERR(0), KUMAX(0x42)}, + {"-0x42", "", 0, ERR(0), KSMAX(-0x42)}, - {"0", "", 0, ERR(0), KUMAX(0)}, - {"1", "", 0, ERR(0), KUMAX(1)}, + {"0", "", 0, ERR(0), KUMAX(0)}, {"1", "", 0, ERR(0), KUMAX(1)}, - {"42", "", 0, ERR(0), KUMAX(42)}, - {" 42", "", 0, ERR(0), KUMAX(42)}, - {"42 ", " ", 0, ERR(0), KUMAX(42)}, - {"0x", "x", 0, ERR(0), KUMAX(0)}, - {"42x", "x", 0, ERR(0), KUMAX(42)}, + {"42", "", 0, ERR(0), KUMAX(42)}, {" 42", "", 0, ERR(0), KUMAX(42)}, + {"42 ", " ", 0, ERR(0), KUMAX(42)}, + {"0x", "x", 0, ERR(0), KUMAX(0)}, + {"42x", "x", 0, ERR(0), KUMAX(42)}, - {"07", "", 0, ERR(0), KUMAX(7)}, - {"010", "", 0, ERR(0), KUMAX(8)}, - {"08", "8", 0, ERR(0), KUMAX(0)}, - {"0_", "_", 0, ERR(0), KUMAX(0)}, + {"07", "", 0, ERR(0), KUMAX(7)}, {"010", "", 0, ERR(0), KUMAX(8)}, + {"08", "8", 0, ERR(0), KUMAX(0)}, {"0_", "_", 0, ERR(0), KUMAX(0)}, - {"0x", "x", 0, ERR(0), KUMAX(0)}, - {"0X", "X", 0, ERR(0), KUMAX(0)}, - {"0xg", "xg", 0, ERR(0), KUMAX(0)}, - {"0XA", "", 0, ERR(0), KUMAX(10)}, + {"0x", "x", 0, ERR(0), KUMAX(0)}, {"0X", "X", 0, ERR(0), KUMAX(0)}, + 
{"0xg", "xg", 0, ERR(0), KUMAX(0)}, + {"0XA", "", 0, ERR(0), KUMAX(10)}, - {"010", "", 10, ERR(0), KUMAX(10)}, - {"0x3", "x3", 10, ERR(0), KUMAX(0)}, + {"010", "", 10, ERR(0), KUMAX(10)}, + {"0x3", "x3", 10, ERR(0), KUMAX(0)}, - {"12", "2", 2, ERR(0), KUMAX(1)}, - {"78", "8", 8, ERR(0), KUMAX(7)}, - {"9a", "a", 10, ERR(0), KUMAX(9)}, - {"9A", "A", 10, ERR(0), KUMAX(9)}, - {"fg", "g", 16, ERR(0), KUMAX(15)}, - {"FG", "G", 16, ERR(0), KUMAX(15)}, - {"0xfg", "g", 16, ERR(0), KUMAX(15)}, - {"0XFG", "G", 16, ERR(0), KUMAX(15)}, - {"z_", "_", 36, ERR(0), KUMAX(35)}, - {"Z_", "_", 36, ERR(0), KUMAX(35)} - }; + {"12", "2", 2, ERR(0), KUMAX(1)}, {"78", "8", 8, ERR(0), KUMAX(7)}, + {"9a", "a", 10, ERR(0), KUMAX(9)}, + {"9A", "A", 10, ERR(0), KUMAX(9)}, + {"fg", "g", 16, ERR(0), KUMAX(15)}, + {"FG", "G", 16, ERR(0), KUMAX(15)}, + {"0xfg", "g", 16, ERR(0), KUMAX(15)}, + {"0XFG", "G", 16, ERR(0), KUMAX(15)}, + {"z_", "_", 36, ERR(0), KUMAX(35)}, + {"Z_", "_", 36, ERR(0), KUMAX(35)}}; #undef ERR #undef KUMAX #undef KSMAX unsigned i; - for (i = 0; i < sizeof(tests)/sizeof(struct test_s); i++) { + for (i = 0; i < sizeof(tests) / sizeof(struct test_s); i++) { struct test_s *test = &tests[i]; - int err; - uintmax_t result; - char *remainder; + int err; + uintmax_t result; + char *remainder; set_errno(0); result = malloc_strtoumax(test->input, &remainder, test->base); @@ -93,8 +84,8 @@ TEST_BEGIN(test_malloc_strtoumax) { "Expected errno %s for \"%s\", base %d", test->expected_errno_name, test->input, test->base); expect_str_eq(remainder, test->expected_remainder, - "Unexpected remainder for \"%s\", base %d", - test->input, test->base); + "Unexpected remainder for \"%s\", base %d", test->input, + test->base); if (err == 0) { expect_ju_eq(result, test->expected_x, "Unexpected result for \"%s\", base %d", @@ -105,31 +96,32 @@ TEST_BEGIN(test_malloc_strtoumax) { TEST_END TEST_BEGIN(test_malloc_snprintf_truncated) { -#define BUFLEN 15 - char buf[BUFLEN]; +#define BUFLEN 15 + char 
buf[BUFLEN]; size_t result; size_t len; -#define TEST(expected_str_untruncated, ...) do { \ - result = malloc_snprintf(buf, len, __VA_ARGS__); \ - expect_d_eq(strncmp(buf, expected_str_untruncated, len-1), 0, \ - "Unexpected string inequality (\"%s\" vs \"%s\")", \ - buf, expected_str_untruncated); \ - expect_zu_eq(result, strlen(expected_str_untruncated), \ - "Unexpected result"); \ -} while (0) +#define TEST(expected_str_untruncated, ...) \ + do { \ + result = malloc_snprintf(buf, len, __VA_ARGS__); \ + expect_d_eq(strncmp(buf, expected_str_untruncated, len - 1), \ + 0, "Unexpected string inequality (\"%s\" vs \"%s\")", buf, \ + expected_str_untruncated); \ + expect_zu_eq(result, strlen(expected_str_untruncated), \ + "Unexpected result"); \ + } while (0) for (len = 1; len < BUFLEN; len++) { - TEST("012346789", "012346789"); - TEST("a0123b", "a%sb", "0123"); - TEST("a01234567", "a%s%s", "0123", "4567"); - TEST("a0123 ", "a%-6s", "0123"); - TEST("a 0123", "a%6s", "0123"); - TEST("a 012", "a%6.3s", "0123"); - TEST("a 012", "a%*.*s", 6, 3, "0123"); - TEST("a 123b", "a% db", 123); - TEST("a123b", "a%-db", 123); - TEST("a-123b", "a%-db", -123); - TEST("a+123b", "a%+db", 123); + TEST("012346789", "012346789"); + TEST("a0123b", "a%sb", "0123"); + TEST("a01234567", "a%s%s", "0123", "4567"); + TEST("a0123 ", "a%-6s", "0123"); + TEST("a 0123", "a%6s", "0123"); + TEST("a 012", "a%6.3s", "0123"); + TEST("a 012", "a%*.*s", 6, 3, "0123"); + TEST("a 123b", "a% db", 123); + TEST("a123b", "a%-db", 123); + TEST("a-123b", "a%-db", -123); + TEST("a+123b", "a%+db", 123); } #undef BUFLEN #undef TEST @@ -137,14 +129,16 @@ TEST_BEGIN(test_malloc_snprintf_truncated) { TEST_END TEST_BEGIN(test_malloc_snprintf) { -#define BUFLEN 128 - char buf[BUFLEN]; +#define BUFLEN 128 + char buf[BUFLEN]; size_t result; -#define TEST(expected_str, ...) 
do { \ - result = malloc_snprintf(buf, sizeof(buf), __VA_ARGS__); \ - expect_str_eq(buf, expected_str, "Unexpected output"); \ - expect_zu_eq(result, strlen(expected_str), "Unexpected result");\ -} while (0) +#define TEST(expected_str, ...) \ + do { \ + result = malloc_snprintf(buf, sizeof(buf), __VA_ARGS__); \ + expect_str_eq(buf, expected_str, "Unexpected output"); \ + expect_zu_eq( \ + result, strlen(expected_str), "Unexpected result"); \ + } while (0) TEST("hello", "hello"); @@ -260,9 +254,6 @@ TEST_END int main(void) { - return test( - test_malloc_strtoumax_no_endptr, - test_malloc_strtoumax, - test_malloc_snprintf_truncated, - test_malloc_snprintf); + return test(test_malloc_strtoumax_no_endptr, test_malloc_strtoumax, + test_malloc_snprintf_truncated, test_malloc_snprintf); } diff --git a/test/unit/math.c b/test/unit/math.c index a32767c5..b0994768 100644 --- a/test/unit/math.c +++ b/test/unit/math.c @@ -6,11 +6,11 @@ #include #ifdef __PGI -#undef INFINITY +# undef INFINITY #endif #ifndef INFINITY -#define INFINITY (DBL_MAX + DBL_MAX) +# define INFINITY (DBL_MAX + DBL_MAX) #endif static bool @@ -20,7 +20,7 @@ double_eq_rel(double a, double b, double max_rel_err, double max_abs_err) { if (fabs(a - b) < max_abs_err) { return true; } - rel_err = (fabs(b) > fabs(a)) ? fabs((a-b)/b) : fabs((a-b)/a); + rel_err = (fabs(b) > fabs(a)) ? fabs((a - b) / b) : fabs((a - b) / a); return (rel_err < max_rel_err); } @@ -41,209 +41,206 @@ TEST_BEGIN(test_ln_gamma_factorial) { /* exp(ln_gamma(x)) == (x-1)! for integer x. */ for (x = 1; x <= 21; x++) { - expect_true(double_eq_rel(exp(ln_gamma(x)), - (double)factorial(x-1), MAX_REL_ERR, MAX_ABS_ERR), + expect_true( + double_eq_rel(exp(ln_gamma(x)), (double)factorial(x - 1), + MAX_REL_ERR, MAX_ABS_ERR), "Incorrect factorial result for x=%u", x); } } TEST_END /* Expected ln_gamma([0.0..100.0] increment=0.25). 
*/ -static const double ln_gamma_misc_expected[] = { - INFINITY, - 1.28802252469807743, 0.57236494292470008, 0.20328095143129538, - 0.00000000000000000, -0.09827183642181320, -0.12078223763524518, - -0.08440112102048555, 0.00000000000000000, 0.12487171489239651, - 0.28468287047291918, 0.47521466691493719, 0.69314718055994529, - 0.93580193110872523, 1.20097360234707429, 1.48681557859341718, - 1.79175946922805496, 2.11445692745037128, 2.45373657084244234, - 2.80857141857573644, 3.17805383034794575, 3.56137591038669710, - 3.95781396761871651, 4.36671603662228680, 4.78749174278204581, - 5.21960398699022932, 5.66256205985714178, 6.11591589143154568, - 6.57925121201010121, 7.05218545073853953, 7.53436423675873268, - 8.02545839631598312, 8.52516136106541467, 9.03318691960512332, - 9.54926725730099690, 10.07315123968123949, 10.60460290274525086, - 11.14340011995171231, 11.68933342079726856, 12.24220494005076176, - 12.80182748008146909, 13.36802367147604720, 13.94062521940376342, - 14.51947222506051816, 15.10441257307551943, 15.69530137706046524, - 16.29200047656724237, 16.89437797963419285, 17.50230784587389010, - 18.11566950571089407, 18.73434751193644843, 19.35823122022435427, - 19.98721449566188468, 20.62119544270163018, 21.26007615624470048, - 21.90376249182879320, 22.55216385312342098, 23.20519299513386002, - 23.86276584168908954, 24.52480131594137802, 25.19122118273868338, - 25.86194990184851861, 26.53691449111561340, 27.21604439872720604, - 27.89927138384089389, 28.58652940490193828, 29.27775451504081516, - 29.97288476399884871, 30.67186010608067548, 31.37462231367769050, - 32.08111489594735843, 32.79128302226991565, 33.50507345013689076, - 34.22243445715505317, 34.94331577687681545, 35.66766853819134298, - 36.39544520803305261, 37.12659953718355865, 37.86108650896109395, - 38.59886229060776230, 39.33988418719949465, 40.08411059791735198, - 40.83150097453079752, 41.58201578195490100, 42.33561646075348506, - 43.09226539146988699, 43.85192586067515208, 
44.61456202863158893, - 45.38013889847690052, 46.14862228684032885, 46.91997879580877395, - 47.69417578616628361, 48.47118135183522014, 49.25096429545256882, - 50.03349410501914463, 50.81874093156324790, 51.60667556776436982, - 52.39726942748592364, 53.19049452616926743, 53.98632346204390586, - 54.78472939811231157, 55.58568604486942633, 56.38916764371992940, - 57.19514895105859864, 58.00360522298051080, 58.81451220059079787, - 59.62784609588432261, 60.44358357816834371, 61.26170176100199427, - 62.08217818962842927, 62.90499082887649962, 63.73011805151035958, - 64.55753862700632340, 65.38723171073768015, 66.21917683354901385, - 67.05335389170279825, 67.88974313718154008, 68.72832516833013017, - 69.56908092082363737, 70.41199165894616385, 71.25703896716800045, - 72.10420474200799390, 72.95347118416940191, 73.80482079093779646, - 74.65823634883015814, 75.51370092648485866, 76.37119786778275454, - 77.23071078519033961, 78.09222355331530707, 78.95572030266725960, - 79.82118541361435859, 80.68860351052903468, 81.55795945611502873, - 82.42923834590904164, 83.30242550295004378, 84.17750647261028973, - 85.05446701758152983, 85.93329311301090456, 86.81397094178107920, - 87.69648688992882057, 88.58082754219766741, 89.46697967771913795, - 90.35493026581838194, 91.24466646193963015, 92.13617560368709292, - 93.02944520697742803, 93.92446296229978486, 94.82121673107967297, - 95.71969454214321615, 96.61988458827809723, 97.52177522288820910, - 98.42535495673848800, 99.33061245478741341, 100.23753653310367895, - 101.14611615586458981, 102.05634043243354370, 102.96819861451382394, - 103.88168009337621811, 104.79677439715833032, 105.71347118823287303, - 106.63176026064346047, 107.55163153760463501, 108.47307506906540198, - 109.39608102933323153, 110.32063971475740516, 111.24674154146920557, - 112.17437704317786995, 113.10353686902013237, 114.03421178146170689, - 114.96639265424990128, 115.90007047041454769, 116.83523632031698014, - 117.77188139974506953, 118.70999700805310795, 
119.64957454634490830, - 120.59060551569974962, 121.53308151543865279, 122.47699424143097247, - 123.42233548443955726, 124.36909712850338394, 125.31727114935689826, - 126.26684961288492559, 127.21782467361175861, 128.17018857322420899, - 129.12393363912724453, 130.07905228303084755, 131.03553699956862033, - 131.99338036494577864, 132.95257503561629164, 133.91311374698926784, - 134.87498931216194364, 135.83819462068046846, 136.80272263732638294, - 137.76856640092901785, 138.73571902320256299, 139.70417368760718091, - 140.67392364823425055, 141.64496222871400732, 142.61728282114600574, - 143.59087888505104047, 144.56574394634486680, 145.54187159633210058, - 146.51925549072063859, 147.49788934865566148, 148.47776695177302031, - 149.45888214327129617, 150.44122882700193600, 151.42480096657754984, - 152.40959258449737490, 153.39559776128982094, 154.38281063467164245, - 155.37122539872302696, 156.36083630307879844, 157.35163765213474107, - 158.34362380426921391, 159.33678917107920370, 160.33112821663092973, - 161.32663545672428995, 162.32330545817117695, 163.32113283808695314, - 164.32011226319519892, 165.32023844914485267, 166.32150615984036790, - 167.32391020678358018, 168.32744544842768164, 169.33210678954270634, - 170.33788918059275375, 171.34478761712384198, 172.35279713916281707, - 173.36191283062726143, 174.37212981874515094, 175.38344327348534080, - 176.39584840699734514, 177.40934047306160437, 178.42391476654847793, - 179.43956662288721304, 180.45629141754378111, 181.47408456550741107, - 182.49294152078630304, 183.51285777591152737, 184.53382886144947861, - 185.55585034552262869, 186.57891783333786861, 187.60302696672312095, - 188.62817342367162610, 189.65435291789341932, 190.68156119837468054, - 191.70979404894376330, 192.73904728784492590, 193.76931676731820176, - 194.80059837318714244, 195.83288802445184729, 196.86618167288995096, - 197.90047530266301123, 198.93576492992946214, 199.97204660246373464, - 201.00931639928148797, 202.04757043027063901, 
203.08680483582807597, - 204.12701578650228385, 205.16819948264117102, 206.21035215404597807, - 207.25347005962987623, 208.29754948708190909, 209.34258675253678916, - 210.38857820024875878, 211.43552020227099320, 212.48340915813977858, - 213.53224149456323744, 214.58201366511514152, 215.63272214993284592, - 216.68436345542014010, 217.73693411395422004, 218.79043068359703739, - 219.84484974781133815, 220.90018791517996988, 221.95644181913033322, - 223.01360811766215875, 224.07168349307951871, 225.13066465172661879, - 226.19054832372759734, 227.25133126272962159, 228.31301024565024704, - 229.37558207242807384, 230.43904356577689896, 231.50339157094342113, - 232.56862295546847008, 233.63473460895144740, 234.70172344281823484, - 235.76958639009222907, 236.83832040516844586, 237.90792246359117712, - 238.97838956183431947, 240.04971871708477238, 241.12190696702904802, - 242.19495136964280846, 243.26884900298270509, 244.34359696498191283, - 245.41919237324782443, 246.49563236486270057, 247.57291409618682110, - 248.65103474266476269, 249.72999149863338175, 250.80978157713354904, - 251.89040220972316320, 252.97185064629374551, 254.05412415488834199, - 255.13722002152300661, 256.22113555000953511, 257.30586806178126835, - 258.39141489572085675, 259.47777340799029844, 260.56494097186322279, - 261.65291497755913497, 262.74169283208021852, 263.83127195904967266, - 264.92164979855277807, 266.01282380697938379, 267.10479145686849733, - 268.19755023675537586, 269.29109765101975427, 270.38543121973674488, - 271.48054847852881721, 272.57644697842033565, 273.67312428569374561, - 274.77057798174683967, 275.86880566295326389, 276.96780494052313770, - 278.06757344036617496, 279.16810880295668085, 280.26940868320008349, - 281.37147075030043197, 282.47429268763045229, 283.57787219260217171, - 284.68220697654078322, 285.78729476455760050, 286.89313329542699194, - 287.99972032146268930, 289.10705360839756395, 290.21513093526289140, - 291.32395009427028754, 292.43350889069523646, 
293.54380514276073200, - 294.65483668152336350, 295.76660135076059532, 296.87909700685889902, - 297.99232151870342022, 299.10627276756946458, 300.22094864701409733, - 301.33634706277030091, 302.45246593264130297, 303.56930318639643929, - 304.68685676566872189, 305.80512462385280514, 306.92410472600477078, - 308.04379504874236773, 309.16419358014690033, 310.28529831966631036, - 311.40710727801865687, 312.52961847709792664, 313.65282994987899201, - 314.77673974032603610, 315.90134590329950015, 317.02664650446632777, - 318.15263962020929966, 319.27932333753892635, 320.40669575400545455, - 321.53475497761127144, 322.66349912672620803, 323.79292633000159185, - 324.92303472628691452, 326.05382246454587403, 327.18528770377525916, - 328.31742861292224234, 329.45024337080525356, 330.58373016603343331, - 331.71788719692847280, 332.85271267144611329, 333.98820480709991898, - 335.12436183088397001, 336.26118197919845443, 337.39866349777429377, - 338.53680464159958774, 339.67560367484657036, 340.81505887079896411, - 341.95516851178109619, 343.09593088908627578, 344.23734430290727460, - 345.37940706226686416, 346.52211748494903532, 347.66547389743118401, - 348.80947463481720661, 349.95411804077025408, 351.09940246744753267, - 352.24532627543504759, 353.39188783368263103, 354.53908551944078908, - 355.68691771819692349, 356.83538282361303118, 357.98447923746385868, - 359.13420536957539753 -}; +static const double ln_gamma_misc_expected[] = {INFINITY, 1.28802252469807743, + 0.57236494292470008, 0.20328095143129538, 0.00000000000000000, + -0.09827183642181320, -0.12078223763524518, -0.08440112102048555, + 0.00000000000000000, 0.12487171489239651, 0.28468287047291918, + 0.47521466691493719, 0.69314718055994529, 0.93580193110872523, + 1.20097360234707429, 1.48681557859341718, 1.79175946922805496, + 2.11445692745037128, 2.45373657084244234, 2.80857141857573644, + 3.17805383034794575, 3.56137591038669710, 3.95781396761871651, + 4.36671603662228680, 4.78749174278204581, 
5.21960398699022932, + 5.66256205985714178, 6.11591589143154568, 6.57925121201010121, + 7.05218545073853953, 7.53436423675873268, 8.02545839631598312, + 8.52516136106541467, 9.03318691960512332, 9.54926725730099690, + 10.07315123968123949, 10.60460290274525086, 11.14340011995171231, + 11.68933342079726856, 12.24220494005076176, 12.80182748008146909, + 13.36802367147604720, 13.94062521940376342, 14.51947222506051816, + 15.10441257307551943, 15.69530137706046524, 16.29200047656724237, + 16.89437797963419285, 17.50230784587389010, 18.11566950571089407, + 18.73434751193644843, 19.35823122022435427, 19.98721449566188468, + 20.62119544270163018, 21.26007615624470048, 21.90376249182879320, + 22.55216385312342098, 23.20519299513386002, 23.86276584168908954, + 24.52480131594137802, 25.19122118273868338, 25.86194990184851861, + 26.53691449111561340, 27.21604439872720604, 27.89927138384089389, + 28.58652940490193828, 29.27775451504081516, 29.97288476399884871, + 30.67186010608067548, 31.37462231367769050, 32.08111489594735843, + 32.79128302226991565, 33.50507345013689076, 34.22243445715505317, + 34.94331577687681545, 35.66766853819134298, 36.39544520803305261, + 37.12659953718355865, 37.86108650896109395, 38.59886229060776230, + 39.33988418719949465, 40.08411059791735198, 40.83150097453079752, + 41.58201578195490100, 42.33561646075348506, 43.09226539146988699, + 43.85192586067515208, 44.61456202863158893, 45.38013889847690052, + 46.14862228684032885, 46.91997879580877395, 47.69417578616628361, + 48.47118135183522014, 49.25096429545256882, 50.03349410501914463, + 50.81874093156324790, 51.60667556776436982, 52.39726942748592364, + 53.19049452616926743, 53.98632346204390586, 54.78472939811231157, + 55.58568604486942633, 56.38916764371992940, 57.19514895105859864, + 58.00360522298051080, 58.81451220059079787, 59.62784609588432261, + 60.44358357816834371, 61.26170176100199427, 62.08217818962842927, + 62.90499082887649962, 63.73011805151035958, 64.55753862700632340, + 
65.38723171073768015, 66.21917683354901385, 67.05335389170279825, + 67.88974313718154008, 68.72832516833013017, 69.56908092082363737, + 70.41199165894616385, 71.25703896716800045, 72.10420474200799390, + 72.95347118416940191, 73.80482079093779646, 74.65823634883015814, + 75.51370092648485866, 76.37119786778275454, 77.23071078519033961, + 78.09222355331530707, 78.95572030266725960, 79.82118541361435859, + 80.68860351052903468, 81.55795945611502873, 82.42923834590904164, + 83.30242550295004378, 84.17750647261028973, 85.05446701758152983, + 85.93329311301090456, 86.81397094178107920, 87.69648688992882057, + 88.58082754219766741, 89.46697967771913795, 90.35493026581838194, + 91.24466646193963015, 92.13617560368709292, 93.02944520697742803, + 93.92446296229978486, 94.82121673107967297, 95.71969454214321615, + 96.61988458827809723, 97.52177522288820910, 98.42535495673848800, + 99.33061245478741341, 100.23753653310367895, 101.14611615586458981, + 102.05634043243354370, 102.96819861451382394, 103.88168009337621811, + 104.79677439715833032, 105.71347118823287303, 106.63176026064346047, + 107.55163153760463501, 108.47307506906540198, 109.39608102933323153, + 110.32063971475740516, 111.24674154146920557, 112.17437704317786995, + 113.10353686902013237, 114.03421178146170689, 114.96639265424990128, + 115.90007047041454769, 116.83523632031698014, 117.77188139974506953, + 118.70999700805310795, 119.64957454634490830, 120.59060551569974962, + 121.53308151543865279, 122.47699424143097247, 123.42233548443955726, + 124.36909712850338394, 125.31727114935689826, 126.26684961288492559, + 127.21782467361175861, 128.17018857322420899, 129.12393363912724453, + 130.07905228303084755, 131.03553699956862033, 131.99338036494577864, + 132.95257503561629164, 133.91311374698926784, 134.87498931216194364, + 135.83819462068046846, 136.80272263732638294, 137.76856640092901785, + 138.73571902320256299, 139.70417368760718091, 140.67392364823425055, + 141.64496222871400732, 142.61728282114600574, 
143.59087888505104047, + 144.56574394634486680, 145.54187159633210058, 146.51925549072063859, + 147.49788934865566148, 148.47776695177302031, 149.45888214327129617, + 150.44122882700193600, 151.42480096657754984, 152.40959258449737490, + 153.39559776128982094, 154.38281063467164245, 155.37122539872302696, + 156.36083630307879844, 157.35163765213474107, 158.34362380426921391, + 159.33678917107920370, 160.33112821663092973, 161.32663545672428995, + 162.32330545817117695, 163.32113283808695314, 164.32011226319519892, + 165.32023844914485267, 166.32150615984036790, 167.32391020678358018, + 168.32744544842768164, 169.33210678954270634, 170.33788918059275375, + 171.34478761712384198, 172.35279713916281707, 173.36191283062726143, + 174.37212981874515094, 175.38344327348534080, 176.39584840699734514, + 177.40934047306160437, 178.42391476654847793, 179.43956662288721304, + 180.45629141754378111, 181.47408456550741107, 182.49294152078630304, + 183.51285777591152737, 184.53382886144947861, 185.55585034552262869, + 186.57891783333786861, 187.60302696672312095, 188.62817342367162610, + 189.65435291789341932, 190.68156119837468054, 191.70979404894376330, + 192.73904728784492590, 193.76931676731820176, 194.80059837318714244, + 195.83288802445184729, 196.86618167288995096, 197.90047530266301123, + 198.93576492992946214, 199.97204660246373464, 201.00931639928148797, + 202.04757043027063901, 203.08680483582807597, 204.12701578650228385, + 205.16819948264117102, 206.21035215404597807, 207.25347005962987623, + 208.29754948708190909, 209.34258675253678916, 210.38857820024875878, + 211.43552020227099320, 212.48340915813977858, 213.53224149456323744, + 214.58201366511514152, 215.63272214993284592, 216.68436345542014010, + 217.73693411395422004, 218.79043068359703739, 219.84484974781133815, + 220.90018791517996988, 221.95644181913033322, 223.01360811766215875, + 224.07168349307951871, 225.13066465172661879, 226.19054832372759734, + 227.25133126272962159, 228.31301024565024704, 
229.37558207242807384, + 230.43904356577689896, 231.50339157094342113, 232.56862295546847008, + 233.63473460895144740, 234.70172344281823484, 235.76958639009222907, + 236.83832040516844586, 237.90792246359117712, 238.97838956183431947, + 240.04971871708477238, 241.12190696702904802, 242.19495136964280846, + 243.26884900298270509, 244.34359696498191283, 245.41919237324782443, + 246.49563236486270057, 247.57291409618682110, 248.65103474266476269, + 249.72999149863338175, 250.80978157713354904, 251.89040220972316320, + 252.97185064629374551, 254.05412415488834199, 255.13722002152300661, + 256.22113555000953511, 257.30586806178126835, 258.39141489572085675, + 259.47777340799029844, 260.56494097186322279, 261.65291497755913497, + 262.74169283208021852, 263.83127195904967266, 264.92164979855277807, + 266.01282380697938379, 267.10479145686849733, 268.19755023675537586, + 269.29109765101975427, 270.38543121973674488, 271.48054847852881721, + 272.57644697842033565, 273.67312428569374561, 274.77057798174683967, + 275.86880566295326389, 276.96780494052313770, 278.06757344036617496, + 279.16810880295668085, 280.26940868320008349, 281.37147075030043197, + 282.47429268763045229, 283.57787219260217171, 284.68220697654078322, + 285.78729476455760050, 286.89313329542699194, 287.99972032146268930, + 289.10705360839756395, 290.21513093526289140, 291.32395009427028754, + 292.43350889069523646, 293.54380514276073200, 294.65483668152336350, + 295.76660135076059532, 296.87909700685889902, 297.99232151870342022, + 299.10627276756946458, 300.22094864701409733, 301.33634706277030091, + 302.45246593264130297, 303.56930318639643929, 304.68685676566872189, + 305.80512462385280514, 306.92410472600477078, 308.04379504874236773, + 309.16419358014690033, 310.28529831966631036, 311.40710727801865687, + 312.52961847709792664, 313.65282994987899201, 314.77673974032603610, + 315.90134590329950015, 317.02664650446632777, 318.15263962020929966, + 319.27932333753892635, 320.40669575400545455, 
321.53475497761127144, + 322.66349912672620803, 323.79292633000159185, 324.92303472628691452, + 326.05382246454587403, 327.18528770377525916, 328.31742861292224234, + 329.45024337080525356, 330.58373016603343331, 331.71788719692847280, + 332.85271267144611329, 333.98820480709991898, 335.12436183088397001, + 336.26118197919845443, 337.39866349777429377, 338.53680464159958774, + 339.67560367484657036, 340.81505887079896411, 341.95516851178109619, + 343.09593088908627578, 344.23734430290727460, 345.37940706226686416, + 346.52211748494903532, 347.66547389743118401, 348.80947463481720661, + 349.95411804077025408, 351.09940246744753267, 352.24532627543504759, + 353.39188783368263103, 354.53908551944078908, 355.68691771819692349, + 356.83538282361303118, 357.98447923746385868, 359.13420536957539753}; TEST_BEGIN(test_ln_gamma_misc) { unsigned i; - for (i = 1; i < sizeof(ln_gamma_misc_expected)/sizeof(double); i++) { + for (i = 1; i < sizeof(ln_gamma_misc_expected) / sizeof(double); i++) { double x = (double)i * 0.25; - expect_true(double_eq_rel(ln_gamma(x), - ln_gamma_misc_expected[i], MAX_REL_ERR, MAX_ABS_ERR), + expect_true( + double_eq_rel(ln_gamma(x), ln_gamma_misc_expected[i], + MAX_REL_ERR, MAX_ABS_ERR), "Incorrect ln_gamma result for i=%u", i); } } TEST_END /* Expected pt_norm([0.01..0.99] increment=0.01). 
*/ -static const double pt_norm_expected[] = { - -INFINITY, - -2.32634787404084076, -2.05374891063182252, -1.88079360815125085, - -1.75068607125216946, -1.64485362695147264, -1.55477359459685305, - -1.47579102817917063, -1.40507156030963221, -1.34075503369021654, - -1.28155156554460081, -1.22652812003661049, -1.17498679206608991, - -1.12639112903880045, -1.08031934081495606, -1.03643338949378938, - -0.99445788320975281, -0.95416525314619416, -0.91536508784281390, - -0.87789629505122846, -0.84162123357291418, -0.80642124701824025, - -0.77219321418868492, -0.73884684918521371, -0.70630256284008752, - -0.67448975019608171, -0.64334540539291685, -0.61281299101662701, - -0.58284150727121620, -0.55338471955567281, -0.52440051270804067, - -0.49585034734745320, -0.46769879911450812, -0.43991316567323380, - -0.41246312944140462, -0.38532046640756751, -0.35845879325119373, - -0.33185334643681652, -0.30548078809939738, -0.27931903444745404, - -0.25334710313579978, -0.22754497664114931, -0.20189347914185077, - -0.17637416478086135, -0.15096921549677725, -0.12566134685507399, - -0.10043372051146975, -0.07526986209982976, -0.05015358346473352, - -0.02506890825871106, 0.00000000000000000, 0.02506890825871106, - 0.05015358346473366, 0.07526986209982990, 0.10043372051146990, - 0.12566134685507413, 0.15096921549677739, 0.17637416478086146, - 0.20189347914185105, 0.22754497664114931, 0.25334710313579978, - 0.27931903444745404, 0.30548078809939738, 0.33185334643681652, - 0.35845879325119373, 0.38532046640756762, 0.41246312944140484, - 0.43991316567323391, 0.46769879911450835, 0.49585034734745348, - 0.52440051270804111, 0.55338471955567303, 0.58284150727121620, - 0.61281299101662701, 0.64334540539291685, 0.67448975019608171, - 0.70630256284008752, 0.73884684918521371, 0.77219321418868492, - 0.80642124701824036, 0.84162123357291441, 0.87789629505122879, - 0.91536508784281423, 0.95416525314619460, 0.99445788320975348, - 1.03643338949378938, 1.08031934081495606, 1.12639112903880045, - 
1.17498679206608991, 1.22652812003661049, 1.28155156554460081, - 1.34075503369021654, 1.40507156030963265, 1.47579102817917085, - 1.55477359459685394, 1.64485362695147308, 1.75068607125217102, - 1.88079360815125041, 2.05374891063182208, 2.32634787404084076 -}; +static const double pt_norm_expected[] = {-INFINITY, -2.32634787404084076, + -2.05374891063182252, -1.88079360815125085, -1.75068607125216946, + -1.64485362695147264, -1.55477359459685305, -1.47579102817917063, + -1.40507156030963221, -1.34075503369021654, -1.28155156554460081, + -1.22652812003661049, -1.17498679206608991, -1.12639112903880045, + -1.08031934081495606, -1.03643338949378938, -0.99445788320975281, + -0.95416525314619416, -0.91536508784281390, -0.87789629505122846, + -0.84162123357291418, -0.80642124701824025, -0.77219321418868492, + -0.73884684918521371, -0.70630256284008752, -0.67448975019608171, + -0.64334540539291685, -0.61281299101662701, -0.58284150727121620, + -0.55338471955567281, -0.52440051270804067, -0.49585034734745320, + -0.46769879911450812, -0.43991316567323380, -0.41246312944140462, + -0.38532046640756751, -0.35845879325119373, -0.33185334643681652, + -0.30548078809939738, -0.27931903444745404, -0.25334710313579978, + -0.22754497664114931, -0.20189347914185077, -0.17637416478086135, + -0.15096921549677725, -0.12566134685507399, -0.10043372051146975, + -0.07526986209982976, -0.05015358346473352, -0.02506890825871106, + 0.00000000000000000, 0.02506890825871106, 0.05015358346473366, + 0.07526986209982990, 0.10043372051146990, 0.12566134685507413, + 0.15096921549677739, 0.17637416478086146, 0.20189347914185105, + 0.22754497664114931, 0.25334710313579978, 0.27931903444745404, + 0.30548078809939738, 0.33185334643681652, 0.35845879325119373, + 0.38532046640756762, 0.41246312944140484, 0.43991316567323391, + 0.46769879911450835, 0.49585034734745348, 0.52440051270804111, + 0.55338471955567303, 0.58284150727121620, 0.61281299101662701, + 0.64334540539291685, 0.67448975019608171, 
0.70630256284008752, + 0.73884684918521371, 0.77219321418868492, 0.80642124701824036, + 0.84162123357291441, 0.87789629505122879, 0.91536508784281423, + 0.95416525314619460, 0.99445788320975348, 1.03643338949378938, + 1.08031934081495606, 1.12639112903880045, 1.17498679206608991, + 1.22652812003661049, 1.28155156554460081, 1.34075503369021654, + 1.40507156030963265, 1.47579102817917085, 1.55477359459685394, + 1.64485362695147308, 1.75068607125217102, 1.88079360815125041, + 2.05374891063182208, 2.32634787404084076}; TEST_BEGIN(test_pt_norm) { unsigned i; - for (i = 1; i < sizeof(pt_norm_expected)/sizeof(double); i++) { + for (i = 1; i < sizeof(pt_norm_expected) / sizeof(double); i++) { double p = (double)i * 0.01; expect_true(double_eq_rel(pt_norm(p), pt_norm_expected[i], - MAX_REL_ERR, MAX_ABS_ERR), + MAX_REL_ERR, MAX_ABS_ERR), "Incorrect pt_norm result for i=%u", i); } } @@ -254,49 +251,49 @@ TEST_END * df={0.1, 1.1, 10.1, 100.1, 1000.1}). */ static const double pt_chi2_df[] = {0.1, 1.1, 10.1, 100.1, 1000.1}; -static const double pt_chi2_expected[] = { - 1.168926411457320e-40, 1.347680397072034e-22, 3.886980416666260e-17, - 8.245951724356564e-14, 2.068936347497604e-11, 1.562561743309233e-09, - 5.459543043426564e-08, 1.114775688149252e-06, 1.532101202364371e-05, - 1.553884683726585e-04, 1.239396954915939e-03, 8.153872320255721e-03, - 4.631183739647523e-02, 2.473187311701327e-01, 2.175254800183617e+00, +static const double pt_chi2_expected[] = {1.168926411457320e-40, + 1.347680397072034e-22, 3.886980416666260e-17, 8.245951724356564e-14, + 2.068936347497604e-11, 1.562561743309233e-09, 5.459543043426564e-08, + 1.114775688149252e-06, 1.532101202364371e-05, 1.553884683726585e-04, + 1.239396954915939e-03, 8.153872320255721e-03, 4.631183739647523e-02, + 2.473187311701327e-01, 2.175254800183617e+00, - 0.0003729887888876379, 0.0164409238228929513, 0.0521523015190650113, - 0.1064701372271216612, 0.1800913735793082115, 0.2748704281195626931, - 0.3939246282787986497, 
0.5420727552260817816, 0.7267265822221973259, - 0.9596554296000253670, 1.2607440376386165326, 1.6671185084541604304, - 2.2604828984738705167, 3.2868613342148607082, 6.9298574921692139839, + 0.0003729887888876379, 0.0164409238228929513, 0.0521523015190650113, + 0.1064701372271216612, 0.1800913735793082115, 0.2748704281195626931, + 0.3939246282787986497, 0.5420727552260817816, 0.7267265822221973259, + 0.9596554296000253670, 1.2607440376386165326, 1.6671185084541604304, + 2.2604828984738705167, 3.2868613342148607082, 6.9298574921692139839, - 2.606673548632508, 4.602913725294877, 5.646152813924212, - 6.488971315540869, 7.249823275816285, 7.977314231410841, - 8.700354939944047, 9.441728024225892, 10.224338321374127, - 11.076435368801061, 12.039320937038386, 13.183878752697167, - 14.657791935084575, 16.885728216339373, 23.361991680031817, + 2.606673548632508, 4.602913725294877, 5.646152813924212, 6.488971315540869, + 7.249823275816285, 7.977314231410841, 8.700354939944047, 9.441728024225892, + 10.224338321374127, 11.076435368801061, 12.039320937038386, + 13.183878752697167, 14.657791935084575, 16.885728216339373, + 23.361991680031817, - 70.14844087392152, 80.92379498849355, 85.53325420085891, - 88.94433120715347, 91.83732712857017, 94.46719943606301, - 96.96896479994635, 99.43412843510363, 101.94074719829733, - 104.57228644307247, 107.43900093448734, 110.71844673417287, - 114.76616819871325, 120.57422505959563, 135.92318818757556, + 70.14844087392152, 80.92379498849355, 85.53325420085891, 88.94433120715347, + 91.83732712857017, 94.46719943606301, 96.96896479994635, 99.43412843510363, + 101.94074719829733, 104.57228644307247, 107.43900093448734, + 110.71844673417287, 114.76616819871325, 120.57422505959563, + 135.92318818757556, - 899.0072447849649, 937.9271278858220, 953.8117189560207, - 965.3079371501154, 974.8974061207954, 983.4936235182347, - 991.5691170518946, 999.4334123954690, 1007.3391826856553, - 1015.5445154999951, 1024.3777075619569, 1034.3538789836223, - 
1046.4872561869577, 1063.5717461999654, 1107.0741966053859 -}; + 899.0072447849649, 937.9271278858220, 953.8117189560207, 965.3079371501154, + 974.8974061207954, 983.4936235182347, 991.5691170518946, 999.4334123954690, + 1007.3391826856553, 1015.5445154999951, 1024.3777075619569, + 1034.3538789836223, 1046.4872561869577, 1063.5717461999654, + 1107.0741966053859}; TEST_BEGIN(test_pt_chi2) { unsigned i, j; unsigned e = 0; - for (i = 0; i < sizeof(pt_chi2_df)/sizeof(double); i++) { + for (i = 0; i < sizeof(pt_chi2_df) / sizeof(double); i++) { double df = pt_chi2_df[i]; double ln_gamma_df = ln_gamma(df * 0.5); for (j = 1; j < 100; j += 7) { double p = (double)j * 0.01; - expect_true(double_eq_rel(pt_chi2(p, df, ln_gamma_df), - pt_chi2_expected[e], MAX_REL_ERR, MAX_ABS_ERR), + expect_true( + double_eq_rel(pt_chi2(p, df, ln_gamma_df), + pt_chi2_expected[e], MAX_REL_ERR, MAX_ABS_ERR), "Incorrect pt_chi2 result for i=%u, j=%u", i, j); e++; } @@ -309,56 +306,56 @@ TEST_END * shape=[0.5..3.0] increment=0.5). 
*/ static const double pt_gamma_shape[] = {0.5, 1.0, 1.5, 2.0, 2.5, 3.0}; -static const double pt_gamma_expected[] = { - 7.854392895485103e-05, 5.043466107888016e-03, 1.788288957794883e-02, - 3.900956150232906e-02, 6.913847560638034e-02, 1.093710833465766e-01, - 1.613412523825817e-01, 2.274682115597864e-01, 3.114117323127083e-01, - 4.189466220207417e-01, 5.598106789059246e-01, 7.521856146202706e-01, - 1.036125427911119e+00, 1.532450860038180e+00, 3.317448300510606e+00, +static const double pt_gamma_expected[] = {7.854392895485103e-05, + 5.043466107888016e-03, 1.788288957794883e-02, 3.900956150232906e-02, + 6.913847560638034e-02, 1.093710833465766e-01, 1.613412523825817e-01, + 2.274682115597864e-01, 3.114117323127083e-01, 4.189466220207417e-01, + 5.598106789059246e-01, 7.521856146202706e-01, 1.036125427911119e+00, + 1.532450860038180e+00, 3.317448300510606e+00, - 0.01005033585350144, 0.08338160893905107, 0.16251892949777497, - 0.24846135929849966, 0.34249030894677596, 0.44628710262841947, - 0.56211891815354142, 0.69314718055994529, 0.84397007029452920, - 1.02165124753198167, 1.23787435600161766, 1.51412773262977574, - 1.89711998488588196, 2.52572864430825783, 4.60517018598809091, + 0.01005033585350144, 0.08338160893905107, 0.16251892949777497, + 0.24846135929849966, 0.34249030894677596, 0.44628710262841947, + 0.56211891815354142, 0.69314718055994529, 0.84397007029452920, + 1.02165124753198167, 1.23787435600161766, 1.51412773262977574, + 1.89711998488588196, 2.52572864430825783, 4.60517018598809091, - 0.05741590094955853, 0.24747378084860744, 0.39888572212236084, - 0.54394139997444901, 0.69048812513915159, 0.84311389861296104, - 1.00580622221479898, 1.18298694218766931, 1.38038096305861213, - 1.60627736383027453, 1.87396970522337947, 2.20749220408081070, - 2.65852391865854942, 3.37934630984842244, 5.67243336507218476, + 0.05741590094955853, 0.24747378084860744, 0.39888572212236084, + 0.54394139997444901, 0.69048812513915159, 0.84311389861296104, + 
1.00580622221479898, 1.18298694218766931, 1.38038096305861213, + 1.60627736383027453, 1.87396970522337947, 2.20749220408081070, + 2.65852391865854942, 3.37934630984842244, 5.67243336507218476, - 0.1485547402532659, 0.4657458011640391, 0.6832386130709406, - 0.8794297834672100, 1.0700752852474524, 1.2629614217350744, - 1.4638400448580779, 1.6783469900166610, 1.9132338090606940, - 2.1778589228618777, 2.4868823970010991, 2.8664695666264195, - 3.3724415436062114, 4.1682658512758071, 6.6383520679938108, + 0.1485547402532659, 0.4657458011640391, 0.6832386130709406, + 0.8794297834672100, 1.0700752852474524, 1.2629614217350744, + 1.4638400448580779, 1.6783469900166610, 1.9132338090606940, + 2.1778589228618777, 2.4868823970010991, 2.8664695666264195, + 3.3724415436062114, 4.1682658512758071, 6.6383520679938108, - 0.2771490383641385, 0.7195001279643727, 0.9969081732265243, - 1.2383497880608061, 1.4675206597269927, 1.6953064251816552, - 1.9291243435606809, 2.1757300955477641, 2.4428032131216391, - 2.7406534569230616, 3.0851445039665513, 3.5043101122033367, - 4.0575997065264637, 4.9182956424675286, 7.5431362346944937, + 0.2771490383641385, 0.7195001279643727, 0.9969081732265243, + 1.2383497880608061, 1.4675206597269927, 1.6953064251816552, + 1.9291243435606809, 2.1757300955477641, 2.4428032131216391, + 2.7406534569230616, 3.0851445039665513, 3.5043101122033367, + 4.0575997065264637, 4.9182956424675286, 7.5431362346944937, - 0.4360451650782932, 0.9983600902486267, 1.3306365880734528, - 1.6129750834753802, 1.8767241606994294, 2.1357032436097660, - 2.3988853336865565, 2.6740603137235603, 2.9697561737517959, - 3.2971457713883265, 3.6731795898504660, 4.1275751617770631, - 4.7230515633946677, 5.6417477865306020, 8.4059469148854635 -}; + 0.4360451650782932, 0.9983600902486267, 1.3306365880734528, + 1.6129750834753802, 1.8767241606994294, 2.1357032436097660, + 2.3988853336865565, 2.6740603137235603, 2.9697561737517959, + 3.2971457713883265, 3.6731795898504660, 4.1275751617770631, + 
4.7230515633946677, 5.6417477865306020, 8.4059469148854635}; TEST_BEGIN(test_pt_gamma_shape) { unsigned i, j; unsigned e = 0; - for (i = 0; i < sizeof(pt_gamma_shape)/sizeof(double); i++) { + for (i = 0; i < sizeof(pt_gamma_shape) / sizeof(double); i++) { double shape = pt_gamma_shape[i]; double ln_gamma_shape = ln_gamma(shape); for (j = 1; j < 100; j += 7) { double p = (double)j * 0.01; - expect_true(double_eq_rel(pt_gamma(p, shape, 1.0, - ln_gamma_shape), pt_gamma_expected[e], MAX_REL_ERR, - MAX_ABS_ERR), + expect_true( + double_eq_rel( + pt_gamma(p, shape, 1.0, ln_gamma_shape), + pt_gamma_expected[e], MAX_REL_ERR, MAX_ABS_ERR), "Incorrect pt_gamma result for i=%u, j=%u", i, j); e++; } @@ -370,21 +367,16 @@ TEST_BEGIN(test_pt_gamma_scale) { double shape = 1.0; double ln_gamma_shape = ln_gamma(shape); - expect_true(double_eq_rel( - pt_gamma(0.5, shape, 1.0, ln_gamma_shape) * 10.0, - pt_gamma(0.5, shape, 10.0, ln_gamma_shape), MAX_REL_ERR, - MAX_ABS_ERR), + expect_true( + double_eq_rel(pt_gamma(0.5, shape, 1.0, ln_gamma_shape) * 10.0, + pt_gamma(0.5, shape, 10.0, ln_gamma_shape), MAX_REL_ERR, + MAX_ABS_ERR), "Scale should be trivially equivalent to external multiplication"); } TEST_END int main(void) { - return test( - test_ln_gamma_factorial, - test_ln_gamma_misc, - test_pt_norm, - test_pt_chi2, - test_pt_gamma_shape, - test_pt_gamma_scale); + return test(test_ln_gamma_factorial, test_ln_gamma_misc, test_pt_norm, + test_pt_chi2, test_pt_gamma_shape, test_pt_gamma_scale); } diff --git a/test/unit/mpsc_queue.c b/test/unit/mpsc_queue.c index 895edf84..d22d5488 100644 --- a/test/unit/mpsc_queue.c +++ b/test/unit/mpsc_queue.c @@ -12,10 +12,10 @@ struct elem_s { }; /* Include both proto and gen to make sure they match up. 
*/ -mpsc_queue_proto(static, elem_mpsc_queue_, elem_mpsc_queue_t, elem_t, - elem_list_t); -mpsc_queue_gen(static, elem_mpsc_queue_, elem_mpsc_queue_t, elem_t, - elem_list_t, link); +mpsc_queue_proto( + static, elem_mpsc_queue_, elem_mpsc_queue_t, elem_t, elem_list_t); +mpsc_queue_gen( + static, elem_mpsc_queue_, elem_mpsc_queue_t, elem_t, elem_list_t, link); static void init_elems_simple(elem_t *elems, int nelems, int thread) { @@ -29,8 +29,8 @@ init_elems_simple(elem_t *elems, int nelems, int thread) { static void check_elems_simple(elem_list_t *list, int nelems, int thread) { elem_t *elem; - int next_idx = 0; - ql_foreach(elem, list, link) { + int next_idx = 0; + ql_foreach (elem, list, link) { expect_d_lt(next_idx, nelems, "Too many list items"); expect_d_eq(thread, elem->thread, ""); expect_d_eq(next_idx, elem->idx, "List out of order"); @@ -39,9 +39,9 @@ check_elems_simple(elem_list_t *list, int nelems, int thread) { } TEST_BEGIN(test_simple) { - enum {NELEMS = 10}; - elem_t elems[NELEMS]; - elem_list_t list; + enum { NELEMS = 10 }; + elem_t elems[NELEMS]; + elem_list_t list; elem_mpsc_queue_t queue; /* Pop empty queue onto empty list -> empty list */ @@ -82,7 +82,6 @@ TEST_BEGIN(test_simple) { } elem_mpsc_queue_pop_batch(&queue, &list); check_elems_simple(&list, NELEMS, 0); - } TEST_END @@ -137,7 +136,7 @@ TEST_BEGIN(test_push_single_or_batch) { TEST_END TEST_BEGIN(test_multi_op) { - enum {NELEMS = 20}; + enum { NELEMS = 20 }; elem_t elems[NELEMS]; init_elems_simple(elems, NELEMS, 0); elem_list_t push_list; @@ -176,30 +175,29 @@ TEST_BEGIN(test_multi_op) { elem_mpsc_queue_pop_batch(&queue, &result_list); check_elems_simple(&result_list, NELEMS, 0); - } TEST_END typedef struct pusher_arg_s pusher_arg_t; struct pusher_arg_s { elem_mpsc_queue_t *queue; - int thread; - elem_t *elems; - int nelems; + int thread; + elem_t *elems; + int nelems; }; typedef struct popper_arg_s popper_arg_t; struct popper_arg_s { elem_mpsc_queue_t *queue; - int npushers; - int 
nelems_per_pusher; - int *pusher_counts; + int npushers; + int nelems_per_pusher; + int *pusher_counts; }; static void * thd_pusher(void *void_arg) { pusher_arg_t *arg = (pusher_arg_t *)void_arg; - int next_idx = 0; + int next_idx = 0; while (next_idx < arg->nelems) { /* Push 10 items in batch. */ elem_list_t list; @@ -216,7 +214,6 @@ thd_pusher(void *void_arg) { elem_mpsc_queue_push(arg->queue, &arg->elems[next_idx]); next_idx++; } - } return NULL; } @@ -224,13 +221,13 @@ thd_pusher(void *void_arg) { static void * thd_popper(void *void_arg) { popper_arg_t *arg = (popper_arg_t *)void_arg; - int done_pushers = 0; + int done_pushers = 0; while (done_pushers < arg->npushers) { elem_list_t list; ql_new(&list); elem_mpsc_queue_pop_batch(arg->queue, &list); elem_t *elem; - ql_foreach(elem, &list, link) { + ql_foreach (elem, &list, link) { int thread = elem->thread; int idx = elem->idx; expect_d_eq(arg->pusher_counts[thread], idx, @@ -248,12 +245,12 @@ thd_popper(void *void_arg) { TEST_BEGIN(test_multiple_threads) { enum { NPUSHERS = 4, - NELEMS_PER_PUSHER = 1000*1000, + NELEMS_PER_PUSHER = 1000 * 1000, }; - thd_t pushers[NPUSHERS]; + thd_t pushers[NPUSHERS]; pusher_arg_t pusher_arg[NPUSHERS]; - thd_t popper; + thd_t popper; popper_arg_t popper_arg; elem_mpsc_queue_t queue; @@ -296,9 +293,6 @@ TEST_END int main(void) { - return test_no_reentrancy( - test_simple, - test_push_single_or_batch, - test_multi_op, - test_multiple_threads); + return test_no_reentrancy(test_simple, test_push_single_or_batch, + test_multi_op, test_multiple_threads); } diff --git a/test/unit/mq.c b/test/unit/mq.c index f833f77c..9b3b547a 100644 --- a/test/unit/mq.c +++ b/test/unit/mq.c @@ -1,22 +1,22 @@ #include "test/jemalloc_test.h" -#define NSENDERS 3 -#define NMSGS 100000 +#define NSENDERS 3 +#define NMSGS 100000 typedef struct mq_msg_s mq_msg_t; struct mq_msg_s { - mq_msg(mq_msg_t) link; + mq_msg(mq_msg_t) link; }; mq_gen(static, mq_, mq_t, mq_msg_t, link) -TEST_BEGIN(test_mq_basic) { - mq_t 
mq; + TEST_BEGIN(test_mq_basic) { + mq_t mq; mq_msg_t msg; expect_false(mq_init(&mq), "Unexpected mq_init() failure"); expect_u_eq(mq_count(&mq), 0, "mq should be empty"); - expect_ptr_null(mq_tryget(&mq), - "mq_tryget() should fail when the queue is empty"); + expect_ptr_null( + mq_tryget(&mq), "mq_tryget() should fail when the queue is empty"); mq_put(&mq, &msg); expect_u_eq(mq_count(&mq), 1, "mq should contain one message"); @@ -31,7 +31,7 @@ TEST_END static void * thd_receiver_start(void *arg) { - mq_t *mq = (mq_t *)arg; + mq_t *mq = (mq_t *)arg; unsigned i; for (i = 0; i < (NSENDERS * NMSGS); i++) { @@ -44,12 +44,12 @@ thd_receiver_start(void *arg) { static void * thd_sender_start(void *arg) { - mq_t *mq = (mq_t *)arg; + mq_t *mq = (mq_t *)arg; unsigned i; for (i = 0; i < NMSGS; i++) { mq_msg_t *msg; - void *p; + void *p; p = mallocx(sizeof(mq_msg_t), 0); expect_ptr_not_null(p, "Unexpected mallocx() failure"); msg = (mq_msg_t *)p; @@ -59,9 +59,9 @@ thd_sender_start(void *arg) { } TEST_BEGIN(test_mq_threaded) { - mq_t mq; - thd_t receiver; - thd_t senders[NSENDERS]; + mq_t mq; + thd_t receiver; + thd_t senders[NSENDERS]; unsigned i; expect_false(mq_init(&mq), "Unexpected mq_init() failure"); @@ -82,8 +82,5 @@ TEST_END int main(void) { - return test( - test_mq_basic, - test_mq_threaded); + return test(test_mq_basic, test_mq_threaded); } - diff --git a/test/unit/mtx.c b/test/unit/mtx.c index 4aeebc13..0fe15a90 100644 --- a/test/unit/mtx.c +++ b/test/unit/mtx.c @@ -1,7 +1,7 @@ #include "test/jemalloc_test.h" -#define NTHREADS 2 -#define NINCRS 2000000 +#define NTHREADS 2 +#define NINCRS 2000000 TEST_BEGIN(test_mtx_basic) { mtx_t mtx; @@ -14,14 +14,14 @@ TEST_BEGIN(test_mtx_basic) { TEST_END typedef struct { - mtx_t mtx; - unsigned x; + mtx_t mtx; + unsigned x; } thd_start_arg_t; static void * thd_start(void *varg) { thd_start_arg_t *arg = (thd_start_arg_t *)varg; - unsigned i; + unsigned i; for (i = 0; i < NINCRS; i++) { mtx_lock(&arg->mtx); @@ -33,8 +33,8 @@ 
thd_start(void *varg) { TEST_BEGIN(test_mtx_race) { thd_start_arg_t arg; - thd_t thds[NTHREADS]; - unsigned i; + thd_t thds[NTHREADS]; + unsigned i; expect_false(mtx_init(&arg.mtx), "Unexpected mtx_init() failure"); arg.x = 0; @@ -44,14 +44,12 @@ TEST_BEGIN(test_mtx_race) { for (i = 0; i < NTHREADS; i++) { thd_join(thds[i], NULL); } - expect_u_eq(arg.x, NTHREADS * NINCRS, - "Race-related counter corruption"); + expect_u_eq( + arg.x, NTHREADS * NINCRS, "Race-related counter corruption"); } TEST_END int main(void) { - return test( - test_mtx_basic, - test_mtx_race); + return test(test_mtx_basic, test_mtx_race); } diff --git a/test/unit/ncached_max.c b/test/unit/ncached_max.c index 1a0d2885..4724f55b 100644 --- a/test/unit/ncached_max.c +++ b/test/unit/ncached_max.c @@ -2,10 +2,10 @@ #include "test/san.h" const char *malloc_conf = -"tcache_ncached_max:256-1024:1001|2048-2048:0|8192-8192:1,tcache_max:4096"; + "tcache_ncached_max:256-1024:1001|2048-2048:0|8192-8192:1,tcache_max:4096"; extern void tcache_bin_info_compute( cache_bin_info_t tcache_bin_info[TCACHE_NBINS_MAX]); -extern bool tcache_get_default_ncached_max_set(szind_t ind); +extern bool tcache_get_default_ncached_max_set(szind_t ind); extern const cache_bin_info_t *tcache_get_default_ncached_max(void); static void @@ -13,54 +13,54 @@ check_bins_info(cache_bin_info_t tcache_bin_info[TCACHE_NBINS_MAX]) { size_t mib_get[4], mib_get_len; mib_get_len = sizeof(mib_get) / sizeof(size_t); const char *get_name = "thread.tcache.ncached_max.read_sizeclass"; - size_t ncached_max; - size_t sz = sizeof(size_t); + size_t ncached_max; + size_t sz = sizeof(size_t); expect_d_eq(mallctlnametomib(get_name, mib_get, &mib_get_len), 0, "Unexpected mallctlnametomib() failure"); for (szind_t i = 0; i < TCACHE_NBINS_MAX; i++) { size_t bin_size = sz_index2size(i); - expect_d_eq(mallctlbymib(mib_get, mib_get_len, - (void *)&ncached_max, &sz, - (void *)&bin_size, sizeof(size_t)), 0, - "Unexpected mallctlbymib() failure"); + expect_d_eq( + 
mallctlbymib(mib_get, mib_get_len, (void *)&ncached_max, + &sz, (void *)&bin_size, sizeof(size_t)), + 0, "Unexpected mallctlbymib() failure"); expect_zu_eq(ncached_max, tcache_bin_info[i].ncached_max, "Unexpected ncached_max for bin %d", i); /* Check ncached_max returned under a non-bin size. */ bin_size--; size_t temp_ncached_max = 0; expect_d_eq(mallctlbymib(mib_get, mib_get_len, - (void *)&temp_ncached_max, &sz, - (void *)&bin_size, sizeof(size_t)), 0, - "Unexpected mallctlbymib() failure"); + (void *)&temp_ncached_max, &sz, + (void *)&bin_size, sizeof(size_t)), + 0, "Unexpected mallctlbymib() failure"); expect_zu_eq(temp_ncached_max, ncached_max, "Unexpected ncached_max for inaccurate bin size."); } } static void * -ncached_max_check(void* args) { +ncached_max_check(void *args) { cache_bin_info_t tcache_bin_info[TCACHE_NBINS_MAX]; cache_bin_info_t tcache_bin_info_backup[TCACHE_NBINS_MAX]; - tsd_t *tsd = tsd_fetch(); - tcache_t *tcache = tsd_tcachep_get(tsd); + tsd_t *tsd = tsd_fetch(); + tcache_t *tcache = tsd_tcachep_get(tsd); assert(tcache != NULL); tcache_slow_t *tcache_slow = tcache->tcache_slow; - tcache_bin_info_compute(tcache_bin_info); - memcpy(tcache_bin_info_backup, tcache_bin_info, - sizeof(tcache_bin_info)); + memcpy( + tcache_bin_info_backup, tcache_bin_info, sizeof(tcache_bin_info)); /* Check ncached_max set by malloc_conf. */ for (szind_t i = 0; i < TCACHE_NBINS_MAX; i++) { - bool first_range = (i >= sz_size2index(256) && - i <= sz_size2index(1024)); - bool second_range = (i == sz_size2index(2048)); - bool third_range = (i == sz_size2index(8192)); + bool first_range = (i >= sz_size2index(256) + && i <= sz_size2index(1024)); + bool second_range = (i == sz_size2index(2048)); + bool third_range = (i == sz_size2index(8192)); cache_bin_sz_t target_ncached_max = 0; if (first_range || second_range || third_range) { - target_ncached_max = first_range ? 1001: - (second_range ? 0: 1); + target_ncached_max = first_range + ? 1001 + : (second_range ? 
0 : 1); expect_true(tcache_get_default_ncached_max_set(i), "Unexpected state for bin %u", i); expect_zu_eq(target_ncached_max, @@ -88,13 +88,13 @@ ncached_max_check(void* args) { "Unexpected mallctlnametomib() failure"); /* Test the ncached_max set with tcache on. */ - char inputs[100] = "8-128:1|160-160:11|170-320:22|224-8388609:0"; + char inputs[100] = "8-128:1|160-160:11|170-320:22|224-8388609:0"; char *inputp = inputs; expect_d_eq(mallctlbymib(mib_set, mib_set_len, NULL, NULL, - (void *)&inputp, sizeof(char *)), 0, - "Unexpected mallctlbymib() failure"); + (void *)&inputp, sizeof(char *)), + 0, "Unexpected mallctlbymib() failure"); for (szind_t i = 0; i < TCACHE_NBINS_MAX; i++) { - if (i >= sz_size2index(8) &&i <= sz_size2index(128)) { + if (i >= sz_size2index(8) && i <= sz_size2index(128)) { cache_bin_info_init(&tcache_bin_info[i], 1); } if (i == sz_size2index(160)) { @@ -119,16 +119,17 @@ ncached_max_check(void* args) { * the new setting will not be carried on. Instead, the default * settings will be applied. */ - bool e0 = false, e1; + bool e0 = false, e1; size_t bool_sz = sizeof(bool); expect_d_eq(mallctl("thread.tcache.enabled", (void *)&e1, &bool_sz, - (void *)&e0, bool_sz), 0, "Unexpected mallctl() error"); + (void *)&e0, bool_sz), + 0, "Unexpected mallctl() error"); expect_true(e1, "Unexpected previous tcache state"); strcpy(inputs, "0-112:8"); /* Setting returns ENOENT when the tcache is disabled. */ expect_d_eq(mallctlbymib(mib_set, mib_set_len, NULL, NULL, - (void *)&inputp, sizeof(char *)), ENOENT, - "Unexpected mallctlbymib() failure"); + (void *)&inputp, sizeof(char *)), + ENOENT, "Unexpected mallctlbymib() failure"); /* All ncached_max should return 0 once tcache is disabled. 
*/ for (szind_t i = 0; i < TCACHE_NBINS_MAX; i++) { cache_bin_info_init(&tcache_bin_info[i], 0); @@ -137,12 +138,13 @@ ncached_max_check(void* args) { e0 = true; expect_d_eq(mallctl("thread.tcache.enabled", (void *)&e1, &bool_sz, - (void *)&e0, bool_sz), 0, "Unexpected mallctl() error"); + (void *)&e0, bool_sz), + 0, "Unexpected mallctl() error"); expect_false(e1, "Unexpected previous tcache state"); memcpy(tcache_bin_info, tcache_bin_info_backup, sizeof(tcache_bin_info_backup)); for (szind_t i = tcache_nbins_get(tcache_slow); i < TCACHE_NBINS_MAX; - i++) { + i++) { cache_bin_info_init(&tcache_bin_info[i], 0); } check_bins_info(tcache_bin_info); @@ -152,22 +154,22 @@ ncached_max_check(void* args) { * resetting tcache_max. The ncached_max changes should stay. */ size_t tcache_max = 1024; - assert_d_eq(mallctl("thread.tcache.max", - NULL, NULL, (void *)&tcache_max, sizeof(size_t)),.0, - "Unexpected.mallctl().failure"); + assert_d_eq(mallctl("thread.tcache.max", NULL, NULL, + (void *)&tcache_max, sizeof(size_t)), + .0, "Unexpected.mallctl().failure"); for (szind_t i = sz_size2index(1024) + 1; i < TCACHE_NBINS_MAX; i++) { cache_bin_info_init(&tcache_bin_info[i], 0); } strcpy(inputs, "2048-6144:123"); expect_d_eq(mallctlbymib(mib_set, mib_set_len, NULL, NULL, - (void *)&inputp, sizeof(char *)), 0, - "Unexpected mallctlbymib() failure"); + (void *)&inputp, sizeof(char *)), + 0, "Unexpected mallctlbymib() failure"); check_bins_info(tcache_bin_info); tcache_max = 6144; - assert_d_eq(mallctl("thread.tcache.max", - NULL, NULL, (void *)&tcache_max, sizeof(size_t)),.0, - "Unexpected.mallctl().failure"); + assert_d_eq(mallctl("thread.tcache.max", NULL, NULL, + (void *)&tcache_max, sizeof(size_t)), + .0, "Unexpected.mallctl().failure"); memcpy(tcache_bin_info, tcache_bin_info_backup, sizeof(tcache_bin_info_backup)); for (szind_t i = sz_size2index(2048); i < TCACHE_NBINS_MAX; i++) { @@ -182,15 +184,15 @@ ncached_max_check(void* args) { /* Test an empty input, it should do 
nothing. */ strcpy(inputs, ""); expect_d_eq(mallctlbymib(mib_set, mib_set_len, NULL, NULL, - (void *)&inputp, sizeof(char *)), 0, - "Unexpected mallctlbymib() failure"); + (void *)&inputp, sizeof(char *)), + 0, "Unexpected mallctlbymib() failure"); check_bins_info(tcache_bin_info); /* Test a half-done string, it should return EINVAL and do nothing. */ strcpy(inputs, "4-1024:7|256-1024"); expect_d_eq(mallctlbymib(mib_set, mib_set_len, NULL, NULL, - (void *)&inputp, sizeof(char *)), EINVAL, - "Unexpected mallctlbymib() failure"); + (void *)&inputp, sizeof(char *)), + EINVAL, "Unexpected mallctlbymib() failure"); check_bins_info(tcache_bin_info); /* @@ -199,8 +201,8 @@ ncached_max_check(void* args) { */ strcpy(inputs, "1024-256:7"); expect_d_eq(mallctlbymib(mib_set, mib_set_len, NULL, NULL, - (void *)&inputp, sizeof(char *)), 0, - "Unexpected mallctlbymib() failure"); + (void *)&inputp, sizeof(char *)), + 0, "Unexpected mallctlbymib() failure"); check_bins_info(tcache_bin_info); /* @@ -216,8 +218,8 @@ ncached_max_check(void* args) { long_inputs[200 * 9 + 8] = '\0'; inputp = long_inputs; expect_d_eq(mallctlbymib(mib_set, mib_set_len, NULL, NULL, - (void *)&inputp, sizeof(char *)), EINVAL, - "Unexpected mallctlbymib() failure"); + (void *)&inputp, sizeof(char *)), + EINVAL, "Unexpected mallctlbymib() failure"); check_bins_info(tcache_bin_info); free(long_inputs); @@ -228,17 +230,17 @@ ncached_max_check(void* args) { strcpy(inputs, "k8-1024:77p"); inputp = inputs; expect_d_eq(mallctlbymib(mib_set, mib_set_len, NULL, NULL, - (void *)&inputp, sizeof(char *)), EINVAL, - "Unexpected mallctlbymib() failure"); + (void *)&inputp, sizeof(char *)), + EINVAL, "Unexpected mallctlbymib() failure"); check_bins_info(tcache_bin_info); /* Test large ncached_max, it should return success but capped. 
*/ strcpy(inputs, "1024-1024:65540"); expect_d_eq(mallctlbymib(mib_set, mib_set_len, NULL, NULL, - (void *)&inputp, sizeof(char *)), 0, - "Unexpected mallctlbymib() failure"); - cache_bin_info_init(&tcache_bin_info[sz_size2index(1024)], - CACHE_BIN_NCACHED_MAX); + (void *)&inputp, sizeof(char *)), + 0, "Unexpected mallctlbymib() failure"); + cache_bin_info_init( + &tcache_bin_info[sz_size2index(1024)], CACHE_BIN_NCACHED_MAX); check_bins_info(tcache_bin_info); return NULL; @@ -262,7 +264,5 @@ TEST_END int main(void) { - return test( - test_ncached_max); + return test(test_ncached_max); } - diff --git a/test/unit/nstime.c b/test/unit/nstime.c index 43fd3954..8c095d09 100644 --- a/test/unit/nstime.c +++ b/test/unit/nstime.c @@ -1,6 +1,6 @@ #include "test/jemalloc_test.h" -#define BILLION UINT64_C(1000000000) +#define BILLION UINT64_C(1000000000) TEST_BEGIN(test_nstime_init) { nstime_t nst; @@ -43,24 +43,24 @@ TEST_BEGIN(test_nstime_compare) { nstime_init2(&nstb, 42, 42); expect_d_eq(nstime_compare(&nsta, &nstb), 1, "nsta should be greater than nstb"); - expect_d_eq(nstime_compare(&nstb, &nsta), -1, - "nstb should be less than nsta"); + expect_d_eq( + nstime_compare(&nstb, &nsta), -1, "nstb should be less than nsta"); nstime_init2(&nstb, 42, 44); - expect_d_eq(nstime_compare(&nsta, &nstb), -1, - "nsta should be less than nstb"); + expect_d_eq( + nstime_compare(&nsta, &nstb), -1, "nsta should be less than nstb"); expect_d_eq(nstime_compare(&nstb, &nsta), 1, "nstb should be greater than nsta"); nstime_init2(&nstb, 41, BILLION - 1); expect_d_eq(nstime_compare(&nsta, &nstb), 1, "nsta should be greater than nstb"); - expect_d_eq(nstime_compare(&nstb, &nsta), -1, - "nstb should be less than nsta"); + expect_d_eq( + nstime_compare(&nstb, &nsta), -1, "nstb should be less than nsta"); nstime_init2(&nstb, 43, 0); - expect_d_eq(nstime_compare(&nsta, &nstb), -1, - "nsta should be less than nstb"); + expect_d_eq( + nstime_compare(&nsta, &nstb), -1, "nsta should be less than nstb"); 
expect_d_eq(nstime_compare(&nstb, &nsta), 1, "nstb should be greater than nsta"); } @@ -73,15 +73,15 @@ TEST_BEGIN(test_nstime_add) { nstime_copy(&nstb, &nsta); nstime_add(&nsta, &nstb); nstime_init2(&nstb, 84, 86); - expect_d_eq(nstime_compare(&nsta, &nstb), 0, - "Incorrect addition result"); + expect_d_eq( + nstime_compare(&nsta, &nstb), 0, "Incorrect addition result"); nstime_init2(&nsta, 42, BILLION - 1); nstime_copy(&nstb, &nsta); nstime_add(&nsta, &nstb); nstime_init2(&nstb, 85, BILLION - 2); - expect_d_eq(nstime_compare(&nsta, &nstb), 0, - "Incorrect addition result"); + expect_d_eq( + nstime_compare(&nsta, &nstb), 0, "Incorrect addition result"); } TEST_END @@ -91,14 +91,14 @@ TEST_BEGIN(test_nstime_iadd) { nstime_init2(&nsta, 42, BILLION - 1); nstime_iadd(&nsta, 1); nstime_init2(&nstb, 43, 0); - expect_d_eq(nstime_compare(&nsta, &nstb), 0, - "Incorrect addition result"); + expect_d_eq( + nstime_compare(&nsta, &nstb), 0, "Incorrect addition result"); nstime_init2(&nsta, 42, 1); nstime_iadd(&nsta, BILLION + 1); nstime_init2(&nstb, 43, 2); - expect_d_eq(nstime_compare(&nsta, &nstb), 0, - "Incorrect addition result"); + expect_d_eq( + nstime_compare(&nsta, &nstb), 0, "Incorrect addition result"); } TEST_END @@ -109,15 +109,15 @@ TEST_BEGIN(test_nstime_subtract) { nstime_copy(&nstb, &nsta); nstime_subtract(&nsta, &nstb); nstime_init_zero(&nstb); - expect_d_eq(nstime_compare(&nsta, &nstb), 0, - "Incorrect subtraction result"); + expect_d_eq( + nstime_compare(&nsta, &nstb), 0, "Incorrect subtraction result"); nstime_init2(&nsta, 42, 43); nstime_init2(&nstb, 41, 44); nstime_subtract(&nsta, &nstb); nstime_init2(&nstb, 0, BILLION - 1); - expect_d_eq(nstime_compare(&nsta, &nstb), 0, - "Incorrect subtraction result"); + expect_d_eq( + nstime_compare(&nsta, &nstb), 0, "Incorrect subtraction result"); } TEST_END @@ -125,16 +125,16 @@ TEST_BEGIN(test_nstime_isubtract) { nstime_t nsta, nstb; nstime_init2(&nsta, 42, 43); - nstime_isubtract(&nsta, 42*BILLION + 43); + 
nstime_isubtract(&nsta, 42 * BILLION + 43); nstime_init_zero(&nstb); - expect_d_eq(nstime_compare(&nsta, &nstb), 0, - "Incorrect subtraction result"); + expect_d_eq( + nstime_compare(&nsta, &nstb), 0, "Incorrect subtraction result"); nstime_init2(&nsta, 42, 43); - nstime_isubtract(&nsta, 41*BILLION + 44); + nstime_isubtract(&nsta, 41 * BILLION + 44); nstime_init2(&nstb, 0, BILLION - 1); - expect_d_eq(nstime_compare(&nsta, &nstb), 0, - "Incorrect subtraction result"); + expect_d_eq( + nstime_compare(&nsta, &nstb), 0, "Incorrect subtraction result"); } TEST_END @@ -144,14 +144,14 @@ TEST_BEGIN(test_nstime_imultiply) { nstime_init2(&nsta, 42, 43); nstime_imultiply(&nsta, 10); nstime_init2(&nstb, 420, 430); - expect_d_eq(nstime_compare(&nsta, &nstb), 0, - "Incorrect multiplication result"); + expect_d_eq( + nstime_compare(&nsta, &nstb), 0, "Incorrect multiplication result"); nstime_init2(&nsta, 42, 666666666); nstime_imultiply(&nsta, 3); nstime_init2(&nstb, 127, 999999998); - expect_d_eq(nstime_compare(&nsta, &nstb), 0, - "Incorrect multiplication result"); + expect_d_eq( + nstime_compare(&nsta, &nstb), 0, "Incorrect multiplication result"); } TEST_END @@ -162,15 +162,15 @@ TEST_BEGIN(test_nstime_idivide) { nstime_copy(&nstb, &nsta); nstime_imultiply(&nsta, 10); nstime_idivide(&nsta, 10); - expect_d_eq(nstime_compare(&nsta, &nstb), 0, - "Incorrect division result"); + expect_d_eq( + nstime_compare(&nsta, &nstb), 0, "Incorrect division result"); nstime_init2(&nsta, 42, 666666666); nstime_copy(&nstb, &nsta); nstime_imultiply(&nsta, 3); nstime_idivide(&nsta, 3); - expect_d_eq(nstime_compare(&nsta, &nstb), 0, - "Incorrect division result"); + expect_d_eq( + nstime_compare(&nsta, &nstb), 0, "Incorrect division result"); } TEST_END @@ -180,24 +180,24 @@ TEST_BEGIN(test_nstime_divide) { nstime_init2(&nsta, 42, 43); nstime_copy(&nstb, &nsta); nstime_imultiply(&nsta, 10); - expect_u64_eq(nstime_divide(&nsta, &nstb), 10, - "Incorrect division result"); + expect_u64_eq( + 
nstime_divide(&nsta, &nstb), 10, "Incorrect division result"); nstime_init2(&nsta, 42, 43); nstime_copy(&nstb, &nsta); nstime_imultiply(&nsta, 10); nstime_init(&nstc, 1); nstime_add(&nsta, &nstc); - expect_u64_eq(nstime_divide(&nsta, &nstb), 10, - "Incorrect division result"); + expect_u64_eq( + nstime_divide(&nsta, &nstb), 10, "Incorrect division result"); nstime_init2(&nsta, 42, 43); nstime_copy(&nstb, &nsta); nstime_imultiply(&nsta, 10); nstime_init(&nstc, 1); nstime_subtract(&nsta, &nstc); - expect_u64_eq(nstime_divide(&nsta, &nstb), 9, - "Incorrect division result"); + expect_u64_eq( + nstime_divide(&nsta, &nstb), 9, "Incorrect division result"); } TEST_END @@ -213,8 +213,8 @@ test_nstime_since_once(nstime_t *t) { nstime_copy(&new_t, t); nstime_subtract(&new_t, &old_t); - expect_u64_ge(nstime_ns(&new_t), ns_since, - "Incorrect time since result"); + expect_u64_ge( + nstime_ns(&new_t), ns_since, "Incorrect time since result"); } TEST_BEGIN(test_nstime_ns_since) { @@ -253,19 +253,9 @@ TEST_END int main(void) { - return test( - test_nstime_init, - test_nstime_init2, - test_nstime_copy, - test_nstime_compare, - test_nstime_add, - test_nstime_iadd, - test_nstime_subtract, - test_nstime_isubtract, - test_nstime_imultiply, - test_nstime_idivide, - test_nstime_divide, - test_nstime_ns_since, - test_nstime_ms_since, - test_nstime_monotonic); + return test(test_nstime_init, test_nstime_init2, test_nstime_copy, + test_nstime_compare, test_nstime_add, test_nstime_iadd, + test_nstime_subtract, test_nstime_isubtract, test_nstime_imultiply, + test_nstime_idivide, test_nstime_divide, test_nstime_ns_since, + test_nstime_ms_since, test_nstime_monotonic); } diff --git a/test/unit/oversize_threshold.c b/test/unit/oversize_threshold.c index 95ce6537..5d9aae10 100644 --- a/test/unit/oversize_threshold.c +++ b/test/unit/oversize_threshold.c @@ -5,7 +5,7 @@ static void arena_mallctl(const char *mallctl_str, unsigned arena, void *oldp, size_t *oldlen, void *newp, size_t newlen) { - 
int err; + int err; char buf[100]; malloc_snprintf(buf, sizeof(buf), mallctl_str, arena); @@ -14,13 +14,13 @@ arena_mallctl(const char *mallctl_str, unsigned arena, void *oldp, } TEST_BEGIN(test_oversize_threshold_get_set) { - int err; + int err; size_t old_threshold; size_t new_threshold; size_t threshold_sz = sizeof(old_threshold); unsigned arena; - size_t arena_sz = sizeof(arena); + size_t arena_sz = sizeof(arena); err = mallctl("arenas.create", (void *)&arena, &arena_sz, NULL, 0); expect_d_eq(0, err, "Arena creation failed"); @@ -38,13 +38,14 @@ TEST_BEGIN(test_oversize_threshold_get_set) { /* Just a read */ arena_mallctl("arena.%u.oversize_threshold", arena, &old_threshold, &threshold_sz, NULL, 0); - expect_zu_eq(2 * 1024 * 1024, old_threshold, "Should have read old value"); + expect_zu_eq( + 2 * 1024 * 1024, old_threshold, "Should have read old value"); } TEST_END static size_t max_purged = 0; static bool -purge_forced_record_max(extent_hooks_t* hooks, void *addr, size_t sz, +purge_forced_record_max(extent_hooks_t *hooks, void *addr, size_t sz, size_t offset, size_t length, unsigned arena_ind) { if (length > max_purged) { max_purged = length; @@ -73,7 +74,7 @@ TEST_BEGIN(test_oversize_threshold) { int err; unsigned arena; - size_t arena_sz = sizeof(arena); + size_t arena_sz = sizeof(arena); err = mallctl("arenas.create", (void *)&arena, &arena_sz, NULL, 0); expect_d_eq(0, err, "Arena creation failed"); arena_mallctl("arena.%u.extent_hooks", arena, NULL, NULL, &extent_hooks, @@ -121,8 +122,8 @@ TEST_BEGIN(test_oversize_threshold) { ptr = mallocx(2 * 1024 * 1024, MALLOCX_ARENA(arena)); dallocx(ptr, MALLOCX_TCACHE_NONE); if (!is_background_thread_enabled()) { - expect_zu_ge(max_purged, 2 * 1024 * 1024, - "Expected a 2MB purge"); + expect_zu_ge( + max_purged, 2 * 1024 * 1024, "Expected a 2MB purge"); } } TEST_END @@ -130,7 +131,5 @@ TEST_END int main(void) { return test_no_reentrancy( - test_oversize_threshold_get_set, - test_oversize_threshold); + 
test_oversize_threshold_get_set, test_oversize_threshold); } - diff --git a/test/unit/pa.c b/test/unit/pa.c index d44bb95c..8552225f 100644 --- a/test/unit/pa.c +++ b/test/unit/pa.c @@ -16,8 +16,8 @@ merge_hook(extent_hooks_t *extent_hooks, void *addr_a, size_t size_a, } static bool -split_hook(extent_hooks_t *extent_hooks, void *addr, size_t size, - size_t size_a, size_t size_b, bool committed, unsigned arena_ind) { +split_hook(extent_hooks_t *extent_hooks, void *addr, size_t size, size_t size_a, + size_t size_b, bool committed, unsigned arena_ind) { return !maps_coalesce; } @@ -39,13 +39,13 @@ init_test_extent_hooks(extent_hooks_t *hooks) { typedef struct test_data_s test_data_t; struct test_data_s { - pa_shard_t shard; - pa_central_t central; - base_t *base; - emap_t emap; + pa_shard_t shard; + pa_central_t central; + base_t *base; + emap_t emap; pa_shard_stats_t stats; - malloc_mutex_t stats_mtx; - extent_hooks_t hooks; + malloc_mutex_t stats_mtx; + extent_hooks_t hooks; }; static test_data_t * @@ -66,8 +66,8 @@ init_test_data(ssize_t dirty_decay_ms, ssize_t muzzy_decay_ms) { nstime_t time; nstime_init(&time, 0); - err = pa_central_init(&test_data->central, base, opt_hpa, - &hpa_hooks_default); + err = pa_central_init( + &test_data->central, base, opt_hpa, &hpa_hooks_default); assert_false(err, ""); const size_t pa_oversize_threshold = 8 * 1024 * 1024; @@ -80,7 +80,8 @@ init_test_data(ssize_t dirty_decay_ms, ssize_t muzzy_decay_ms) { return test_data; } -void destroy_test_data(test_data_t *data) { +void +destroy_test_data(test_data_t *data) { base_delete(TSDN_NULL, data->base); free(data); } @@ -89,28 +90,28 @@ static void * do_alloc_free_purge(void *arg) { test_data_t *test_data = (test_data_t *)arg; for (int i = 0; i < 10 * 1000; i++) { - bool deferred_work_generated = false; + bool deferred_work_generated = false; edata_t *edata = pa_alloc(TSDN_NULL, &test_data->shard, PAGE, PAGE, /* slab */ false, /* szind */ 0, /* zero */ false, /* guarded */ false, 
&deferred_work_generated); assert_ptr_not_null(edata, ""); pa_dalloc(TSDN_NULL, &test_data->shard, edata, &deferred_work_generated); - malloc_mutex_lock(TSDN_NULL, - &test_data->shard.pac.decay_dirty.mtx); + malloc_mutex_lock( + TSDN_NULL, &test_data->shard.pac.decay_dirty.mtx); pac_decay_all(TSDN_NULL, &test_data->shard.pac, &test_data->shard.pac.decay_dirty, &test_data->shard.pac.stats->decay_dirty, &test_data->shard.pac.ecache_dirty, true); - malloc_mutex_unlock(TSDN_NULL, - &test_data->shard.pac.decay_dirty.mtx); + malloc_mutex_unlock( + TSDN_NULL, &test_data->shard.pac.decay_dirty.mtx); } return NULL; } TEST_BEGIN(test_alloc_free_purge_thds) { test_data_t *test_data = init_test_data(0, 0); - thd_t thds[4]; + thd_t thds[4]; for (int i = 0; i < 4; i++) { thd_create(&thds[i], do_alloc_free_purge, test_data); } @@ -122,6 +123,5 @@ TEST_END int main(void) { - return test( - test_alloc_free_purge_thds); + return test(test_alloc_free_purge_thds); } diff --git a/test/unit/pack.c b/test/unit/pack.c index e6392825..e3024512 100644 --- a/test/unit/pack.c +++ b/test/unit/pack.c @@ -4,9 +4,9 @@ * Size class that is a divisor of the page size, ideally 4+ regions per run. */ #if LG_PAGE <= 14 -#define SZ (ZU(1) << (LG_PAGE - 2)) +# define SZ (ZU(1) << (LG_PAGE - 2)) #else -#define SZ ZU(4096) +# define SZ ZU(4096) #endif /* @@ -14,11 +14,11 @@ * if mmap()ed memory grows downward, downward growth of mmap()ed memory is * tested. 
*/ -#define NSLABS 8 +#define NSLABS 8 static unsigned binind_compute(void) { - size_t sz; + size_t sz; unsigned nbins, i; sz = sizeof(nbins); @@ -27,16 +27,17 @@ binind_compute(void) { for (i = 0; i < nbins; i++) { size_t mib[4]; - size_t miblen = sizeof(mib)/sizeof(size_t); + size_t miblen = sizeof(mib) / sizeof(size_t); size_t size; - expect_d_eq(mallctlnametomib("arenas.bin.0.size", mib, - &miblen), 0, "Unexpected mallctlnametomb failure"); + expect_d_eq(mallctlnametomib("arenas.bin.0.size", mib, &miblen), + 0, "Unexpected mallctlnametomb failure"); mib[2] = (size_t)i; sz = sizeof(size); - expect_d_eq(mallctlbymib(mib, miblen, (void *)&size, &sz, NULL, - 0), 0, "Unexpected mallctlbymib failure"); + expect_d_eq( + mallctlbymib(mib, miblen, (void *)&size, &sz, NULL, 0), 0, + "Unexpected mallctlbymib failure"); if (size == SZ) { return i; } @@ -49,24 +50,24 @@ binind_compute(void) { static size_t nregs_per_run_compute(void) { uint32_t nregs; - size_t sz; + size_t sz; unsigned binind = binind_compute(); - size_t mib[4]; - size_t miblen = sizeof(mib)/sizeof(size_t); + size_t mib[4]; + size_t miblen = sizeof(mib) / sizeof(size_t); expect_d_eq(mallctlnametomib("arenas.bin.0.nregs", mib, &miblen), 0, "Unexpected mallctlnametomb failure"); mib[2] = (size_t)binind; sz = sizeof(nregs); - expect_d_eq(mallctlbymib(mib, miblen, (void *)&nregs, &sz, NULL, - 0), 0, "Unexpected mallctlbymib failure"); + expect_d_eq(mallctlbymib(mib, miblen, (void *)&nregs, &sz, NULL, 0), 0, + "Unexpected mallctlbymib failure"); return nregs; } static unsigned arenas_create_mallctl(void) { unsigned arena_ind; - size_t sz; + size_t sz; sz = sizeof(arena_ind); expect_d_eq(mallctl("arenas.create", (void *)&arena_ind, &sz, NULL, 0), @@ -78,7 +79,7 @@ arenas_create_mallctl(void) { static void arena_reset_mallctl(unsigned arena_ind) { size_t mib[3]; - size_t miblen = sizeof(mib)/sizeof(size_t); + size_t miblen = sizeof(mib) / sizeof(size_t); expect_d_eq(mallctlnametomib("arena.0.reset", mib, &miblen), 
0, "Unexpected mallctlnametomib() failure"); @@ -88,23 +89,23 @@ arena_reset_mallctl(unsigned arena_ind) { } TEST_BEGIN(test_pack) { - bool prof_enabled; + bool prof_enabled; size_t sz = sizeof(prof_enabled); if (mallctl("opt.prof", (void *)&prof_enabled, &sz, NULL, 0) == 0) { test_skip_if(prof_enabled); } unsigned arena_ind = arenas_create_mallctl(); - size_t nregs_per_run = nregs_per_run_compute(); - size_t nregs = nregs_per_run * NSLABS; + size_t nregs_per_run = nregs_per_run_compute(); + size_t nregs = nregs_per_run * NSLABS; VARIABLE_ARRAY(void *, ptrs, nregs); size_t i, j, offset; /* Fill matrix. */ for (i = offset = 0; i < NSLABS; i++) { for (j = 0; j < nregs_per_run; j++) { - void *p = mallocx(SZ, MALLOCX_ARENA(arena_ind) | - MALLOCX_TCACHE_NONE); + void *p = mallocx( + SZ, MALLOCX_ARENA(arena_ind) | MALLOCX_TCACHE_NONE); expect_ptr_not_null(p, "Unexpected mallocx(%zu, MALLOCX_ARENA(%u) |" " MALLOCX_TCACHE_NONE) failure, run=%zu, reg=%zu", @@ -119,16 +120,15 @@ TEST_BEGIN(test_pack) { * layout policy. */ offset = 0; - for (i = offset = 0; - i < NSLABS; - i++, offset = (offset + 1) % nregs_per_run) { + for (i = offset = 0; i < NSLABS; + i++, offset = (offset + 1) % nregs_per_run) { for (j = 0; j < nregs_per_run; j++) { void *p = ptrs[(i * nregs_per_run) + j]; if (offset == j) { continue; } - dallocx(p, MALLOCX_ARENA(arena_ind) | - MALLOCX_TCACHE_NONE); + dallocx( + p, MALLOCX_ARENA(arena_ind) | MALLOCX_TCACHE_NONE); } } @@ -137,17 +137,16 @@ TEST_BEGIN(test_pack) { * that the matrix is unmodified. 
*/ offset = 0; - for (i = offset = 0; - i < NSLABS; - i++, offset = (offset + 1) % nregs_per_run) { + for (i = offset = 0; i < NSLABS; + i++, offset = (offset + 1) % nregs_per_run) { for (j = 0; j < nregs_per_run; j++) { void *p; if (offset == j) { continue; } - p = mallocx(SZ, MALLOCX_ARENA(arena_ind) | - MALLOCX_TCACHE_NONE); + p = mallocx( + SZ, MALLOCX_ARENA(arena_ind) | MALLOCX_TCACHE_NONE); expect_ptr_eq(p, ptrs[(i * nregs_per_run) + j], "Unexpected refill discrepancy, run=%zu, reg=%zu\n", i, j); @@ -161,6 +160,5 @@ TEST_END int main(void) { - return test( - test_pack); + return test(test_pack); } diff --git a/test/unit/pages.c b/test/unit/pages.c index 8dfd1a72..dbee2f0c 100644 --- a/test/unit/pages.c +++ b/test/unit/pages.c @@ -2,8 +2,8 @@ TEST_BEGIN(test_pages_huge) { size_t alloc_size; - bool commit; - void *pages, *hugepage; + bool commit; + void *pages, *hugepage; alloc_size = HUGEPAGE * 2 - PAGE; commit = true; @@ -11,11 +11,12 @@ TEST_BEGIN(test_pages_huge) { expect_ptr_not_null(pages, "Unexpected pages_map() error"); if (init_system_thp_mode == thp_mode_default) { - hugepage = (void *)(ALIGNMENT_CEILING((uintptr_t)pages, HUGEPAGE)); - expect_b_ne(pages_huge(hugepage, HUGEPAGE), have_madvise_huge, - "Unexpected pages_huge() result"); - expect_false(pages_nohuge(hugepage, HUGEPAGE), - "Unexpected pages_nohuge() result"); + hugepage = (void *)(ALIGNMENT_CEILING( + (uintptr_t)pages, HUGEPAGE)); + expect_b_ne(pages_huge(hugepage, HUGEPAGE), have_madvise_huge, + "Unexpected pages_huge() result"); + expect_false(pages_nohuge(hugepage, HUGEPAGE), + "Unexpected pages_nohuge() result"); } pages_unmap(pages, alloc_size); @@ -24,6 +25,5 @@ TEST_END int main(void) { - return test( - test_pages_huge); + return test(test_pages_huge); } diff --git a/test/unit/peak.c b/test/unit/peak.c index 11129785..80eda30d 100644 --- a/test/unit/peak.c +++ b/test/unit/peak.c @@ -4,11 +4,10 @@ TEST_BEGIN(test_peak) { peak_t peak = PEAK_INITIALIZER; - expect_u64_eq(0, 
peak_max(&peak), - "Peak should be zero at initialization"); + expect_u64_eq( + 0, peak_max(&peak), "Peak should be zero at initialization"); peak_update(&peak, 100, 50); - expect_u64_eq(50, peak_max(&peak), - "Missed update"); + expect_u64_eq(50, peak_max(&peak), "Missed update"); peak_update(&peak, 100, 100); expect_u64_eq(50, peak_max(&peak), "Dallocs shouldn't change peak"); peak_update(&peak, 100, 200); @@ -42,6 +41,5 @@ TEST_END int main(void) { - return test_no_reentrancy( - test_peak); + return test_no_reentrancy(test_peak); } diff --git a/test/unit/ph.c b/test/unit/ph.c index 0339f993..c9e4da9c 100644 --- a/test/unit/ph.c +++ b/test/unit/ph.c @@ -8,9 +8,9 @@ ph_structs(heap, node_t, BFS_ENUMERATE_MAX); struct node_s { #define NODE_MAGIC 0x9823af7e - uint32_t magic; + uint32_t magic; heap_link_t link; - uint64_t key; + uint64_t key; }; static int @@ -31,7 +31,6 @@ node_cmp(const node_t *a, const node_t *b) { static int node_cmp_magic(const node_t *a, const node_t *b) { - expect_u32_eq(a->magic, NODE_MAGIC, "Bad magic"); expect_u32_eq(b->magic, NODE_MAGIC, "Bad magic"); @@ -58,12 +57,12 @@ node_lchild_get(const node_t *node) { static void node_print(const node_t *node, unsigned depth) { unsigned i; - node_t *leftmost_child, *sibling; + node_t *leftmost_child, *sibling; for (i = 0; i < depth; i++) { malloc_printf("\t"); } - malloc_printf("%2"FMTu64"\n", node->key); + malloc_printf("%2" FMTu64 "\n", node->key); leftmost_child = node_lchild_get(node); if (leftmost_child == NULL) { @@ -71,8 +70,8 @@ node_print(const node_t *node, unsigned depth) { } node_print(leftmost_child, depth + 1); - for (sibling = node_next_get(leftmost_child); sibling != - NULL; sibling = node_next_get(sibling)) { + for (sibling = node_next_get(leftmost_child); sibling != NULL; + sibling = node_next_get(sibling)) { node_print(sibling, depth + 1); } } @@ -89,7 +88,7 @@ heap_print(const heap_t *heap) { node_print(heap->ph.root, 0); for (auxelm = node_next_get(heap->ph.root); auxelm != 
NULL; - auxelm = node_next_get(auxelm)) { + auxelm = node_next_get(auxelm)) { expect_ptr_eq(node_next_get(node_prev_get(auxelm)), auxelm, "auxelm's prev doesn't link to auxelm"); node_print(auxelm, 0); @@ -102,7 +101,7 @@ label_return: static unsigned node_validate(const node_t *node, const node_t *parent) { unsigned nnodes = 1; - node_t *leftmost_child, *sibling; + node_t *leftmost_child, *sibling; if (parent != NULL) { expect_d_ge(node_cmp_magic(node, parent), 0, @@ -113,12 +112,12 @@ node_validate(const node_t *node, const node_t *parent) { if (leftmost_child == NULL) { return nnodes; } - expect_ptr_eq(node_prev_get(leftmost_child), - (void *)node, "Leftmost child does not link to node"); + expect_ptr_eq(node_prev_get(leftmost_child), (void *)node, + "Leftmost child does not link to node"); nnodes += node_validate(leftmost_child, node); - for (sibling = node_next_get(leftmost_child); sibling != - NULL; sibling = node_next_get(sibling)) { + for (sibling = node_next_get(leftmost_child); sibling != NULL; + sibling = node_next_get(sibling)) { expect_ptr_eq(node_next_get(node_prev_get(sibling)), sibling, "sibling's prev doesn't link to sibling"); nnodes += node_validate(sibling, node); @@ -129,7 +128,7 @@ node_validate(const node_t *node, const node_t *parent) { static unsigned heap_validate(const heap_t *heap) { unsigned nnodes = 0; - node_t *auxelm; + node_t *auxelm; if (heap->ph.root == NULL) { goto label_return; @@ -138,7 +137,7 @@ heap_validate(const heap_t *heap) { nnodes += node_validate(heap->ph.root, NULL); for (auxelm = node_next_get(heap->ph.root); auxelm != NULL; - auxelm = node_next_get(auxelm)) { + auxelm = node_next_get(auxelm)) { expect_ptr_eq(node_next_get(node_prev_get(auxelm)), auxelm, "auxelm's prev doesn't link to auxelm"); nnodes += node_validate(auxelm, NULL); @@ -186,10 +185,10 @@ TEST_BEGIN(test_ph_random) { #define NNODES 25 #define NBAGS 250 #define SEED 42 - sfmt_t *sfmt; + sfmt_t *sfmt; uint64_t bag[NNODES]; - heap_t heap; - node_t 
nodes[NNODES]; + heap_t heap; + node_t nodes[NNODES]; unsigned i, j, k; sfmt = init_gen_rand(SEED); @@ -216,8 +215,8 @@ TEST_BEGIN(test_ph_random) { for (j = 1; j <= NNODES; j++) { /* Initialize heap and nodes. */ heap_new(&heap); - expect_u_eq(heap_validate(&heap), 0, - "Incorrect node count"); + expect_u_eq( + heap_validate(&heap), 0, "Incorrect node count"); for (k = 0; k < j; k++) { nodes[k].magic = NODE_MAGIC; nodes[k].key = bag[k]; @@ -237,8 +236,8 @@ TEST_BEGIN(test_ph_random) { "Incorrect node count"); } - expect_false(heap_empty(&heap), - "Heap should not be empty"); + expect_false( + heap_empty(&heap), "Heap should not be empty"); /* Enumerate nodes. */ heap_enumerate_helper_t helper; @@ -247,14 +246,14 @@ TEST_BEGIN(test_ph_random) { expect_u_eq(max_queue_size, BFS_ENUMERATE_MAX, "Incorrect bfs queue length initialized"); assert(max_queue_size == BFS_ENUMERATE_MAX); - heap_enumerate_prepare(&heap, &helper, - BFS_ENUMERATE_MAX, max_queue_size); + heap_enumerate_prepare( + &heap, &helper, BFS_ENUMERATE_MAX, max_queue_size); size_t node_count = 0; - while(heap_enumerate_next(&heap, &helper)) { - node_count ++; + while (heap_enumerate_next(&heap, &helper)) { + node_count++; } - expect_lu_eq(node_count, j, - "Unexpected enumeration results."); + expect_lu_eq( + node_count, j, "Unexpected enumeration results."); /* Remove nodes. 
*/ switch (i % 6) { @@ -263,13 +262,13 @@ TEST_BEGIN(test_ph_random) { expect_u_eq(heap_validate(&heap), j - k, "Incorrect node count"); node_remove(&heap, &nodes[k]); - expect_u_eq(heap_validate(&heap), j - k - - 1, "Incorrect node count"); + expect_u_eq(heap_validate(&heap), + j - k - 1, "Incorrect node count"); } break; case 1: for (k = j; k > 0; k--) { - node_remove(&heap, &nodes[k-1]); + node_remove(&heap, &nodes[k - 1]); expect_u_eq(heap_validate(&heap), k - 1, "Incorrect node count"); } @@ -278,58 +277,62 @@ TEST_BEGIN(test_ph_random) { node_t *prev = NULL; for (k = 0; k < j; k++) { node_t *node = node_remove_first(&heap); - expect_u_eq(heap_validate(&heap), j - k - - 1, "Incorrect node count"); + expect_u_eq(heap_validate(&heap), + j - k - 1, "Incorrect node count"); if (prev != NULL) { - expect_d_ge(node_cmp(node, - prev), 0, + expect_d_ge( + node_cmp(node, prev), 0, "Bad removal order"); } prev = node; } break; - } case 3: { + } + case 3: { node_t *prev = NULL; for (k = 0; k < j; k++) { node_t *node = heap_first(&heap); expect_u_eq(heap_validate(&heap), j - k, "Incorrect node count"); if (prev != NULL) { - expect_d_ge(node_cmp(node, - prev), 0, + expect_d_ge( + node_cmp(node, prev), 0, "Bad removal order"); } node_remove(&heap, node); - expect_u_eq(heap_validate(&heap), j - k - - 1, "Incorrect node count"); + expect_u_eq(heap_validate(&heap), + j - k - 1, "Incorrect node count"); prev = node; } break; - } case 4: { + } + case 4: { for (k = 0; k < j; k++) { node_remove_any(&heap); - expect_u_eq(heap_validate(&heap), j - k - - 1, "Incorrect node count"); + expect_u_eq(heap_validate(&heap), + j - k - 1, "Incorrect node count"); } break; - } case 5: { + } + case 5: { for (k = 0; k < j; k++) { node_t *node = heap_any(&heap); expect_u_eq(heap_validate(&heap), j - k, "Incorrect node count"); node_remove(&heap, node); - expect_u_eq(heap_validate(&heap), j - k - - 1, "Incorrect node count"); + expect_u_eq(heap_validate(&heap), + j - k - 1, "Incorrect node count"); 
} break; - } default: + } + default: not_reached(); } - expect_ptr_null(heap_first(&heap), - "Heap should be empty"); - expect_ptr_null(heap_any(&heap), - "Heap should be empty"); + expect_ptr_null( + heap_first(&heap), "Heap should be empty"); + expect_ptr_null( + heap_any(&heap), "Heap should be empty"); expect_true(heap_empty(&heap), "Heap should be empty"); } } @@ -341,7 +344,5 @@ TEST_END int main(void) { - return test( - test_ph_empty, - test_ph_random); + return test(test_ph_empty, test_ph_random); } diff --git a/test/unit/prng.c b/test/unit/prng.c index a6d9b014..20b8470e 100644 --- a/test/unit/prng.c +++ b/test/unit/prng.c @@ -9,32 +9,31 @@ TEST_BEGIN(test_prng_lg_range_u32) { ra = prng_lg_range_u32(&sa, 32); sa = 42; rb = prng_lg_range_u32(&sa, 32); - expect_u32_eq(ra, rb, - "Repeated generation should produce repeated results"); + expect_u32_eq( + ra, rb, "Repeated generation should produce repeated results"); sb = 42; rb = prng_lg_range_u32(&sb, 32); - expect_u32_eq(ra, rb, - "Equivalent generation should produce equivalent results"); + expect_u32_eq( + ra, rb, "Equivalent generation should produce equivalent results"); sa = 42; ra = prng_lg_range_u32(&sa, 32); rb = prng_lg_range_u32(&sa, 32); - expect_u32_ne(ra, rb, - "Full-width results must not immediately repeat"); + expect_u32_ne(ra, rb, "Full-width results must not immediately repeat"); sa = 42; ra = prng_lg_range_u32(&sa, 32); for (lg_range = 31; lg_range > 0; lg_range--) { sb = 42; rb = prng_lg_range_u32(&sb, lg_range); - expect_u32_eq((rb & (UINT32_C(0xffffffff) << lg_range)), - 0, "High order bits should be 0, lg_range=%u", lg_range); + expect_u32_eq((rb & (UINT32_C(0xffffffff) << lg_range)), 0, + "High order bits should be 0, lg_range=%u", lg_range); expect_u32_eq(rb, (ra >> (32 - lg_range)), "Expected high order bits of full-width result, " - "lg_range=%u", lg_range); + "lg_range=%u", + lg_range); } - } TEST_END @@ -46,19 +45,18 @@ TEST_BEGIN(test_prng_lg_range_u64) { ra = 
prng_lg_range_u64(&sa, 64); sa = 42; rb = prng_lg_range_u64(&sa, 64); - expect_u64_eq(ra, rb, - "Repeated generation should produce repeated results"); + expect_u64_eq( + ra, rb, "Repeated generation should produce repeated results"); sb = 42; rb = prng_lg_range_u64(&sb, 64); - expect_u64_eq(ra, rb, - "Equivalent generation should produce equivalent results"); + expect_u64_eq( + ra, rb, "Equivalent generation should produce equivalent results"); sa = 42; ra = prng_lg_range_u64(&sa, 64); rb = prng_lg_range_u64(&sa, 64); - expect_u64_ne(ra, rb, - "Full-width results must not immediately repeat"); + expect_u64_ne(ra, rb, "Full-width results must not immediately repeat"); sa = 42; ra = prng_lg_range_u64(&sa, 64); @@ -69,47 +67,48 @@ TEST_BEGIN(test_prng_lg_range_u64) { 0, "High order bits should be 0, lg_range=%u", lg_range); expect_u64_eq(rb, (ra >> (64 - lg_range)), "Expected high order bits of full-width result, " - "lg_range=%u", lg_range); + "lg_range=%u", + lg_range); } } TEST_END TEST_BEGIN(test_prng_lg_range_zu) { - size_t sa, sb; - size_t ra, rb; + size_t sa, sb; + size_t ra, rb; unsigned lg_range; sa = 42; ra = prng_lg_range_zu(&sa, ZU(1) << (3 + LG_SIZEOF_PTR)); sa = 42; rb = prng_lg_range_zu(&sa, ZU(1) << (3 + LG_SIZEOF_PTR)); - expect_zu_eq(ra, rb, - "Repeated generation should produce repeated results"); + expect_zu_eq( + ra, rb, "Repeated generation should produce repeated results"); sb = 42; rb = prng_lg_range_zu(&sb, ZU(1) << (3 + LG_SIZEOF_PTR)); - expect_zu_eq(ra, rb, - "Equivalent generation should produce equivalent results"); + expect_zu_eq( + ra, rb, "Equivalent generation should produce equivalent results"); sa = 42; ra = prng_lg_range_zu(&sa, ZU(1) << (3 + LG_SIZEOF_PTR)); rb = prng_lg_range_zu(&sa, ZU(1) << (3 + LG_SIZEOF_PTR)); - expect_zu_ne(ra, rb, - "Full-width results must not immediately repeat"); + expect_zu_ne(ra, rb, "Full-width results must not immediately repeat"); sa = 42; ra = prng_lg_range_zu(&sa, ZU(1) << (3 + LG_SIZEOF_PTR)); 
for (lg_range = (ZU(1) << (3 + LG_SIZEOF_PTR)) - 1; lg_range > 0; - lg_range--) { + lg_range--) { sb = 42; rb = prng_lg_range_zu(&sb, lg_range); - expect_zu_eq((rb & (SIZE_T_MAX << lg_range)), - 0, "High order bits should be 0, lg_range=%u", lg_range); - expect_zu_eq(rb, (ra >> ((ZU(1) << (3 + LG_SIZEOF_PTR)) - - lg_range)), "Expected high order bits of full-width " - "result, lg_range=%u", lg_range); + expect_zu_eq((rb & (SIZE_T_MAX << lg_range)), 0, + "High order bits should be 0, lg_range=%u", lg_range); + expect_zu_eq(rb, + (ra >> ((ZU(1) << (3 + LG_SIZEOF_PTR)) - lg_range)), + "Expected high order bits of full-width " + "result, lg_range=%u", + lg_range); } - } TEST_END @@ -158,13 +157,12 @@ TEST_END TEST_BEGIN(test_prng_range_zu) { size_t range; - const size_t max_range = 10000000; - const size_t range_step = 97; + const size_t max_range = 10000000; + const size_t range_step = 97; const unsigned nreps = 10; - for (range = 2; range < max_range; range += range_step) { - size_t s; + size_t s; unsigned rep; s = range; @@ -179,11 +177,7 @@ TEST_END int main(void) { - return test_no_reentrancy( - test_prng_lg_range_u32, - test_prng_lg_range_u64, - test_prng_lg_range_zu, - test_prng_range_u32, - test_prng_range_u64, - test_prng_range_zu); + return test_no_reentrancy(test_prng_lg_range_u32, + test_prng_lg_range_u64, test_prng_lg_range_zu, test_prng_range_u32, + test_prng_range_u64, test_prng_range_zu); } diff --git a/test/unit/prof_accum.c b/test/unit/prof_accum.c index ef392acd..940468b9 100644 --- a/test/unit/prof_accum.c +++ b/test/unit/prof_accum.c @@ -3,10 +3,10 @@ #include "jemalloc/internal/prof_data.h" #include "jemalloc/internal/prof_sys.h" -#define NTHREADS 4 -#define NALLOCS_PER_THREAD 50 -#define DUMP_INTERVAL 1 -#define BT_COUNT_CHECK_INTERVAL 5 +#define NTHREADS 4 +#define NALLOCS_PER_THREAD 50 +#define DUMP_INTERVAL 1 +#define BT_COUNT_CHECK_INTERVAL 5 static int prof_dump_open_file_intercept(const char *filename, int mode) { @@ -20,13 +20,13 @@ 
prof_dump_open_file_intercept(const char *filename, int mode) { static void * alloc_from_permuted_backtrace(unsigned thd_ind, unsigned iteration) { - return btalloc(1, thd_ind*NALLOCS_PER_THREAD + iteration); + return btalloc(1, thd_ind * NALLOCS_PER_THREAD + iteration); } static void * thd_start(void *varg) { unsigned thd_ind = *(unsigned *)varg; - size_t bt_count_prev, bt_count; + size_t bt_count_prev, bt_count; unsigned i_prev, i; i_prev = 0; @@ -39,10 +39,10 @@ thd_start(void *varg) { 0, "Unexpected error while dumping heap profile"); } - if (i % BT_COUNT_CHECK_INTERVAL == 0 || - i+1 == NALLOCS_PER_THREAD) { + if (i % BT_COUNT_CHECK_INTERVAL == 0 + || i + 1 == NALLOCS_PER_THREAD) { bt_count = prof_bt_count(); - expect_zu_le(bt_count_prev+(i-i_prev), bt_count, + expect_zu_le(bt_count_prev + (i - i_prev), bt_count, "Expected larger backtrace count increase"); i_prev = i; bt_count_prev = bt_count; @@ -53,17 +53,17 @@ thd_start(void *varg) { } TEST_BEGIN(test_idump) { - bool active; - thd_t thds[NTHREADS]; + bool active; + thd_t thds[NTHREADS]; unsigned thd_args[NTHREADS]; unsigned i; test_skip_if(!config_prof); active = true; - expect_d_eq(mallctl("prof.active", NULL, NULL, (void *)&active, - sizeof(active)), 0, - "Unexpected mallctl failure while activating profiling"); + expect_d_eq( + mallctl("prof.active", NULL, NULL, (void *)&active, sizeof(active)), + 0, "Unexpected mallctl failure while activating profiling"); prof_dump_open_file = prof_dump_open_file_intercept; @@ -79,6 +79,5 @@ TEST_END int main(void) { - return test_no_reentrancy( - test_idump); + return test_no_reentrancy(test_idump); } diff --git a/test/unit/prof_active.c b/test/unit/prof_active.c index af29e7ad..fc8b150b 100644 --- a/test/unit/prof_active.c +++ b/test/unit/prof_active.c @@ -4,37 +4,37 @@ static void mallctl_bool_get(const char *name, bool expected, const char *func, int line) { - bool old; + bool old; size_t sz; sz = sizeof(old); expect_d_eq(mallctl(name, (void *)&old, &sz, NULL, 0), 
0, "%s():%d: Unexpected mallctl failure reading %s", func, line, name); - expect_b_eq(old, expected, "%s():%d: Unexpected %s value", func, line, - name); + expect_b_eq( + old, expected, "%s():%d: Unexpected %s value", func, line, name); } static void mallctl_bool_set(const char *name, bool old_expected, bool val_new, const char *func, int line) { - bool old; + bool old; size_t sz; sz = sizeof(old); - expect_d_eq(mallctl(name, (void *)&old, &sz, (void *)&val_new, - sizeof(val_new)), 0, - "%s():%d: Unexpected mallctl failure reading/writing %s", func, + expect_d_eq( + mallctl(name, (void *)&old, &sz, (void *)&val_new, sizeof(val_new)), + 0, "%s():%d: Unexpected mallctl failure reading/writing %s", func, line, name); expect_b_eq(old, old_expected, "%s():%d: Unexpected %s value", func, line, name); } static void -mallctl_prof_active_get_impl(bool prof_active_old_expected, const char *func, - int line) { +mallctl_prof_active_get_impl( + bool prof_active_old_expected, const char *func, int line) { mallctl_bool_get("prof.active", prof_active_old_expected, func, line); } -#define mallctl_prof_active_get(a) \ +#define mallctl_prof_active_get(a) \ mallctl_prof_active_get_impl(a, __func__, __LINE__) static void @@ -43,16 +43,16 @@ mallctl_prof_active_set_impl(bool prof_active_old_expected, mallctl_bool_set("prof.active", prof_active_old_expected, prof_active_new, func, line); } -#define mallctl_prof_active_set(a, b) \ +#define mallctl_prof_active_set(a, b) \ mallctl_prof_active_set_impl(a, b, __func__, __LINE__) static void -mallctl_thread_prof_active_get_impl(bool thread_prof_active_old_expected, - const char *func, int line) { - mallctl_bool_get("thread.prof.active", thread_prof_active_old_expected, - func, line); +mallctl_thread_prof_active_get_impl( + bool thread_prof_active_old_expected, const char *func, int line) { + mallctl_bool_get( + "thread.prof.active", thread_prof_active_old_expected, func, line); } -#define mallctl_thread_prof_active_get(a) \ +#define 
mallctl_thread_prof_active_get(a) \ mallctl_thread_prof_active_get_impl(a, __func__, __LINE__) static void @@ -61,24 +61,23 @@ mallctl_thread_prof_active_set_impl(bool thread_prof_active_old_expected, mallctl_bool_set("thread.prof.active", thread_prof_active_old_expected, thread_prof_active_new, func, line); } -#define mallctl_thread_prof_active_set(a, b) \ +#define mallctl_thread_prof_active_set(a, b) \ mallctl_thread_prof_active_set_impl(a, b, __func__, __LINE__) static void prof_sampling_probe_impl(bool expect_sample, const char *func, int line) { - void *p; + void *p; size_t expected_backtraces = expect_sample ? 1 : 0; - expect_zu_eq(prof_bt_count(), 0, "%s():%d: Expected 0 backtraces", func, - line); + expect_zu_eq( + prof_bt_count(), 0, "%s():%d: Expected 0 backtraces", func, line); p = mallocx(1, 0); expect_ptr_not_null(p, "Unexpected mallocx() failure"); expect_zu_eq(prof_bt_count(), expected_backtraces, "%s():%d: Unexpected backtrace count", func, line); dallocx(p, 0); } -#define prof_sampling_probe(a) \ - prof_sampling_probe_impl(a, __func__, __LINE__) +#define prof_sampling_probe(a) prof_sampling_probe_impl(a, __func__, __LINE__) TEST_BEGIN(test_prof_active) { test_skip_if(!config_prof); @@ -114,6 +113,5 @@ TEST_END int main(void) { - return test_no_reentrancy( - test_prof_active); + return test_no_reentrancy(test_prof_active); } diff --git a/test/unit/prof_gdump.c b/test/unit/prof_gdump.c index 46e45036..4cca9bdb 100644 --- a/test/unit/prof_gdump.c +++ b/test/unit/prof_gdump.c @@ -18,16 +18,16 @@ prof_dump_open_file_intercept(const char *filename, int mode) { TEST_BEGIN(test_gdump) { test_skip_if(opt_hpa); - bool active, gdump, gdump_old; - void *p, *q, *r, *s; + bool active, gdump, gdump_old; + void *p, *q, *r, *s; size_t sz; test_skip_if(!config_prof); active = true; - expect_d_eq(mallctl("prof.active", NULL, NULL, (void *)&active, - sizeof(active)), 0, - "Unexpected mallctl failure while activating profiling"); + expect_d_eq( + mallctl("prof.active", 
NULL, NULL, (void *)&active, sizeof(active)), + 0, "Unexpected mallctl failure while activating profiling"); prof_dump_open_file = prof_dump_open_file_intercept; @@ -44,8 +44,8 @@ TEST_BEGIN(test_gdump) { gdump = false; sz = sizeof(gdump_old); expect_d_eq(mallctl("prof.gdump", (void *)&gdump_old, &sz, - (void *)&gdump, sizeof(gdump)), 0, - "Unexpected mallctl failure while disabling prof.gdump"); + (void *)&gdump, sizeof(gdump)), + 0, "Unexpected mallctl failure while disabling prof.gdump"); assert(gdump_old); did_prof_dump_open = false; r = mallocx((1U << SC_LG_LARGE_MINCLASS), 0); @@ -55,8 +55,8 @@ TEST_BEGIN(test_gdump) { gdump = true; sz = sizeof(gdump_old); expect_d_eq(mallctl("prof.gdump", (void *)&gdump_old, &sz, - (void *)&gdump, sizeof(gdump)), 0, - "Unexpected mallctl failure while enabling prof.gdump"); + (void *)&gdump, sizeof(gdump)), + 0, "Unexpected mallctl failure while enabling prof.gdump"); assert(!gdump_old); did_prof_dump_open = false; s = mallocx((1U << SC_LG_LARGE_MINCLASS), 0); @@ -72,6 +72,5 @@ TEST_END int main(void) { - return test_no_reentrancy( - test_gdump); + return test_no_reentrancy(test_gdump); } diff --git a/test/unit/prof_hook.c b/test/unit/prof_hook.c index fd2871e5..1d58469c 100644 --- a/test/unit/prof_hook.c +++ b/test/unit/prof_hook.c @@ -14,10 +14,10 @@ bool mock_dump_hook_called = false; bool mock_prof_sample_hook_called = false; bool mock_prof_sample_free_hook_called = false; -void *sampled_ptr = NULL; +void *sampled_ptr = NULL; size_t sampled_ptr_sz = 0; size_t sampled_ptr_usz = 0; -void *free_sampled_ptr = NULL; +void *free_sampled_ptr = NULL; size_t free_sampled_ptr_sz = 0; void @@ -49,7 +49,6 @@ mock_bt_augmenting_hook(void **vec, unsigned *len, unsigned max_len) { (*len)++; } - mock_bt_hook_called = true; } @@ -61,14 +60,15 @@ mock_dump_hook(const char *filename) { } void -mock_prof_sample_hook(const void *ptr, size_t sz, void **vec, unsigned len, size_t usz) { +mock_prof_sample_hook( + const void *ptr, size_t sz, void 
**vec, unsigned len, size_t usz) { mock_prof_sample_hook_called = true; sampled_ptr = (void *)ptr; sampled_ptr_sz = sz; sampled_ptr_usz = usz; for (unsigned i = 0; i < len; i++) { - expect_ptr_not_null((void **)vec[i], - "Backtrace should not contain NULL"); + expect_ptr_not_null( + (void **)vec[i], "Backtrace should not contain NULL"); } } @@ -80,7 +80,6 @@ mock_prof_sample_free_hook(const void *ptr, size_t sz) { } TEST_BEGIN(test_prof_backtrace_hook_replace) { - test_skip_if(!config_prof); mock_bt_hook_called = false; @@ -91,15 +90,16 @@ TEST_BEGIN(test_prof_backtrace_hook_replace) { expect_false(mock_bt_hook_called, "Called mock hook before it's set"); prof_backtrace_hook_t null_hook = NULL; - expect_d_eq(mallctl("experimental.hooks.prof_backtrace", - NULL, 0, (void *)&null_hook, sizeof(null_hook)), - EINVAL, "Incorrectly allowed NULL backtrace hook"); + expect_d_eq(mallctl("experimental.hooks.prof_backtrace", NULL, 0, + (void *)&null_hook, sizeof(null_hook)), + EINVAL, "Incorrectly allowed NULL backtrace hook"); size_t default_bt_hook_sz = sizeof(prof_backtrace_hook_t); prof_backtrace_hook_t hook = &mock_bt_hook; expect_d_eq(mallctl("experimental.hooks.prof_backtrace", - (void *)&default_bt_hook, &default_bt_hook_sz, (void *)&hook, - sizeof(hook)), 0, "Unexpected mallctl failure setting hook"); + (void *)&default_bt_hook, &default_bt_hook_sz, + (void *)&hook, sizeof(hook)), + 0, "Unexpected mallctl failure setting hook"); void *p1 = mallocx(1, 0); assert_ptr_not_null(p1, "Failed to allocate"); @@ -107,11 +107,11 @@ TEST_BEGIN(test_prof_backtrace_hook_replace) { expect_true(mock_bt_hook_called, "Didn't call mock hook"); prof_backtrace_hook_t current_hook; - size_t current_hook_sz = sizeof(prof_backtrace_hook_t); + size_t current_hook_sz = sizeof(prof_backtrace_hook_t); expect_d_eq(mallctl("experimental.hooks.prof_backtrace", - (void *)&current_hook, &current_hook_sz, (void *)&default_bt_hook, - sizeof(default_bt_hook)), 0, - "Unexpected mallctl failure resetting hook to 
default"); + (void *)&current_hook, &current_hook_sz, + (void *)&default_bt_hook, sizeof(default_bt_hook)), + 0, "Unexpected mallctl failure resetting hook to default"); expect_ptr_eq(current_hook, hook, "Hook returned by mallctl is not equal to mock hook"); @@ -122,7 +122,6 @@ TEST_BEGIN(test_prof_backtrace_hook_replace) { TEST_END TEST_BEGIN(test_prof_backtrace_hook_augment) { - test_skip_if(!config_prof); mock_bt_hook_called = false; @@ -135,8 +134,9 @@ TEST_BEGIN(test_prof_backtrace_hook_augment) { size_t default_bt_hook_sz = sizeof(prof_backtrace_hook_t); prof_backtrace_hook_t hook = &mock_bt_augmenting_hook; expect_d_eq(mallctl("experimental.hooks.prof_backtrace", - (void *)&default_bt_hook, &default_bt_hook_sz, (void *)&hook, - sizeof(hook)), 0, "Unexpected mallctl failure setting hook"); + (void *)&default_bt_hook, &default_bt_hook_sz, + (void *)&hook, sizeof(hook)), + 0, "Unexpected mallctl failure setting hook"); void *p1 = mallocx(1, 0); assert_ptr_not_null(p1, "Failed to allocate"); @@ -144,11 +144,11 @@ TEST_BEGIN(test_prof_backtrace_hook_augment) { expect_true(mock_bt_hook_called, "Didn't call mock hook"); prof_backtrace_hook_t current_hook; - size_t current_hook_sz = sizeof(prof_backtrace_hook_t); + size_t current_hook_sz = sizeof(prof_backtrace_hook_t); expect_d_eq(mallctl("experimental.hooks.prof_backtrace", - (void *)&current_hook, &current_hook_sz, (void *)&default_bt_hook, - sizeof(default_bt_hook)), 0, - "Unexpected mallctl failure resetting hook to default"); + (void *)&current_hook, &current_hook_sz, + (void *)&default_bt_hook, sizeof(default_bt_hook)), + 0, "Unexpected mallctl failure resetting hook to default"); expect_ptr_eq(current_hook, hook, "Hook returned by mallctl is not equal to mock hook"); @@ -159,34 +159,36 @@ TEST_BEGIN(test_prof_backtrace_hook_augment) { TEST_END TEST_BEGIN(test_prof_dump_hook) { - test_skip_if(!config_prof); expect_u_eq(opt_prof_bt_max, 200, "Unexpected backtrace stack depth"); mock_dump_hook_called = false; expect_d_eq(mallctl("prof.dump", 
NULL, NULL, (void *)&dump_filename, - sizeof(dump_filename)), 0, "Failed to dump heap profile"); + sizeof(dump_filename)), + 0, "Failed to dump heap profile"); expect_false(mock_dump_hook_called, "Called dump hook before it's set"); - size_t default_bt_hook_sz = sizeof(prof_dump_hook_t); + size_t default_bt_hook_sz = sizeof(prof_dump_hook_t); prof_dump_hook_t hook = &mock_dump_hook; - expect_d_eq(mallctl("experimental.hooks.prof_dump", - (void *)&default_bt_hook, &default_bt_hook_sz, (void *)&hook, - sizeof(hook)), 0, "Unexpected mallctl failure setting hook"); + expect_d_eq( + mallctl("experimental.hooks.prof_dump", (void *)&default_bt_hook, + &default_bt_hook_sz, (void *)&hook, sizeof(hook)), + 0, "Unexpected mallctl failure setting hook"); expect_d_eq(mallctl("prof.dump", NULL, NULL, (void *)&dump_filename, - sizeof(dump_filename)), 0, "Failed to dump heap profile"); + sizeof(dump_filename)), + 0, "Failed to dump heap profile"); expect_true(mock_dump_hook_called, "Didn't call mock hook"); prof_dump_hook_t current_hook; - size_t current_hook_sz = sizeof(prof_dump_hook_t); + size_t current_hook_sz = sizeof(prof_dump_hook_t); expect_d_eq(mallctl("experimental.hooks.prof_dump", - (void *)&current_hook, &current_hook_sz, (void *)&default_bt_hook, - sizeof(default_bt_hook)), 0, - "Unexpected mallctl failure resetting hook to default"); + (void *)&current_hook, &current_hook_sz, + (void *)&default_bt_hook, sizeof(default_bt_hook)), + 0, "Unexpected mallctl failure resetting hook to default"); expect_ptr_eq(current_hook, hook, "Hook returned by mallctl is not equal to mock hook"); @@ -195,12 +197,12 @@ TEST_END /* Need the do_write flag because NULL is a valid to_write value. 
*/ static void -read_write_prof_sample_hook(prof_sample_hook_t *to_read, bool do_write, - prof_sample_hook_t to_write) { +read_write_prof_sample_hook( + prof_sample_hook_t *to_read, bool do_write, prof_sample_hook_t to_write) { size_t hook_sz = sizeof(prof_sample_hook_t); - expect_d_eq(mallctl("experimental.hooks.prof_sample", - (void *)to_read, &hook_sz, do_write ? &to_write : NULL, hook_sz), 0, - "Unexpected prof_sample_hook mallctl failure"); + expect_d_eq(mallctl("experimental.hooks.prof_sample", (void *)to_read, + &hook_sz, do_write ? &to_write : NULL, hook_sz), + 0, "Unexpected prof_sample_hook mallctl failure"); } static void @@ -220,9 +222,10 @@ static void read_write_prof_sample_free_hook(prof_sample_free_hook_t *to_read, bool do_write, prof_sample_free_hook_t to_write) { size_t hook_sz = sizeof(prof_sample_free_hook_t); - expect_d_eq(mallctl("experimental.hooks.prof_sample_free", - (void *)to_read, &hook_sz, do_write ? &to_write : NULL, hook_sz), 0, - "Unexpected prof_sample_free_hook mallctl failure"); + expect_d_eq( + mallctl("experimental.hooks.prof_sample_free", (void *)to_read, + &hook_sz, do_write ? &to_write : NULL, hook_sz), + 0, "Unexpected prof_sample_free_hook mallctl failure"); } static void @@ -248,38 +251,40 @@ check_prof_sample_hooks(bool sample_hook_set, bool sample_free_hook_set) { expect_zu_eq(sampled_ptr_sz, 0, "Unexpected sampled ptr size"); expect_zu_eq(sampled_ptr_usz, 0, "Unexpected sampled ptr usize"); expect_ptr_null(free_sampled_ptr, "Unexpected free sampled ptr"); - expect_zu_eq(free_sampled_ptr_sz, 0, - "Unexpected free sampled ptr size"); + expect_zu_eq( + free_sampled_ptr_sz, 0, "Unexpected free sampled ptr size"); prof_sample_hook_t curr_hook = read_prof_sample_hook(); expect_ptr_eq(curr_hook, sample_hook_set ? mock_prof_sample_hook : NULL, "Unexpected non NULL default hook"); prof_sample_free_hook_t curr_free_hook = read_prof_sample_free_hook(); - expect_ptr_eq(curr_free_hook, sample_free_hook_set ? 
- mock_prof_sample_free_hook : NULL, + expect_ptr_eq(curr_free_hook, + sample_free_hook_set ? mock_prof_sample_free_hook : NULL, "Unexpected non NULL default hook"); size_t alloc_sz = 10; size_t alloc_usz = 16; - void *p = mallocx(alloc_sz, 0); + void *p = mallocx(alloc_sz, 0); expect_ptr_not_null(p, "Failed to allocate"); expect_true(mock_prof_sample_hook_called == sample_hook_set, - "Incorrect prof_sample hook usage"); + "Incorrect prof_sample hook usage"); if (sample_hook_set) { expect_ptr_eq(p, sampled_ptr, "Unexpected sampled ptr"); - expect_zu_eq(alloc_sz, sampled_ptr_sz, - "Unexpected sampled usize"); - expect_zu_eq(alloc_usz, sampled_ptr_usz, "Unexpected sampled usize"); + expect_zu_eq( + alloc_sz, sampled_ptr_sz, "Unexpected sampled usize"); + expect_zu_eq( + alloc_usz, sampled_ptr_usz, "Unexpected sampled usize"); } dallocx(p, 0); expect_true(mock_prof_sample_free_hook_called == sample_free_hook_set, - "Incorrect prof_sample_free hook usage"); + "Incorrect prof_sample_free hook usage"); if (sample_free_hook_set) { size_t usz = sz_s2u(alloc_sz); expect_ptr_eq(p, free_sampled_ptr, "Unexpected sampled ptr"); - expect_zu_eq(usz, free_sampled_ptr_sz, "Unexpected sampled usize"); + expect_zu_eq( + usz, free_sampled_ptr_sz, "Unexpected sampled usize"); } sampled_ptr = free_sampled_ptr = NULL; @@ -312,14 +317,14 @@ TEST_BEGIN(test_prof_sample_hooks) { check_prof_sample_hooks(true, false); prof_sample_free_hook_t sample_free_hook; - read_write_prof_sample_free_hook(&sample_free_hook, true, - mock_prof_sample_free_hook); + read_write_prof_sample_free_hook( + &sample_free_hook, true, mock_prof_sample_free_hook); expect_ptr_null(sample_free_hook, "Unexpected non NULL default hook"); check_prof_sample_hooks(true, true); read_write_prof_sample_hook(&sample_hook, true, NULL); - expect_ptr_eq(sample_hook, mock_prof_sample_hook, - "Unexpected prof_sample hook"); + expect_ptr_eq( + sample_hook, mock_prof_sample_hook, "Unexpected prof_sample hook"); 
check_prof_sample_hooks(false, true); read_write_prof_sample_free_hook(&sample_free_hook, true, NULL); @@ -331,9 +336,7 @@ TEST_END int main(void) { - return test( - test_prof_backtrace_hook_replace, - test_prof_backtrace_hook_augment, - test_prof_dump_hook, + return test(test_prof_backtrace_hook_replace, + test_prof_backtrace_hook_augment, test_prof_dump_hook, test_prof_sample_hooks); } diff --git a/test/unit/prof_idump.c b/test/unit/prof_idump.c index 455ac529..b16b4a1f 100644 --- a/test/unit/prof_idump.c +++ b/test/unit/prof_idump.c @@ -13,8 +13,9 @@ prof_dump_open_file_intercept(const char *filename, int mode) { did_prof_dump_open = true; const char filename_prefix[] = TEST_PREFIX "."; - expect_d_eq(strncmp(filename_prefix, filename, sizeof(filename_prefix) - - 1), 0, "Dump file name should start with \"" TEST_PREFIX ".\""); + expect_d_eq( + strncmp(filename_prefix, filename, sizeof(filename_prefix) - 1), 0, + "Dump file name should start with \"" TEST_PREFIX ".\""); fd = open("/dev/null", O_WRONLY); assert_d_ne(fd, -1, "Unexpected open() failure"); @@ -23,7 +24,7 @@ prof_dump_open_file_intercept(const char *filename, int mode) { } TEST_BEGIN(test_idump) { - bool active; + bool active; void *p; const char *test_prefix = TEST_PREFIX; @@ -33,12 +34,12 @@ TEST_BEGIN(test_idump) { active = true; expect_d_eq(mallctl("prof.prefix", NULL, NULL, (void *)&test_prefix, - sizeof(test_prefix)), 0, - "Unexpected mallctl failure while overwriting dump prefix"); + sizeof(test_prefix)), + 0, "Unexpected mallctl failure while overwriting dump prefix"); - expect_d_eq(mallctl("prof.active", NULL, NULL, (void *)&active, - sizeof(active)), 0, - "Unexpected mallctl failure while activating profiling"); + expect_d_eq( + mallctl("prof.active", NULL, NULL, (void *)&active, sizeof(active)), + 0, "Unexpected mallctl failure while activating profiling"); prof_dump_open_file = prof_dump_open_file_intercept; @@ -52,6 +53,5 @@ TEST_END int main(void) { - return test( - test_idump); + return 
test(test_idump); } diff --git a/test/unit/prof_log.c b/test/unit/prof_log.c index a32fdd0b..8cfc19ff 100644 --- a/test/unit/prof_log.c +++ b/test/unit/prof_log.c @@ -4,22 +4,25 @@ #define N_PARAM 100 #define N_THREADS 10 -static void expect_rep(void) { +static void +expect_rep(void) { expect_b_eq(prof_log_rep_check(), false, "Rep check failed"); } -static void expect_log_empty(void) { - expect_zu_eq(prof_log_bt_count(), 0, - "The log has backtraces; it isn't empty"); - expect_zu_eq(prof_log_thr_count(), 0, - "The log has threads; it isn't empty"); +static void +expect_log_empty(void) { + expect_zu_eq( + prof_log_bt_count(), 0, "The log has backtraces; it isn't empty"); + expect_zu_eq( + prof_log_thr_count(), 0, "The log has threads; it isn't empty"); expect_zu_eq(prof_log_alloc_count(), 0, "The log has allocations; it isn't empty"); } void *buf[N_PARAM]; -static void f(void) { +static void +f(void) { int i; for (i = 0; i < N_PARAM; i++) { buf[i] = malloc(100); @@ -46,8 +49,8 @@ TEST_BEGIN(test_prof_log_many_logs) { f(); expect_zu_eq(prof_log_thr_count(), 1, "Wrong thread count"); expect_rep(); - expect_b_eq(prof_log_is_logging(), true, - "Logging should still be on"); + expect_b_eq( + prof_log_is_logging(), true, "Logging should still be on"); expect_d_eq(mallctl("prof.log_stop", NULL, NULL, NULL, 0), 0, "Unexpected mallctl failure when stopping logging"); expect_b_eq(prof_log_is_logging(), false, @@ -58,7 +61,8 @@ TEST_END thd_t thr_buf[N_THREADS]; -static void *f_thread(void *unused) { +static void * +f_thread(void *unused) { int i; for (i = 0; i < N_PARAM; i++) { void *p = malloc(100); @@ -70,7 +74,6 @@ static void *f_thread(void *unused) { } TEST_BEGIN(test_prof_log_many_threads) { - test_skip_if(!config_prof); int i; @@ -83,32 +86,34 @@ TEST_BEGIN(test_prof_log_many_threads) { for (i = 0; i < N_THREADS; i++) { thd_join(thr_buf[i], NULL); } - expect_zu_eq(prof_log_thr_count(), N_THREADS, - "Wrong number of thread entries"); + expect_zu_eq( + 
prof_log_thr_count(), N_THREADS, "Wrong number of thread entries"); expect_rep(); expect_d_eq(mallctl("prof.log_stop", NULL, NULL, NULL, 0), 0, "Unexpected mallctl failure when stopping logging"); } TEST_END -static void f3(void) { +static void +f3(void) { void *p = malloc(100); free(p); } -static void f1(void) { +static void +f1(void) { void *p = malloc(100); f3(); free(p); } -static void f2(void) { +static void +f2(void) { void *p = malloc(100); free(p); } TEST_BEGIN(test_prof_log_many_traces) { - test_skip_if(!config_prof); expect_d_eq(mallctl("prof.log_start", NULL, NULL, NULL, 0), 0, @@ -144,8 +149,6 @@ main(void) { if (config_prof) { prof_log_dummy_set(true); } - return test_no_reentrancy( - test_prof_log_many_logs, - test_prof_log_many_traces, - test_prof_log_many_threads); + return test_no_reentrancy(test_prof_log_many_logs, + test_prof_log_many_traces, test_prof_log_many_threads); } diff --git a/test/unit/prof_mdump.c b/test/unit/prof_mdump.c index 0559339e..0200f92f 100644 --- a/test/unit/prof_mdump.c +++ b/test/unit/prof_mdump.c @@ -3,7 +3,7 @@ #include "jemalloc/internal/prof_sys.h" static const char *test_filename = "test_filename"; -static bool did_prof_dump_open; +static bool did_prof_dump_open; static int prof_dump_open_file_intercept(const char *filename, int mode) { @@ -35,8 +35,8 @@ TEST_BEGIN(test_mdump_normal) { prof_dump_open_file = prof_dump_open_file_intercept; did_prof_dump_open = false; expect_d_eq(mallctl("prof.dump", NULL, NULL, (void *)&test_filename, - sizeof(test_filename)), 0, - "Unexpected mallctl failure while dumping"); + sizeof(test_filename)), + 0, "Unexpected mallctl failure while dumping"); expect_true(did_prof_dump_open, "Expected a profile dump"); dallocx(p, 0); @@ -89,7 +89,8 @@ static void expect_write_failure(int count) { prof_dump_write_file_count = count; expect_d_eq(mallctl("prof.dump", NULL, NULL, (void *)&test_filename, - sizeof(test_filename)), EFAULT, "Dump should err"); + sizeof(test_filename)), + EFAULT, "Dump 
should err"); expect_d_eq(prof_dump_write_file_count, 0, "Dumping stopped after a wrong number of writes"); } @@ -98,7 +99,7 @@ TEST_BEGIN(test_mdump_output_error) { test_skip_if(!config_prof); test_skip_if(!config_debug); - prof_dump_open_file_t *open_file_orig = prof_dump_open_file; + prof_dump_open_file_t *open_file_orig = prof_dump_open_file; prof_dump_write_file_t *write_file_orig = prof_dump_write_file; prof_dump_write_file = prof_dump_write_file_error; @@ -168,9 +169,9 @@ TEST_BEGIN(test_mdump_maps_error) { test_skip_if(!config_debug); test_skip_if(prof_dump_open_maps == NULL); - prof_dump_open_file_t *open_file_orig = prof_dump_open_file; + prof_dump_open_file_t *open_file_orig = prof_dump_open_file; prof_dump_write_file_t *write_file_orig = prof_dump_write_file; - prof_dump_open_maps_t *open_maps_orig = prof_dump_open_maps; + prof_dump_open_maps_t *open_maps_orig = prof_dump_open_maps; prof_dump_open_file = prof_dump_open_file_intercept; prof_dump_write_file = prof_dump_write_maps_file_error; @@ -186,8 +187,8 @@ TEST_BEGIN(test_mdump_maps_error) { started_piping_maps_file = false; prof_dump_write_file_count = 0; expect_d_eq(mallctl("prof.dump", NULL, NULL, (void *)&test_filename, - sizeof(test_filename)), 0, - "mallctl should not fail in case of maps file opening failure"); + sizeof(test_filename)), + 0, "mallctl should not fail in case of maps file opening failure"); expect_false(started_piping_maps_file, "Shouldn't start piping maps"); expect_d_eq(prof_dump_write_file_count, 0, "Dumping stopped after a wrong number of writes"); @@ -211,7 +212,5 @@ TEST_END int main(void) { return test( - test_mdump_normal, - test_mdump_output_error, - test_mdump_maps_error); + test_mdump_normal, test_mdump_output_error, test_mdump_maps_error); } diff --git a/test/unit/prof_recent.c b/test/unit/prof_recent.c index 24ee6f42..b8fd0ca8 100644 --- a/test/unit/prof_recent.c +++ b/test/unit/prof_recent.c @@ -32,18 +32,20 @@ TEST_BEGIN(test_prof_recent_off) { 
test_skip_if(config_prof); const ssize_t past_ref = 0, future_ref = 0; - const size_t len_ref = sizeof(ssize_t); + const size_t len_ref = sizeof(ssize_t); ssize_t past = past_ref, future = future_ref; - size_t len = len_ref; + size_t len = len_ref; -#define ASSERT_SHOULD_FAIL(opt, a, b, c, d) do { \ - assert_d_eq(mallctl("experimental.prof_recent." opt, a, b, c, \ - d), ENOENT, "Should return ENOENT when config_prof is off");\ - assert_zd_eq(past, past_ref, "output was touched"); \ - assert_zu_eq(len, len_ref, "output length was touched"); \ - assert_zd_eq(future, future_ref, "input was touched"); \ -} while (0) +#define ASSERT_SHOULD_FAIL(opt, a, b, c, d) \ + do { \ + assert_d_eq( \ + mallctl("experimental.prof_recent." opt, a, b, c, d), \ + ENOENT, "Should return ENOENT when config_prof is off"); \ + assert_zd_eq(past, past_ref, "output was touched"); \ + assert_zu_eq(len, len_ref, "output length was touched"); \ + assert_zd_eq(future, future_ref, "input was touched"); \ + } while (0) ASSERT_SHOULD_FAIL("alloc_max", NULL, NULL, NULL, 0); ASSERT_SHOULD_FAIL("alloc_max", &past, &len, NULL, 0); @@ -58,40 +60,45 @@ TEST_BEGIN(test_prof_recent_on) { test_skip_if(!config_prof); ssize_t past, future; - size_t len = sizeof(ssize_t); + size_t len = sizeof(ssize_t); confirm_prof_setup(); - assert_d_eq(mallctl("experimental.prof_recent.alloc_max", - NULL, NULL, NULL, 0), 0, "no-op mallctl should be allowed"); + assert_d_eq( + mallctl("experimental.prof_recent.alloc_max", NULL, NULL, NULL, 0), + 0, "no-op mallctl should be allowed"); confirm_prof_setup(); - assert_d_eq(mallctl("experimental.prof_recent.alloc_max", - &past, &len, NULL, 0), 0, "Read error"); + assert_d_eq( + mallctl("experimental.prof_recent.alloc_max", &past, &len, NULL, 0), + 0, "Read error"); expect_zd_eq(past, OPT_ALLOC_MAX, "Wrong read result"); future = OPT_ALLOC_MAX + 1; - assert_d_eq(mallctl("experimental.prof_recent.alloc_max", - NULL, NULL, &future, len), 0, "Write error"); + 
assert_d_eq(mallctl("experimental.prof_recent.alloc_max", NULL, NULL, + &future, len), + 0, "Write error"); future = -1; - assert_d_eq(mallctl("experimental.prof_recent.alloc_max", - &past, &len, &future, len), 0, "Read/write error"); + assert_d_eq(mallctl("experimental.prof_recent.alloc_max", &past, &len, + &future, len), + 0, "Read/write error"); expect_zd_eq(past, OPT_ALLOC_MAX + 1, "Wrong read result"); future = -2; - assert_d_eq(mallctl("experimental.prof_recent.alloc_max", - &past, &len, &future, len), EINVAL, - "Invalid write should return EINVAL"); + assert_d_eq(mallctl("experimental.prof_recent.alloc_max", &past, &len, + &future, len), + EINVAL, "Invalid write should return EINVAL"); expect_zd_eq(past, OPT_ALLOC_MAX + 1, "Output should not be touched given invalid write"); future = OPT_ALLOC_MAX; - assert_d_eq(mallctl("experimental.prof_recent.alloc_max", - &past, &len, &future, len), 0, "Read/write error"); + assert_d_eq(mallctl("experimental.prof_recent.alloc_max", &past, &len, + &future, len), + 0, "Read/write error"); expect_zd_eq(past, -1, "Wrong read result"); future = OPT_ALLOC_MAX + 2; - assert_d_eq(mallctl("experimental.prof_recent.alloc_max", - &past, &len, &future, len * 2), EINVAL, - "Invalid write should return EINVAL"); - expect_zd_eq(past, -1, - "Output should not be touched given invalid write"); + assert_d_eq(mallctl("experimental.prof_recent.alloc_max", &past, &len, + &future, len * 2), + EINVAL, "Invalid write should return EINVAL"); + expect_zd_eq( + past, -1, "Output should not be touched given invalid write"); confirm_prof_setup(); } @@ -107,8 +114,8 @@ confirm_malloc(void *p) { assert_ptr_not_null(e, "NULL edata for living pointer"); prof_recent_t *n = edata_prof_recent_alloc_get_no_lock_test(e); assert_ptr_not_null(n, "Record in edata should not be NULL"); - expect_ptr_not_null(n->alloc_tctx, - "alloc_tctx in record should not be NULL"); + expect_ptr_not_null( + n->alloc_tctx, "alloc_tctx in record should not be NULL"); 
expect_ptr_eq(e, prof_recent_alloc_edata_get_no_lock_test(n), "edata pointer in record is not correct"); expect_ptr_null(n->dalloc_tctx, "dalloc_tctx in record should be NULL"); @@ -116,17 +123,17 @@ confirm_malloc(void *p) { static void confirm_record_size(prof_recent_t *n, unsigned kth) { - expect_zu_eq(n->size, NTH_REQ_SIZE(kth), - "Recorded allocation size is wrong"); + expect_zu_eq( + n->size, NTH_REQ_SIZE(kth), "Recorded allocation size is wrong"); } static void confirm_record_living(prof_recent_t *n) { - expect_ptr_not_null(n->alloc_tctx, - "alloc_tctx in record should not be NULL"); + expect_ptr_not_null( + n->alloc_tctx, "alloc_tctx in record should not be NULL"); edata_t *edata = prof_recent_alloc_edata_get_no_lock_test(n); - assert_ptr_not_null(edata, - "Recorded edata should not be NULL for living pointer"); + assert_ptr_not_null( + edata, "Recorded edata should not be NULL for living pointer"); expect_ptr_eq(n, edata_prof_recent_alloc_get_no_lock_test(edata), "Record in edata is not correct"); expect_ptr_null(n->dalloc_tctx, "dalloc_tctx in record should be NULL"); @@ -134,8 +141,8 @@ confirm_record_living(prof_recent_t *n) { static void confirm_record_released(prof_recent_t *n) { - expect_ptr_not_null(n->alloc_tctx, - "alloc_tctx in record should not be NULL"); + expect_ptr_not_null( + n->alloc_tctx, "alloc_tctx in record should not be NULL"); expect_ptr_null(prof_recent_alloc_edata_get_no_lock_test(n), "Recorded edata should be NULL for released pointer"); expect_ptr_not_null(n->dalloc_tctx, @@ -145,12 +152,12 @@ confirm_record_released(prof_recent_t *n) { TEST_BEGIN(test_prof_recent_alloc) { test_skip_if(!config_prof); - bool b; - unsigned i, c; - size_t req_size; - void *p; + bool b; + unsigned i, c; + size_t req_size; + void *p; prof_recent_t *n; - ssize_t future; + ssize_t future; confirm_prof_setup(); @@ -175,7 +182,7 @@ TEST_BEGIN(test_prof_recent_alloc) { continue; } c = 0; - ql_foreach(n, &prof_recent_alloc_list, link) { + ql_foreach (n, 
&prof_recent_alloc_list, link) { ++c; confirm_record_size(n, i + c - OPT_ALLOC_MAX); if (c == OPT_ALLOC_MAX) { @@ -184,8 +191,8 @@ TEST_BEGIN(test_prof_recent_alloc) { confirm_record_released(n); } } - assert_u_eq(c, OPT_ALLOC_MAX, - "Incorrect total number of allocations"); + assert_u_eq( + c, OPT_ALLOC_MAX, "Incorrect total number of allocations"); free(p); } @@ -204,13 +211,13 @@ TEST_BEGIN(test_prof_recent_alloc) { p = malloc(req_size); assert_ptr_not_null(p, "malloc failed unexpectedly"); c = 0; - ql_foreach(n, &prof_recent_alloc_list, link) { + ql_foreach (n, &prof_recent_alloc_list, link) { confirm_record_size(n, c + OPT_ALLOC_MAX); confirm_record_released(n); ++c; } - assert_u_eq(c, OPT_ALLOC_MAX, - "Incorrect total number of allocations"); + assert_u_eq( + c, OPT_ALLOC_MAX, "Incorrect total number of allocations"); free(p); } @@ -231,91 +238,96 @@ TEST_BEGIN(test_prof_recent_alloc) { p = malloc(req_size); confirm_malloc(p); c = 0; - ql_foreach(n, &prof_recent_alloc_list, link) { + ql_foreach (n, &prof_recent_alloc_list, link) { ++c; confirm_record_size(n, /* Is the allocation from the third batch? */ - i + c - OPT_ALLOC_MAX >= 3 * OPT_ALLOC_MAX ? - /* If yes, then it's just recorded. */ - i + c - OPT_ALLOC_MAX : - /* + i + c - OPT_ALLOC_MAX >= 3 * OPT_ALLOC_MAX + ? + /* If yes, then it's just recorded. */ + i + c - OPT_ALLOC_MAX + : + /* * Otherwise, it should come from the first batch * instead of the second batch. */ - i + c - 2 * OPT_ALLOC_MAX); + i + c - 2 * OPT_ALLOC_MAX); if (c == OPT_ALLOC_MAX) { confirm_record_living(n); } else { confirm_record_released(n); } } - assert_u_eq(c, OPT_ALLOC_MAX, - "Incorrect total number of allocations"); + assert_u_eq( + c, OPT_ALLOC_MAX, "Incorrect total number of allocations"); free(p); } /* Increasing the limit shouldn't alter the list of records. 
*/ future = OPT_ALLOC_MAX + 1; - assert_d_eq(mallctl("experimental.prof_recent.alloc_max", - NULL, NULL, &future, sizeof(ssize_t)), 0, "Write error"); + assert_d_eq(mallctl("experimental.prof_recent.alloc_max", NULL, NULL, + &future, sizeof(ssize_t)), + 0, "Write error"); c = 0; - ql_foreach(n, &prof_recent_alloc_list, link) { + ql_foreach (n, &prof_recent_alloc_list, link) { confirm_record_size(n, c + 3 * OPT_ALLOC_MAX); confirm_record_released(n); ++c; } - assert_u_eq(c, OPT_ALLOC_MAX, - "Incorrect total number of allocations"); + assert_u_eq(c, OPT_ALLOC_MAX, "Incorrect total number of allocations"); /* * Decreasing the limit shouldn't alter the list of records as long as * the new limit is still no less than the length of the list. */ future = OPT_ALLOC_MAX; - assert_d_eq(mallctl("experimental.prof_recent.alloc_max", - NULL, NULL, &future, sizeof(ssize_t)), 0, "Write error"); + assert_d_eq(mallctl("experimental.prof_recent.alloc_max", NULL, NULL, + &future, sizeof(ssize_t)), + 0, "Write error"); c = 0; - ql_foreach(n, &prof_recent_alloc_list, link) { + ql_foreach (n, &prof_recent_alloc_list, link) { confirm_record_size(n, c + 3 * OPT_ALLOC_MAX); confirm_record_released(n); ++c; } - assert_u_eq(c, OPT_ALLOC_MAX, - "Incorrect total number of allocations"); + assert_u_eq(c, OPT_ALLOC_MAX, "Incorrect total number of allocations"); /* * Decreasing the limit should shorten the list of records if the new * limit is less than the length of the list. 
*/ future = OPT_ALLOC_MAX - 1; - assert_d_eq(mallctl("experimental.prof_recent.alloc_max", - NULL, NULL, &future, sizeof(ssize_t)), 0, "Write error"); + assert_d_eq(mallctl("experimental.prof_recent.alloc_max", NULL, NULL, + &future, sizeof(ssize_t)), + 0, "Write error"); c = 0; - ql_foreach(n, &prof_recent_alloc_list, link) { + ql_foreach (n, &prof_recent_alloc_list, link) { ++c; confirm_record_size(n, c + 3 * OPT_ALLOC_MAX); confirm_record_released(n); } - assert_u_eq(c, OPT_ALLOC_MAX - 1, - "Incorrect total number of allocations"); + assert_u_eq( + c, OPT_ALLOC_MAX - 1, "Incorrect total number of allocations"); /* Setting to unlimited shouldn't alter the list of records. */ future = -1; - assert_d_eq(mallctl("experimental.prof_recent.alloc_max", - NULL, NULL, &future, sizeof(ssize_t)), 0, "Write error"); + assert_d_eq(mallctl("experimental.prof_recent.alloc_max", NULL, NULL, + &future, sizeof(ssize_t)), + 0, "Write error"); c = 0; - ql_foreach(n, &prof_recent_alloc_list, link) { + ql_foreach (n, &prof_recent_alloc_list, link) { ++c; confirm_record_size(n, c + 3 * OPT_ALLOC_MAX); confirm_record_released(n); } - assert_u_eq(c, OPT_ALLOC_MAX - 1, - "Incorrect total number of allocations"); + assert_u_eq( + c, OPT_ALLOC_MAX - 1, "Incorrect total number of allocations"); /* Downshift to only one record. */ future = 1; - assert_d_eq(mallctl("experimental.prof_recent.alloc_max", - NULL, NULL, &future, sizeof(ssize_t)), 0, "Write error"); + assert_d_eq(mallctl("experimental.prof_recent.alloc_max", NULL, NULL, + &future, sizeof(ssize_t)), + 0, "Write error"); assert_false(ql_empty(&prof_recent_alloc_list), "Recent list is empty"); n = ql_first(&prof_recent_alloc_list); confirm_record_size(n, 4 * OPT_ALLOC_MAX - 1); @@ -325,17 +337,19 @@ TEST_BEGIN(test_prof_recent_alloc) { /* Completely turn off. 
*/ future = 0; - assert_d_eq(mallctl("experimental.prof_recent.alloc_max", - NULL, NULL, &future, sizeof(ssize_t)), 0, "Write error"); - assert_true(ql_empty(&prof_recent_alloc_list), - "Recent list should be empty"); + assert_d_eq(mallctl("experimental.prof_recent.alloc_max", NULL, NULL, + &future, sizeof(ssize_t)), + 0, "Write error"); + assert_true( + ql_empty(&prof_recent_alloc_list), "Recent list should be empty"); /* Restore the settings. */ future = OPT_ALLOC_MAX; - assert_d_eq(mallctl("experimental.prof_recent.alloc_max", - NULL, NULL, &future, sizeof(ssize_t)), 0, "Write error"); - assert_true(ql_empty(&prof_recent_alloc_list), - "Recent list should be empty"); + assert_d_eq(mallctl("experimental.prof_recent.alloc_max", NULL, NULL, + &future, sizeof(ssize_t)), + 0, "Write error"); + assert_true( + ql_empty(&prof_recent_alloc_list), "Recent list should be empty"); confirm_prof_setup(); } @@ -344,7 +358,7 @@ TEST_END #undef NTH_REQ_SIZE #define DUMP_OUT_SIZE 4096 -static char dump_out[DUMP_OUT_SIZE]; +static char dump_out[DUMP_OUT_SIZE]; static size_t dump_out_len = 0; static void @@ -359,14 +373,15 @@ static void call_dump(void) { static void *in[2] = {test_dump_write_cb, NULL}; dump_out_len = 0; - assert_d_eq(mallctl("experimental.prof_recent.alloc_dump", - NULL, NULL, in, sizeof(in)), 0, "Dump mallctl raised error"); + assert_d_eq(mallctl("experimental.prof_recent.alloc_dump", NULL, NULL, + in, sizeof(in)), + 0, "Dump mallctl raised error"); } typedef struct { size_t size; size_t usize; - bool released; + bool released; } confirm_record_t; #define DUMP_ERROR "Dump output is wrong" @@ -375,7 +390,7 @@ static void confirm_record(const char *template, const confirm_record_t *records, const size_t n_records) { static const char *types[2] = {"alloc", "dalloc"}; - static char buf[64]; + static char buf[64]; /* * The template string would be in the form of: @@ -384,32 +399,35 @@ confirm_record(const char *template, const confirm_record_t *records, * 
"{...,\"recent_alloc\":[...]}". * Using "- 2" serves to cut right before the ending "]}". */ - assert_d_eq(memcmp(dump_out, template, strlen(template) - 2), 0, - DUMP_ERROR); + assert_d_eq( + memcmp(dump_out, template, strlen(template) - 2), 0, DUMP_ERROR); assert_d_eq(memcmp(dump_out + strlen(dump_out) - 2, - template + strlen(template) - 2, 2), 0, DUMP_ERROR); + template + strlen(template) - 2, 2), + 0, DUMP_ERROR); - const char *start = dump_out + strlen(template) - 2; - const char *end = dump_out + strlen(dump_out) - 2; + const char *start = dump_out + strlen(template) - 2; + const char *end = dump_out + strlen(dump_out) - 2; const confirm_record_t *record; for (record = records; record < records + n_records; ++record) { +#define ASSERT_CHAR(c) \ + do { \ + assert_true(start < end, DUMP_ERROR); \ + assert_c_eq(*start++, c, DUMP_ERROR); \ + } while (0) -#define ASSERT_CHAR(c) do { \ - assert_true(start < end, DUMP_ERROR); \ - assert_c_eq(*start++, c, DUMP_ERROR); \ -} while (0) +#define ASSERT_STR(s) \ + do { \ + const size_t len = strlen(s); \ + assert_true(start + len <= end, DUMP_ERROR); \ + assert_d_eq(memcmp(start, s, len), 0, DUMP_ERROR); \ + start += len; \ + } while (0) -#define ASSERT_STR(s) do { \ - const size_t len = strlen(s); \ - assert_true(start + len <= end, DUMP_ERROR); \ - assert_d_eq(memcmp(start, s, len), 0, DUMP_ERROR); \ - start += len; \ -} while (0) - -#define ASSERT_FORMATTED_STR(s, ...) do { \ - malloc_snprintf(buf, sizeof(buf), s, __VA_ARGS__); \ - ASSERT_STR(buf); \ -} while (0) +#define ASSERT_FORMATTED_STR(s, ...) 
\ + do { \ + malloc_snprintf(buf, sizeof(buf), s, __VA_ARGS__); \ + ASSERT_STR(buf); \ + } while (0) if (record != records) { ASSERT_CHAR(','); @@ -442,10 +460,10 @@ confirm_record(const char *template, const confirm_record_t *records, ASSERT_CHAR(','); if (thd_has_setname() && opt_prof_sys_thread_name) { - ASSERT_FORMATTED_STR("\"%s_thread_name\"", - *type); - ASSERT_FORMATTED_STR(":\"%s\",", - test_thread_name); + ASSERT_FORMATTED_STR( + "\"%s_thread_name\"", *type); + ASSERT_FORMATTED_STR( + ":\"%s\",", test_thread_name); } ASSERT_FORMATTED_STR("\"%s_time\"", *type); @@ -458,9 +476,9 @@ confirm_record(const char *template, const confirm_record_t *records, ASSERT_FORMATTED_STR("\"%s_trace\"", *type); ASSERT_CHAR(':'); ASSERT_CHAR('['); - while (isdigit(*start) || *start == 'x' || - (*start >= 'a' && *start <= 'f') || - *start == '\"' || *start == ',') { + while (isdigit(*start) || *start == 'x' + || (*start >= 'a' && *start <= 'f') + || *start == '\"' || *start == ',') { ++start; } ASSERT_CHAR(']'); @@ -483,7 +501,6 @@ confirm_record(const char *template, const confirm_record_t *records, #undef ASSERT_FORMATTED_STR #undef ASSERT_STR #undef ASSERT_CHAR - } assert_ptr_eq(record, records + n_records, DUMP_ERROR); assert_ptr_eq(start, end, DUMP_ERROR); @@ -495,25 +512,30 @@ TEST_BEGIN(test_prof_recent_alloc_dump) { thd_setname(test_thread_name); confirm_prof_setup(); - ssize_t future; - void *p, *q; + ssize_t future; + void *p, *q; confirm_record_t records[2]; - assert_zu_eq(lg_prof_sample, (size_t)0, - "lg_prof_sample not set correctly"); + assert_zu_eq( + lg_prof_sample, (size_t)0, "lg_prof_sample not set correctly"); future = 0; - assert_d_eq(mallctl("experimental.prof_recent.alloc_max", - NULL, NULL, &future, sizeof(ssize_t)), 0, "Write error"); + assert_d_eq(mallctl("experimental.prof_recent.alloc_max", NULL, NULL, + &future, sizeof(ssize_t)), + 0, "Write error"); call_dump(); - expect_str_eq(dump_out, "{\"sample_interval\":1," - 
"\"recent_alloc_max\":0,\"recent_alloc\":[]}", DUMP_ERROR); + expect_str_eq(dump_out, + "{\"sample_interval\":1," + "\"recent_alloc_max\":0,\"recent_alloc\":[]}", + DUMP_ERROR); future = 2; - assert_d_eq(mallctl("experimental.prof_recent.alloc_max", - NULL, NULL, &future, sizeof(ssize_t)), 0, "Write error"); + assert_d_eq(mallctl("experimental.prof_recent.alloc_max", NULL, NULL, + &future, sizeof(ssize_t)), + 0, "Write error"); call_dump(); - const char *template = "{\"sample_interval\":1," + const char *template = + "{\"sample_interval\":1," "\"recent_alloc_max\":2,\"recent_alloc\":[]}"; expect_str_eq(dump_out, template, DUMP_ERROR); @@ -542,8 +564,9 @@ TEST_BEGIN(test_prof_recent_alloc_dump) { confirm_record(template, records, 2); future = OPT_ALLOC_MAX; - assert_d_eq(mallctl("experimental.prof_recent.alloc_max", - NULL, NULL, &future, sizeof(ssize_t)), 0, "Write error"); + assert_d_eq(mallctl("experimental.prof_recent.alloc_max", NULL, NULL, + &future, sizeof(ssize_t)), + 0, "Write error"); confirm_prof_setup(); } TEST_END @@ -558,14 +581,14 @@ TEST_END #define STRESS_ALLOC_MAX 4096 typedef struct { - thd_t thd; + thd_t thd; size_t id; - void *ptrs[N_PTRS]; + void *ptrs[N_PTRS]; size_t count; } thd_data_t; static thd_data_t thd_data[N_THREADS]; -static ssize_t test_max; +static ssize_t test_max; static void test_write_cb(void *cbopaque, const char *str) { @@ -575,11 +598,11 @@ test_write_cb(void *cbopaque, const char *str) { static void * f_thread(void *arg) { const size_t thd_id = *(size_t *)arg; - thd_data_t *data_p = thd_data + thd_id; + thd_data_t *data_p = thd_data + thd_id; assert(data_p->id == thd_id); data_p->count = 0; uint64_t rand = (uint64_t)thd_id; - tsd_t *tsd = tsd_fetch(); + tsd_t *tsd = tsd_fetch(); assert(test_max > 1); ssize_t last_max = -1; for (int i = 0; i < N_ITERS; i++) { @@ -603,15 +626,15 @@ f_thread(void *arg) { } else if (rand % 5 == 1) { last_max = prof_recent_alloc_max_ctl_read(); } else if (rand % 5 == 2) { - last_max = - 
prof_recent_alloc_max_ctl_write(tsd, test_max * 2); + last_max = prof_recent_alloc_max_ctl_write( + tsd, test_max * 2); } else if (rand % 5 == 3) { - last_max = - prof_recent_alloc_max_ctl_write(tsd, test_max); + last_max = prof_recent_alloc_max_ctl_write( + tsd, test_max); } else { assert(rand % 5 == 4); - last_max = - prof_recent_alloc_max_ctl_write(tsd, test_max / 2); + last_max = prof_recent_alloc_max_ctl_write( + tsd, test_max / 2); } assert_zd_ge(last_max, -1, "Illegal last-N max"); } @@ -640,8 +663,9 @@ TEST_BEGIN(test_prof_recent_stress) { } test_max = STRESS_ALLOC_MAX; - assert_d_eq(mallctl("experimental.prof_recent.alloc_max", - NULL, NULL, &test_max, sizeof(ssize_t)), 0, "Write error"); + assert_d_eq(mallctl("experimental.prof_recent.alloc_max", NULL, NULL, + &test_max, sizeof(ssize_t)), + 0, "Write error"); for (size_t i = 0; i < N_THREADS; i++) { thd_data_t *data_p = thd_data + i; data_p->id = i; @@ -653,8 +677,9 @@ TEST_BEGIN(test_prof_recent_stress) { } test_max = OPT_ALLOC_MAX; - assert_d_eq(mallctl("experimental.prof_recent.alloc_max", - NULL, NULL, &test_max, sizeof(ssize_t)), 0, "Write error"); + assert_d_eq(mallctl("experimental.prof_recent.alloc_max", NULL, NULL, + &test_max, sizeof(ssize_t)), + 0, "Write error"); confirm_prof_setup(); } TEST_END @@ -666,11 +691,7 @@ TEST_END int main(void) { - return test( - test_confirm_setup, - test_prof_recent_off, - test_prof_recent_on, - test_prof_recent_alloc, - test_prof_recent_alloc_dump, - test_prof_recent_stress); + return test(test_confirm_setup, test_prof_recent_off, + test_prof_recent_on, test_prof_recent_alloc, + test_prof_recent_alloc_dump, test_prof_recent_stress); } diff --git a/test/unit/prof_reset.c b/test/unit/prof_reset.c index 9b33b205..0e64279e 100644 --- a/test/unit/prof_reset.c +++ b/test/unit/prof_reset.c @@ -15,8 +15,9 @@ prof_dump_open_file_intercept(const char *filename, int mode) { static void set_prof_active(bool active) { - expect_d_eq(mallctl("prof.active", NULL, NULL, (void 
*)&active, - sizeof(active)), 0, "Unexpected mallctl failure"); + expect_d_eq( + mallctl("prof.active", NULL, NULL, (void *)&active, sizeof(active)), + 0, "Unexpected mallctl failure"); } static size_t @@ -32,25 +33,26 @@ get_lg_prof_sample(void) { static void do_prof_reset(size_t lg_prof_sample_input) { expect_d_eq(mallctl("prof.reset", NULL, NULL, - (void *)&lg_prof_sample_input, sizeof(size_t)), 0, - "Unexpected mallctl failure while resetting profile data"); + (void *)&lg_prof_sample_input, sizeof(size_t)), + 0, "Unexpected mallctl failure while resetting profile data"); expect_zu_eq(lg_prof_sample_input, get_lg_prof_sample(), "Expected profile sample rate change"); } TEST_BEGIN(test_prof_reset_basic) { - size_t lg_prof_sample_orig, lg_prof_sample_cur, lg_prof_sample_next; - size_t sz; + size_t lg_prof_sample_orig, lg_prof_sample_cur, lg_prof_sample_next; + size_t sz; unsigned i; test_skip_if(!config_prof); sz = sizeof(size_t); expect_d_eq(mallctl("opt.lg_prof_sample", (void *)&lg_prof_sample_orig, - &sz, NULL, 0), 0, + &sz, NULL, 0), + 0, "Unexpected mallctl failure while reading profiling sample rate"); - expect_zu_eq(lg_prof_sample_orig, 0, - "Unexpected profiling sample rate"); + expect_zu_eq( + lg_prof_sample_orig, 0, "Unexpected profiling sample rate"); lg_prof_sample_cur = get_lg_prof_sample(); expect_zu_eq(lg_prof_sample_orig, lg_prof_sample_cur, "Unexpected disagreement between \"opt.lg_prof_sample\" and " @@ -110,23 +112,24 @@ TEST_BEGIN(test_prof_reset_cleanup) { } TEST_END -#define NTHREADS 4 -#define NALLOCS_PER_THREAD (1U << 13) -#define OBJ_RING_BUF_COUNT 1531 -#define RESET_INTERVAL (1U << 10) -#define DUMP_INTERVAL 3677 +#define NTHREADS 4 +#define NALLOCS_PER_THREAD (1U << 13) +#define OBJ_RING_BUF_COUNT 1531 +#define RESET_INTERVAL (1U << 10) +#define DUMP_INTERVAL 3677 static void * thd_start(void *varg) { unsigned thd_ind = *(unsigned *)varg; unsigned i; - void *objs[OBJ_RING_BUF_COUNT]; + void *objs[OBJ_RING_BUF_COUNT]; memset(objs, 0, 
sizeof(objs)); for (i = 0; i < NALLOCS_PER_THREAD; i++) { if (i % RESET_INTERVAL == 0) { expect_d_eq(mallctl("prof.reset", NULL, NULL, NULL, 0), - 0, "Unexpected error while resetting heap profile " + 0, + "Unexpected error while resetting heap profile " "data"); } @@ -141,9 +144,9 @@ thd_start(void *varg) { dallocx(*pp, 0); *pp = NULL; } - *pp = btalloc(1, thd_ind*NALLOCS_PER_THREAD + i); - expect_ptr_not_null(*pp, - "Unexpected btalloc() failure"); + *pp = btalloc(1, thd_ind * NALLOCS_PER_THREAD + i); + expect_ptr_not_null( + *pp, "Unexpected btalloc() failure"); } } @@ -160,17 +163,16 @@ thd_start(void *varg) { } TEST_BEGIN(test_prof_reset) { - size_t lg_prof_sample_orig; - thd_t thds[NTHREADS]; + size_t lg_prof_sample_orig; + thd_t thds[NTHREADS]; unsigned thd_args[NTHREADS]; unsigned i; - size_t bt_count, tdata_count; + size_t bt_count, tdata_count; test_skip_if(!config_prof); bt_count = prof_bt_count(); - expect_zu_eq(bt_count, 0, - "Unexpected pre-existing tdata structures"); + expect_zu_eq(bt_count, 0, "Unexpected pre-existing tdata structures"); tdata_count = prof_tdata_count(); lg_prof_sample_orig = get_lg_prof_sample(); @@ -186,8 +188,8 @@ TEST_BEGIN(test_prof_reset) { thd_join(thds[i], NULL); } - expect_zu_eq(prof_bt_count(), bt_count, - "Unexpected bactrace count change"); + expect_zu_eq( + prof_bt_count(), bt_count, "Unexpected bactrace count change"); expect_zu_eq(prof_tdata_count(), tdata_count, "Unexpected remaining tdata structures"); @@ -205,9 +207,9 @@ TEST_END /* Test sampling at the same allocation site across resets. */ #define NITER 10 TEST_BEGIN(test_xallocx) { - size_t lg_prof_sample_orig; + size_t lg_prof_sample_orig; unsigned i; - void *ptrs[NITER]; + void *ptrs[NITER]; test_skip_if(!config_prof); @@ -218,7 +220,7 @@ TEST_BEGIN(test_xallocx) { do_prof_reset(0); for (i = 0; i < NITER; i++) { - void *p; + void *p; size_t sz, nsz; /* Reset profiling. */ @@ -233,13 +235,13 @@ TEST_BEGIN(test_xallocx) { /* Perform successful xallocx(). 
*/ sz = sallocx(p, 0); - expect_zu_eq(xallocx(p, sz, 0, 0), sz, - "Unexpected xallocx() failure"); + expect_zu_eq( + xallocx(p, sz, 0, 0), sz, "Unexpected xallocx() failure"); /* Perform unsuccessful xallocx(). */ - nsz = nallocx(sz+1, 0); - expect_zu_eq(xallocx(p, nsz, 0, 0), sz, - "Unexpected xallocx() success"); + nsz = nallocx(sz + 1, 0); + expect_zu_eq( + xallocx(p, nsz, 0, 0), sz, "Unexpected xallocx() success"); } for (i = 0; i < NITER; i++) { @@ -258,9 +260,6 @@ main(void) { /* Intercept dumping prior to running any tests. */ prof_dump_open_file = prof_dump_open_file_intercept; - return test_no_reentrancy( - test_prof_reset_basic, - test_prof_reset_cleanup, - test_prof_reset, - test_xallocx); + return test_no_reentrancy(test_prof_reset_basic, + test_prof_reset_cleanup, test_prof_reset, test_xallocx); } diff --git a/test/unit/prof_small.c b/test/unit/prof_small.c index e3462c1f..993a83a7 100644 --- a/test/unit/prof_small.c +++ b/test/unit/prof_small.c @@ -1,6 +1,7 @@ #include "test/jemalloc_test.h" -static void assert_small_allocation_sampled(void *ptr, size_t size) { +static void +assert_small_allocation_sampled(void *ptr, size_t size) { assert_ptr_not_null(ptr, "Unexpected malloc failure"); assert_zu_le(size, SC_SMALL_MAXCLASS, "Unexpected large size class"); edata_t *edata = emap_edata_lookup(TSDN_NULL, &arena_emap_global, ptr); @@ -24,7 +25,7 @@ TEST_BEGIN(test_profile_small_allocations) { for (szind_t index = 0; index < SC_NBINS; index++) { size_t size = sz_index2size(index); - void *ptr = malloc(size); + void *ptr = malloc(size); assert_small_allocation_sampled(ptr, size); free(ptr); } @@ -36,7 +37,7 @@ TEST_BEGIN(test_profile_small_allocations_sdallocx) { for (szind_t index = 0; index < SC_NBINS; index++) { size_t size = sz_index2size(index); - void *ptr = malloc(size); + void *ptr = malloc(size); assert_small_allocation_sampled(ptr, size); /* * While free calls into ifree, sdallocx calls into isfree, @@ -86,7 +87,7 @@ 
TEST_BEGIN(test_profile_small_reallocations_same_size_class) { for (szind_t index = 0; index < SC_NBINS; index++) { size_t size = sz_index2size(index); - void *ptr = malloc(size); + void *ptr = malloc(size); assert_small_allocation_sampled(ptr, size); ptr = realloc(ptr, size - 1); assert_small_allocation_sampled(ptr, size); diff --git a/test/unit/prof_stats.c b/test/unit/prof_stats.c index c88c4ae0..95ca051c 100644 --- a/test/unit/prof_stats.c +++ b/test/unit/prof_stats.c @@ -3,8 +3,8 @@ #define N_PTRS 3 static void -test_combinations(szind_t ind, size_t sizes_array[N_PTRS], - int flags_array[N_PTRS]) { +test_combinations( + szind_t ind, size_t sizes_array[N_PTRS], int flags_array[N_PTRS]) { #define MALLCTL_STR_LEN 64 assert(opt_prof && opt_prof_stats); @@ -25,11 +25,13 @@ test_combinations(szind_t ind, size_t sizes_array[N_PTRS], size_t stats_len = 2 * sizeof(uint64_t); uint64_t live_stats_orig[2]; - assert_d_eq(mallctl(mallctl_live_str, &live_stats_orig, &stats_len, - NULL, 0), 0, ""); + assert_d_eq( + mallctl(mallctl_live_str, &live_stats_orig, &stats_len, NULL, 0), 0, + ""); uint64_t accum_stats_orig[2]; - assert_d_eq(mallctl(mallctl_accum_str, &accum_stats_orig, &stats_len, - NULL, 0), 0, ""); + assert_d_eq( + mallctl(mallctl_accum_str, &accum_stats_orig, &stats_len, NULL, 0), + 0, ""); void *ptrs[N_PTRS]; @@ -40,8 +42,8 @@ test_combinations(szind_t ind, size_t sizes_array[N_PTRS], for (size_t i = 0; i < N_PTRS; ++i) { size_t sz = sizes_array[i]; - int flags = flags_array[i]; - void *p = mallocx(sz, flags); + int flags = flags_array[i]; + void *p = mallocx(sz, flags); assert_ptr_not_null(p, "malloc() failed"); assert(TEST_MALLOC_SIZE(p) == sz_index2size(ind)); ptrs[i] = p; @@ -50,41 +52,45 @@ test_combinations(szind_t ind, size_t sizes_array[N_PTRS], accum_req_sum += sz; accum_count++; uint64_t live_stats[2]; - assert_d_eq(mallctl(mallctl_live_str, &live_stats, &stats_len, - NULL, 0), 0, ""); - expect_u64_eq(live_stats[0] - live_stats_orig[0], - live_req_sum, 
""); - expect_u64_eq(live_stats[1] - live_stats_orig[1], - live_count, ""); + assert_d_eq( + mallctl(mallctl_live_str, &live_stats, &stats_len, NULL, 0), + 0, ""); + expect_u64_eq( + live_stats[0] - live_stats_orig[0], live_req_sum, ""); + expect_u64_eq( + live_stats[1] - live_stats_orig[1], live_count, ""); uint64_t accum_stats[2]; assert_d_eq(mallctl(mallctl_accum_str, &accum_stats, &stats_len, - NULL, 0), 0, ""); - expect_u64_eq(accum_stats[0] - accum_stats_orig[0], - accum_req_sum, ""); - expect_u64_eq(accum_stats[1] - accum_stats_orig[1], - accum_count, ""); + NULL, 0), + 0, ""); + expect_u64_eq( + accum_stats[0] - accum_stats_orig[0], accum_req_sum, ""); + expect_u64_eq( + accum_stats[1] - accum_stats_orig[1], accum_count, ""); } for (size_t i = 0; i < N_PTRS; ++i) { size_t sz = sizes_array[i]; - int flags = flags_array[i]; + int flags = flags_array[i]; sdallocx(ptrs[i], sz, flags); live_req_sum -= sz; live_count--; uint64_t live_stats[2]; - assert_d_eq(mallctl(mallctl_live_str, &live_stats, &stats_len, - NULL, 0), 0, ""); - expect_u64_eq(live_stats[0] - live_stats_orig[0], - live_req_sum, ""); - expect_u64_eq(live_stats[1] - live_stats_orig[1], - live_count, ""); + assert_d_eq( + mallctl(mallctl_live_str, &live_stats, &stats_len, NULL, 0), + 0, ""); + expect_u64_eq( + live_stats[0] - live_stats_orig[0], live_req_sum, ""); + expect_u64_eq( + live_stats[1] - live_stats_orig[1], live_count, ""); uint64_t accum_stats[2]; assert_d_eq(mallctl(mallctl_accum_str, &accum_stats, &stats_len, - NULL, 0), 0, ""); - expect_u64_eq(accum_stats[0] - accum_stats_orig[0], - accum_req_sum, ""); - expect_u64_eq(accum_stats[1] - accum_stats_orig[1], - accum_count, ""); + NULL, 0), + 0, ""); + expect_u64_eq( + accum_stats[0] - accum_stats_orig[0], accum_req_sum, ""); + expect_u64_eq( + accum_stats[1] - accum_stats_orig[1], accum_count, ""); } #undef MALLCTL_STR_LEN } @@ -92,9 +98,9 @@ test_combinations(szind_t ind, size_t sizes_array[N_PTRS], static void test_szind_wrapper(szind_t 
ind) { size_t sizes_array[N_PTRS]; - int flags_array[N_PTRS]; + int flags_array[N_PTRS]; for (size_t i = 0, sz = sz_index2size(ind) - N_PTRS; i < N_PTRS; - ++i, ++sz) { + ++i, ++sz) { sizes_array[i] = sz; flags_array[i] = 0; } @@ -115,10 +121,10 @@ TEST_END static void test_szind_aligned_wrapper(szind_t ind, unsigned lg_align) { size_t sizes_array[N_PTRS]; - int flags_array[N_PTRS]; - int flags = MALLOCX_LG_ALIGN(lg_align); + int flags_array[N_PTRS]; + int flags = MALLOCX_LG_ALIGN(lg_align); for (size_t i = 0, sz = sz_index2size(ind) - N_PTRS; i < N_PTRS; - ++i, ++sz) { + ++i, ++sz) { sizes_array[i] = sz; flags_array[i] = flags; } @@ -136,7 +142,7 @@ TEST_BEGIN(test_prof_stats_aligned) { } for (szind_t ind = SC_NBINS - 5; ind < SC_NBINS + 5; ++ind) { for (unsigned lg_align = SC_LG_LARGE_MINCLASS - 5; - lg_align < SC_LG_LARGE_MINCLASS + 5; ++lg_align) { + lg_align < SC_LG_LARGE_MINCLASS + 5; ++lg_align) { test_szind_aligned_wrapper(ind, lg_align); } } @@ -145,7 +151,5 @@ TEST_END int main(void) { - return test( - test_prof_stats, - test_prof_stats_aligned); + return test(test_prof_stats, test_prof_stats_aligned); } diff --git a/test/unit/prof_sys_thread_name.c b/test/unit/prof_sys_thread_name.c index 3aeb8cf1..242e2fc3 100644 --- a/test/unit/prof_sys_thread_name.c +++ b/test/unit/prof_sys_thread_name.c @@ -28,7 +28,7 @@ TEST_BEGIN(test_prof_sys_thread_name) { test_skip_if(!config_prof); test_skip_if(!opt_prof_sys_thread_name); - bool oldval; + bool oldval; size_t sz = sizeof(oldval); assert_d_eq(mallctl("opt.prof_sys_thread_name", &oldval, &sz, NULL, 0), 0, "mallctl failed"); @@ -43,8 +43,8 @@ TEST_BEGIN(test_prof_sys_thread_name) { thread_name = test_thread_name; assert_d_eq(mallctl("thread.prof.name", NULL, NULL, &thread_name, sz), ENOENT, "mallctl write for thread name should fail"); - assert_ptr_eq(thread_name, test_thread_name, - "Thread name should not be touched"); + assert_ptr_eq( + thread_name, test_thread_name, "Thread name should not be touched"); 
prof_sys_thread_name_read_t *orig_prof_sys_thread_name_read = prof_sys_thread_name_read; @@ -69,14 +69,15 @@ TEST_BEGIN(test_prof_sys_thread_name) { free(p); assert_d_eq(mallctl("thread.prof.name", &thread_name, &sz, NULL, 0), 0, "mallctl read for thread name should not fail"); - expect_str_eq(thread_name, "", "Thread name should be updated if the " + expect_str_eq(thread_name, "", + "Thread name should be updated if the " "system call returns a different name"); prof_sys_thread_name_read = orig_prof_sys_thread_name_read; } TEST_END -#define ITER (16*1024) +#define ITER (16 * 1024) static void * thd_start(void *unused) { /* Triggering samples which loads thread names. */ @@ -94,7 +95,7 @@ TEST_BEGIN(test_prof_sys_thread_name_mt) { test_skip_if(!opt_prof_sys_thread_name); #define NTHREADS 4 - thd_t thds[NTHREADS]; + thd_t thds[NTHREADS]; unsigned thd_args[NTHREADS]; unsigned i; @@ -105,8 +106,8 @@ TEST_BEGIN(test_prof_sys_thread_name_mt) { /* Prof dump which reads the thread names. */ for (i = 0; i < ITER; i++) { expect_d_eq(mallctl("prof.dump", NULL, NULL, - (void *)&dump_filename, sizeof(dump_filename)), 0, - "Unexpected mallctl failure while dumping"); + (void *)&dump_filename, sizeof(dump_filename)), + 0, "Unexpected mallctl failure while dumping"); } for (i = 0; i < NTHREADS; i++) { @@ -119,7 +120,5 @@ TEST_END int main(void) { - return test( - test_prof_sys_thread_name, - test_prof_sys_thread_name_mt); + return test(test_prof_sys_thread_name, test_prof_sys_thread_name_mt); } diff --git a/test/unit/prof_tctx.c b/test/unit/prof_tctx.c index d19dd395..7fde7230 100644 --- a/test/unit/prof_tctx.c +++ b/test/unit/prof_tctx.c @@ -3,11 +3,11 @@ #include "jemalloc/internal/prof_data.h" TEST_BEGIN(test_prof_realloc) { - tsd_t *tsd; - int flags; - void *p, *q; + tsd_t *tsd; + int flags; + void *p, *q; prof_info_t prof_info_p, prof_info_q; - prof_cnt_t cnt_0, cnt_1, cnt_2, cnt_3; + prof_cnt_t cnt_0, cnt_1, cnt_2, cnt_3; test_skip_if(!config_prof); @@ -18,8 +18,8 @@ 
TEST_BEGIN(test_prof_realloc) { p = mallocx(1024, flags); expect_ptr_not_null(p, "Unexpected mallocx() failure"); prof_info_get(tsd, p, NULL, &prof_info_p); - expect_ptr_ne(prof_info_p.alloc_tctx, PROF_TCTX_SENTINEL, - "Expected valid tctx"); + expect_ptr_ne( + prof_info_p.alloc_tctx, PROF_TCTX_SENTINEL, "Expected valid tctx"); prof_cnt_all(&cnt_1); expect_u64_eq(cnt_0.curobjs + 1, cnt_1.curobjs, "Allocation should have increased sample size"); @@ -28,8 +28,8 @@ TEST_BEGIN(test_prof_realloc) { expect_ptr_ne(p, q, "Expected move"); expect_ptr_not_null(p, "Unexpected rmallocx() failure"); prof_info_get(tsd, q, NULL, &prof_info_q); - expect_ptr_ne(prof_info_q.alloc_tctx, PROF_TCTX_SENTINEL, - "Expected valid tctx"); + expect_ptr_ne( + prof_info_q.alloc_tctx, PROF_TCTX_SENTINEL, "Expected valid tctx"); prof_cnt_all(&cnt_2); expect_u64_eq(cnt_1.curobjs, cnt_2.curobjs, "Reallocation should not have changed sample size"); @@ -43,6 +43,5 @@ TEST_END int main(void) { - return test_no_reentrancy( - test_prof_realloc); + return test_no_reentrancy(test_prof_realloc); } diff --git a/test/unit/prof_thread_name.c b/test/unit/prof_thread_name.c index 0fc29f75..8b12c435 100644 --- a/test/unit/prof_thread_name.c +++ b/test/unit/prof_thread_name.c @@ -1,34 +1,34 @@ #include "test/jemalloc_test.h" static void -mallctl_thread_name_get_impl(const char *thread_name_expected, const char *func, - int line) { +mallctl_thread_name_get_impl( + const char *thread_name_expected, const char *func, int line) { const char *thread_name_old; - size_t sz; + size_t sz; sz = sizeof(thread_name_old); - expect_d_eq(mallctl("thread.prof.name", (void *)&thread_name_old, &sz, - NULL, 0), 0, - "%s():%d: Unexpected mallctl failure reading thread.prof.name", + expect_d_eq( + mallctl("thread.prof.name", (void *)&thread_name_old, &sz, NULL, 0), + 0, "%s():%d: Unexpected mallctl failure reading thread.prof.name", func, line); expect_str_eq(thread_name_old, thread_name_expected, "%s():%d: Unexpected 
thread.prof.name value", func, line); } static void -mallctl_thread_name_set_impl(const char *thread_name, const char *func, - int line) { +mallctl_thread_name_set_impl( + const char *thread_name, const char *func, int line) { expect_d_eq(mallctl("thread.prof.name", NULL, NULL, - (void *)&thread_name, sizeof(thread_name)), 0, - "%s():%d: Unexpected mallctl failure writing thread.prof.name", + (void *)&thread_name, sizeof(thread_name)), + 0, "%s():%d: Unexpected mallctl failure writing thread.prof.name", func, line); mallctl_thread_name_get_impl(thread_name, func, line); } -#define mallctl_thread_name_get(a) \ +#define mallctl_thread_name_get(a) \ mallctl_thread_name_get_impl(a, __func__, __LINE__) -#define mallctl_thread_name_set(a) \ +#define mallctl_thread_name_set(a) \ mallctl_thread_name_set_impl(a, __func__, __LINE__) TEST_BEGIN(test_prof_thread_name_validation) { @@ -44,34 +44,35 @@ TEST_BEGIN(test_prof_thread_name_validation) { char long_name[] = "test case longer than expected; test case longer than expected"; expect_zu_gt(strlen(long_name), PROF_THREAD_NAME_MAX_LEN, - "Long test name not long enough"); + "Long test name not long enough"); const char *test_name_long = long_name; expect_d_eq(mallctl("thread.prof.name", NULL, NULL, - (void *)&test_name_long, sizeof(test_name_long)), 0, - "Unexpected mallctl failure from thread.prof.name"); + (void *)&test_name_long, sizeof(test_name_long)), + 0, "Unexpected mallctl failure from thread.prof.name"); /* Long name cut to match. */ long_name[PROF_THREAD_NAME_MAX_LEN - 1] = '\0'; mallctl_thread_name_get(test_name_long); /* NULL input shouldn't be allowed. 
*/ const char *test_name2 = NULL; - expect_d_eq(mallctl("thread.prof.name", NULL, NULL, - (void *)&test_name2, sizeof(test_name2)), EINVAL, - "Unexpected mallctl result writing to thread.prof.name"); + expect_d_eq(mallctl("thread.prof.name", NULL, NULL, (void *)&test_name2, + sizeof(test_name2)), + EINVAL, "Unexpected mallctl result writing to thread.prof.name"); /* '\n' shouldn't be allowed. */ const char *test_name3 = "test\ncase"; - expect_d_eq(mallctl("thread.prof.name", NULL, NULL, - (void *)&test_name3, sizeof(test_name3)), EINVAL, + expect_d_eq(mallctl("thread.prof.name", NULL, NULL, (void *)&test_name3, + sizeof(test_name3)), + EINVAL, "Unexpected mallctl result writing \"%s\" to thread.prof.name", test_name3); /* Simultaneous read/write shouldn't be allowed. */ const char *thread_name_old; - size_t sz = sizeof(thread_name_old); + size_t sz = sizeof(thread_name_old); expect_d_eq(mallctl("thread.prof.name", (void *)&thread_name_old, &sz, - (void *)&test_name1, sizeof(test_name1)), EPERM, - "Unexpected mallctl result from thread.prof.name"); + (void *)&test_name1, sizeof(test_name1)), + EPERM, "Unexpected mallctl result from thread.prof.name"); mallctl_thread_name_set(""); } @@ -80,7 +81,7 @@ TEST_END static void * thd_start(void *varg) { unsigned thd_ind = *(unsigned *)varg; - char thread_name[16] = ""; + char thread_name[16] = ""; unsigned i; malloc_snprintf(thread_name, sizeof(thread_name), "thread %u", thd_ind); @@ -107,7 +108,7 @@ TEST_BEGIN(test_prof_thread_name_threaded) { test_skip_if(opt_prof_sys_thread_name); #define NTHREADS 4 - thd_t thds[NTHREADS]; + thd_t thds[NTHREADS]; unsigned thd_args[NTHREADS]; unsigned i; @@ -125,6 +126,5 @@ TEST_END int main(void) { return test( - test_prof_thread_name_validation, - test_prof_thread_name_threaded); + test_prof_thread_name_validation, test_prof_thread_name_threaded); } diff --git a/test/unit/prof_threshold.c b/test/unit/prof_threshold.c index c6f53983..a31a5a24 100644 --- a/test/unit/prof_threshold.c +++ 
b/test/unit/prof_threshold.c @@ -23,9 +23,10 @@ static void read_write_prof_threshold_hook(prof_threshold_hook_t *to_read, bool do_write, prof_threshold_hook_t to_write) { size_t hook_sz = sizeof(prof_threshold_hook_t); - expect_d_eq(mallctl("experimental.hooks.prof_threshold", - (void *)to_read, &hook_sz, do_write ? &to_write : NULL, hook_sz), 0, - "Unexpected prof_threshold_hook mallctl failure"); + expect_d_eq( + mallctl("experimental.hooks.prof_threshold", (void *)to_read, + &hook_sz, do_write ? &to_write : NULL, hook_sz), + 0, "Unexpected prof_threshold_hook mallctl failure"); } static void @@ -40,7 +41,8 @@ read_prof_threshold_hook() { return hook; } -static void reset_test_config() { +static void +reset_test_config() { hook_calls = 0; last_peak = 0; alloc_baseline = last_alloc; /* We run the test multiple times */ @@ -49,15 +51,20 @@ static void reset_test_config() { chunk_size = threshold_bytes / ALLOC_ITERATIONS_IN_THRESHOLD; } -static void expect_threshold_calls(int calls) { - expect_u64_eq(hook_calls, calls, "Hook called the right amount of times"); - expect_u64_lt(last_peak, chunk_size * 2, "We allocate chunk_size at a time"); - expect_u64_ge(last_alloc, threshold_bytes * calls + alloc_baseline, "Crosses"); +static void +expect_threshold_calls(int calls) { + expect_u64_eq( + hook_calls, calls, "Hook called the right amount of times"); + expect_u64_lt( + last_peak, chunk_size * 2, "We allocate chunk_size at a time"); + expect_u64_ge( + last_alloc, threshold_bytes * calls + alloc_baseline, "Crosses"); } -static void allocate_chunks(int chunks) { +static void +allocate_chunks(int chunks) { for (int i = 0; i < chunks; i++) { - void* p = mallocx((size_t)chunk_size, 0); + void *p = mallocx((size_t)chunk_size, 0); expect_ptr_not_null(p, "Failed to allocate"); free(p); } @@ -68,7 +75,8 @@ TEST_BEGIN(test_prof_threshold_hook) { /* Test setting and reading the hook (both value and null) */ write_prof_threshold_hook(mock_prof_threshold_hook); - 
expect_ptr_eq(read_prof_threshold_hook(), mock_prof_threshold_hook, "Unexpected hook"); + expect_ptr_eq(read_prof_threshold_hook(), mock_prof_threshold_hook, + "Unexpected hook"); write_prof_threshold_hook(NULL); expect_ptr_null(read_prof_threshold_hook(), "Hook was erased"); @@ -100,6 +108,5 @@ TEST_END int main(void) { - return test( - test_prof_threshold_hook); + return test(test_prof_threshold_hook); } diff --git a/test/unit/psset.c b/test/unit/psset.c index c834e531..73a9835a 100644 --- a/test/unit/psset.c +++ b/test/unit/psset.c @@ -21,8 +21,8 @@ test_psset_fake_purge(hpdata_t *ps) { hpdata_alloc_allowed_set(ps, false); size_t nranges; hpdata_purge_begin(ps, &purge_state, &nranges); - (void) nranges; - void *addr; + (void)nranges; + void *addr; size_t size; while (hpdata_purge_next(ps, &purge_state, &addr, &size)) { } @@ -31,8 +31,8 @@ test_psset_fake_purge(hpdata_t *ps) { } static void -test_psset_alloc_new(psset_t *psset, hpdata_t *ps, edata_t *r_edata, - size_t size) { +test_psset_alloc_new( + psset_t *psset, hpdata_t *ps, edata_t *r_edata, size_t size) { hpdata_assert_empty(ps); test_psset_fake_purge(ps); @@ -40,12 +40,12 @@ test_psset_alloc_new(psset_t *psset, hpdata_t *ps, edata_t *r_edata, psset_insert(psset, ps); psset_update_begin(psset, ps); - void *addr = hpdata_reserve_alloc(ps, size); - edata_init(r_edata, edata_arena_ind_get(r_edata), addr, size, + void *addr = hpdata_reserve_alloc(ps, size); + edata_init(r_edata, edata_arena_ind_get(r_edata), addr, size, /* slab */ false, SC_NSIZES, /* sn */ 0, extent_state_active, - /* zeroed */ false, /* committed */ true, EXTENT_PAI_HPA, - EXTENT_NOT_HEAD); - edata_ps_set(r_edata, ps); + /* zeroed */ false, /* committed */ true, EXTENT_PAI_HPA, + EXTENT_NOT_HEAD); + edata_ps_set(r_edata, ps); psset_update_end(psset, ps); } @@ -104,15 +104,14 @@ edata_expect(edata_t *edata, size_t page_offset, size_t page_cnt) { * Note that allocations should get the arena ind of their home * arena, *not* the arena ind of the 
pageslab allocator. */ - expect_u_eq(ALLOC_ARENA_IND, edata_arena_ind_get(edata), - "Arena ind changed"); + expect_u_eq( + ALLOC_ARENA_IND, edata_arena_ind_get(edata), "Arena ind changed"); expect_ptr_eq( (void *)((uintptr_t)PAGESLAB_ADDR + (page_offset << LG_PAGE)), edata_addr_get(edata), "Didn't allocate in order"); expect_zu_eq(page_cnt << LG_PAGE, edata_size_get(edata), ""); expect_false(edata_slab_get(edata), ""); - expect_u_eq(SC_NSIZES, edata_szind_get_maybe_invalid(edata), - ""); + expect_u_eq(SC_NSIZES, edata_szind_get_maybe_invalid(edata), ""); expect_u64_eq(0, edata_sn_get(edata), ""); expect_d_eq(edata_state_get(edata), extent_state_active, ""); expect_false(edata_zeroed_get(edata), ""); @@ -123,7 +122,7 @@ edata_expect(edata_t *edata, size_t page_offset, size_t page_cnt) { TEST_BEGIN(test_empty) { test_skip_if(hpa_hugepage_size_exceeds_limit()); - bool err; + bool err; hpdata_t pageslab; hpdata_init(&pageslab, PAGESLAB_ADDR, PAGESLAB_AGE); @@ -176,7 +175,7 @@ TEST_END TEST_BEGIN(test_reuse) { test_skip_if(hpa_hugepage_size_exceeds_limit()); - bool err; + bool err; hpdata_t *ps; hpdata_t pageslab; @@ -196,7 +195,7 @@ TEST_BEGIN(test_reuse) { } /* Free odd indices. 
*/ - for (size_t i = 0; i < HUGEPAGE_PAGES; i ++) { + for (size_t i = 0; i < HUGEPAGE_PAGES; i++) { if (i % 2 == 0) { continue; } @@ -271,7 +270,7 @@ TEST_END TEST_BEGIN(test_evict) { test_skip_if(hpa_hugepage_size_exceeds_limit()); - bool err; + bool err; hpdata_t *ps; hpdata_t pageslab; @@ -308,16 +307,15 @@ TEST_END TEST_BEGIN(test_multi_pageslab) { test_skip_if(hpa_hugepage_size_exceeds_limit()); - bool err; + bool err; hpdata_t *ps; hpdata_t pageslab[2]; hpdata_init(&pageslab[0], PAGESLAB_ADDR, PAGESLAB_AGE); - hpdata_init(&pageslab[1], - (void *)((uintptr_t)PAGESLAB_ADDR + HUGEPAGE), + hpdata_init(&pageslab[1], (void *)((uintptr_t)PAGESLAB_ADDR + HUGEPAGE), PAGESLAB_AGE + 1); - edata_t* alloc[2]; + edata_t *alloc[2]; alloc[0] = (edata_t *)malloc(sizeof(edata_t) * HUGEPAGE_PAGES); alloc[1] = (edata_t *)malloc(sizeof(edata_t) * HUGEPAGE_PAGES); @@ -334,9 +332,10 @@ TEST_BEGIN(test_multi_pageslab) { for (size_t i = 0; i < 2; i++) { for (size_t j = 1; j < HUGEPAGE_PAGES; j++) { edata_init_test(&alloc[i][j]); - err = test_psset_alloc_reuse(&psset, &alloc[i][j], PAGE); - expect_false(err, - "Nonempty psset failed page allocation."); + err = test_psset_alloc_reuse( + &psset, &alloc[i][j], PAGE); + expect_false( + err, "Nonempty psset failed page allocation."); assert_ptr_eq(&pageslab[i], edata_ps_get(&alloc[i][j]), "Didn't pick pageslabs in first-fit"); } @@ -505,7 +504,8 @@ TEST_BEGIN(test_stats_huge) { expect_zu_eq(1, psset.stats.slabs[0].npageslabs, ""); expect_zu_eq(i, psset.stats.slabs[0].nactive, ""); - expect_zu_eq(HUGEPAGE_PAGES - i, psset.stats.slabs[0].ndirty, ""); + expect_zu_eq( + HUGEPAGE_PAGES - i, psset.stats.slabs[0].ndirty, ""); expect_zu_eq(0, psset.stats.slabs[1].npageslabs, ""); expect_zu_eq(0, psset.stats.slabs[1].nactive, ""); @@ -527,7 +527,8 @@ static void stats_expect_empty(psset_bin_stats_t *stats) { assert_zu_eq(0, stats->npageslabs, "Supposedly empty bin had positive npageslabs"); - expect_zu_eq(0, stats->nactive, "Unexpected nonempty 
bin" + expect_zu_eq(0, stats->nactive, + "Unexpected nonempty bin" "Supposedly empty bin had positive nactive"); } @@ -536,17 +537,16 @@ stats_expect(psset_t *psset, size_t nactive) { if (nactive == HUGEPAGE_PAGES) { expect_zu_eq(1, psset->stats.full_slabs[0].npageslabs, "Expected a full slab"); - expect_zu_eq(HUGEPAGE_PAGES, - psset->stats.full_slabs[0].nactive, + expect_zu_eq(HUGEPAGE_PAGES, psset->stats.full_slabs[0].nactive, "Should have exactly filled the bin"); } else { stats_expect_empty(&psset->stats.full_slabs[0]); } - size_t ninactive = HUGEPAGE_PAGES - nactive; + size_t ninactive = HUGEPAGE_PAGES - nactive; pszind_t nonempty_pind = PSSET_NPSIZES; if (ninactive != 0 && ninactive < HUGEPAGE_PAGES) { - nonempty_pind = sz_psz2ind(sz_psz_quantize_floor( - ninactive << LG_PAGE)); + nonempty_pind = sz_psz2ind( + sz_psz_quantize_floor(ninactive << LG_PAGE)); } for (pszind_t i = 0; i < PSSET_NPSIZES; i++) { if (i == nonempty_pind) { @@ -657,24 +657,25 @@ init_test_pageslabs(psset_t *psset, hpdata_t *pageslab, } /* Deallocate the last page from the older pageslab. */ - hpdata_t *evicted = test_psset_dalloc(psset, - &alloc[HUGEPAGE_PAGES - 1]); + hpdata_t *evicted = test_psset_dalloc( + psset, &alloc[HUGEPAGE_PAGES - 1]); expect_ptr_null(evicted, "Unexpected eviction"); } TEST_BEGIN(test_oldest_fit) { test_skip_if(hpa_hugepage_size_exceeds_limit()); - bool err; + bool err; edata_t *alloc = (edata_t *)malloc(sizeof(edata_t) * HUGEPAGE_PAGES); - edata_t *worse_alloc = (edata_t *)malloc(sizeof(edata_t) * HUGEPAGE_PAGES); + edata_t *worse_alloc = (edata_t *)malloc( + sizeof(edata_t) * HUGEPAGE_PAGES); hpdata_t pageslab; hpdata_t worse_pageslab; psset_t psset; - init_test_pageslabs(&psset, &pageslab, &worse_pageslab, alloc, - worse_alloc); + init_test_pageslabs( + &psset, &pageslab, &worse_pageslab, alloc, worse_alloc); /* The edata should come from the better pageslab. 
*/ edata_t test_edata; @@ -691,23 +692,24 @@ TEST_END TEST_BEGIN(test_insert_remove) { test_skip_if(hpa_hugepage_size_exceeds_limit()); - bool err; + bool err; hpdata_t *ps; - edata_t *alloc = (edata_t *)malloc(sizeof(edata_t) * HUGEPAGE_PAGES); - edata_t *worse_alloc = (edata_t *)malloc(sizeof(edata_t) * HUGEPAGE_PAGES); + edata_t *alloc = (edata_t *)malloc(sizeof(edata_t) * HUGEPAGE_PAGES); + edata_t *worse_alloc = (edata_t *)malloc( + sizeof(edata_t) * HUGEPAGE_PAGES); hpdata_t pageslab; hpdata_t worse_pageslab; psset_t psset; - init_test_pageslabs(&psset, &pageslab, &worse_pageslab, alloc, - worse_alloc); + init_test_pageslabs( + &psset, &pageslab, &worse_pageslab, alloc, worse_alloc); /* Remove better; should still be able to alloc from worse. */ psset_update_begin(&psset, &pageslab); - err = test_psset_alloc_reuse(&psset, &worse_alloc[HUGEPAGE_PAGES - 1], - PAGE); + err = test_psset_alloc_reuse( + &psset, &worse_alloc[HUGEPAGE_PAGES - 1], PAGE); expect_false(err, "Removal should still leave an empty page"); expect_ptr_eq(&worse_pageslab, edata_ps_get(&worse_alloc[HUGEPAGE_PAGES - 1]), @@ -755,23 +757,21 @@ TEST_BEGIN(test_purge_prefers_nonhuge) { psset_t psset; psset_init(&psset); - hpdata_t hpdata_huge[NHP]; + hpdata_t hpdata_huge[NHP]; uintptr_t huge_begin = (uintptr_t)&hpdata_huge[0]; uintptr_t huge_end = (uintptr_t)&hpdata_huge[NHP]; - hpdata_t hpdata_nonhuge[NHP]; + hpdata_t hpdata_nonhuge[NHP]; uintptr_t nonhuge_begin = (uintptr_t)&hpdata_nonhuge[0]; uintptr_t nonhuge_end = (uintptr_t)&hpdata_nonhuge[NHP]; for (size_t i = 0; i < NHP; i++) { - hpdata_init(&hpdata_huge[i], (void *)((10 + i) * HUGEPAGE), - 123 + i); + hpdata_init( + &hpdata_huge[i], (void *)((10 + i) * HUGEPAGE), 123 + i); psset_insert(&psset, &hpdata_huge[i]); hpdata_init(&hpdata_nonhuge[i], - (void *)((10 + NHP + i) * HUGEPAGE), - 456 + i); + (void *)((10 + NHP + i) * HUGEPAGE), 456 + i); psset_insert(&psset, &hpdata_nonhuge[i]); - } for (int i = 0; i < 2 * NHP; i++) { hpdata = 
psset_pick_alloc(&psset, HUGEPAGE * 3 / 4); @@ -804,7 +804,8 @@ TEST_BEGIN(test_purge_prefers_nonhuge) { for (int i = 0; i < NHP; i++) { hpdata = psset_pick_purge(&psset); assert_true(nonhuge_begin <= (uintptr_t)hpdata - && (uintptr_t)hpdata < nonhuge_end, ""); + && (uintptr_t)hpdata < nonhuge_end, + ""); psset_update_begin(&psset, hpdata); test_psset_fake_purge(hpdata); hpdata_purge_allowed_set(hpdata, false); @@ -813,7 +814,8 @@ TEST_BEGIN(test_purge_prefers_nonhuge) { for (int i = 0; i < NHP; i++) { hpdata = psset_pick_purge(&psset); expect_true(huge_begin <= (uintptr_t)hpdata - && (uintptr_t)hpdata < huge_end, ""); + && (uintptr_t)hpdata < huge_end, + ""); psset_update_begin(&psset, hpdata); hpdata_dehugify(hpdata); test_psset_fake_purge(hpdata); @@ -867,13 +869,13 @@ TEST_BEGIN(test_purge_prefers_empty_huge) { psset_t psset; psset_init(&psset); - enum {NHP = 10 }; + enum { NHP = 10 }; hpdata_t hpdata_huge[NHP]; hpdata_t hpdata_nonhuge[NHP]; uintptr_t cur_addr = 100 * HUGEPAGE; - uint64_t cur_age = 123; + uint64_t cur_age = 123; for (int i = 0; i < NHP; i++) { hpdata_init(&hpdata_huge[i], (void *)cur_addr, cur_age); cur_addr += HUGEPAGE; @@ -933,18 +935,9 @@ TEST_END int main(void) { - return test_no_reentrancy( - test_empty, - test_fill, - test_reuse, - test_evict, - test_multi_pageslab, - test_stats_merged, - test_stats_huge, - test_stats_fullness, - test_oldest_fit, - test_insert_remove, - test_purge_prefers_nonhuge, - test_purge_prefers_empty, + return test_no_reentrancy(test_empty, test_fill, test_reuse, test_evict, + test_multi_pageslab, test_stats_merged, test_stats_huge, + test_stats_fullness, test_oldest_fit, test_insert_remove, + test_purge_prefers_nonhuge, test_purge_prefers_empty, test_purge_prefers_empty_huge); } diff --git a/test/unit/ql.c b/test/unit/ql.c index f9130582..ff3b436e 100644 --- a/test/unit/ql.c +++ b/test/unit/ql.c @@ -15,16 +15,16 @@ struct list_s { static void test_empty_list(list_head_t *head) { - list_t *t; + list_t *t; unsigned 
i; expect_true(ql_empty(head), "Unexpected element for empty list"); expect_ptr_null(ql_first(head), "Unexpected element for empty list"); - expect_ptr_null(ql_last(head, link), - "Unexpected element for empty list"); + expect_ptr_null( + ql_last(head, link), "Unexpected element for empty list"); i = 0; - ql_foreach(t, head, link) { + ql_foreach (t, head, link) { i++; } expect_u_eq(i, 0, "Unexpected element for empty list"); @@ -56,48 +56,48 @@ init_entries(list_t *entries, unsigned nentries) { static void test_entries_list(list_head_t *head, list_t *entries, unsigned nentries) { - list_t *t; + list_t *t; unsigned i; expect_false(ql_empty(head), "List should not be empty"); expect_c_eq(ql_first(head)->id, entries[0].id, "Element id mismatch"); - expect_c_eq(ql_last(head, link)->id, entries[nentries-1].id, + expect_c_eq(ql_last(head, link)->id, entries[nentries - 1].id, "Element id mismatch"); i = 0; - ql_foreach(t, head, link) { + ql_foreach (t, head, link) { expect_c_eq(t->id, entries[i].id, "Element id mismatch"); i++; } i = 0; ql_reverse_foreach(t, head, link) { - expect_c_eq(t->id, entries[nentries-i-1].id, - "Element id mismatch"); + expect_c_eq( + t->id, entries[nentries - i - 1].id, "Element id mismatch"); i++; } - for (i = 0; i < nentries-1; i++) { + for (i = 0; i < nentries - 1; i++) { t = ql_next(head, &entries[i], link); - expect_c_eq(t->id, entries[i+1].id, "Element id mismatch"); + expect_c_eq(t->id, entries[i + 1].id, "Element id mismatch"); } - expect_ptr_null(ql_next(head, &entries[nentries-1], link), - "Unexpected element"); + expect_ptr_null( + ql_next(head, &entries[nentries - 1], link), "Unexpected element"); expect_ptr_null(ql_prev(head, &entries[0], link), "Unexpected element"); for (i = 1; i < nentries; i++) { t = ql_prev(head, &entries[i], link); - expect_c_eq(t->id, entries[i-1].id, "Element id mismatch"); + expect_c_eq(t->id, entries[i - 1].id, "Element id mismatch"); } } TEST_BEGIN(test_ql_tail_insert) { list_head_t head; - list_t 
entries[NENTRIES]; - unsigned i; + list_t entries[NENTRIES]; + unsigned i; ql_new(&head); - init_entries(entries, sizeof(entries)/sizeof(list_t)); + init_entries(entries, sizeof(entries) / sizeof(list_t)); for (i = 0; i < NENTRIES; i++) { ql_tail_insert(&head, &entries[i], link); } @@ -108,17 +108,17 @@ TEST_END TEST_BEGIN(test_ql_tail_remove) { list_head_t head; - list_t entries[NENTRIES]; - unsigned i; + list_t entries[NENTRIES]; + unsigned i; ql_new(&head); - init_entries(entries, sizeof(entries)/sizeof(list_t)); + init_entries(entries, sizeof(entries) / sizeof(list_t)); for (i = 0; i < NENTRIES; i++) { ql_tail_insert(&head, &entries[i], link); } for (i = 0; i < NENTRIES; i++) { - test_entries_list(&head, entries, NENTRIES-i); + test_entries_list(&head, entries, NENTRIES - i); ql_tail_remove(&head, list_t, link); } test_empty_list(&head); @@ -127,13 +127,13 @@ TEST_END TEST_BEGIN(test_ql_head_insert) { list_head_t head; - list_t entries[NENTRIES]; - unsigned i; + list_t entries[NENTRIES]; + unsigned i; ql_new(&head); - init_entries(entries, sizeof(entries)/sizeof(list_t)); + init_entries(entries, sizeof(entries) / sizeof(list_t)); for (i = 0; i < NENTRIES; i++) { - ql_head_insert(&head, &entries[NENTRIES-i-1], link); + ql_head_insert(&head, &entries[NENTRIES - i - 1], link); } test_entries_list(&head, entries, NENTRIES); @@ -142,17 +142,17 @@ TEST_END TEST_BEGIN(test_ql_head_remove) { list_head_t head; - list_t entries[NENTRIES]; - unsigned i; + list_t entries[NENTRIES]; + unsigned i; ql_new(&head); - init_entries(entries, sizeof(entries)/sizeof(list_t)); + init_entries(entries, sizeof(entries) / sizeof(list_t)); for (i = 0; i < NENTRIES; i++) { - ql_head_insert(&head, &entries[NENTRIES-i-1], link); + ql_head_insert(&head, &entries[NENTRIES - i - 1], link); } for (i = 0; i < NENTRIES; i++) { - test_entries_list(&head, &entries[i], NENTRIES-i); + test_entries_list(&head, &entries[i], NENTRIES - i); ql_head_remove(&head, list_t, link); } test_empty_list(&head); @@ 
-161,11 +161,11 @@ TEST_END TEST_BEGIN(test_ql_insert) { list_head_t head; - list_t entries[8]; - list_t *a, *b, *c, *d, *e, *f, *g, *h; + list_t entries[8]; + list_t *a, *b, *c, *d, *e, *f, *g, *h; ql_new(&head); - init_entries(entries, sizeof(entries)/sizeof(list_t)); + init_entries(entries, sizeof(entries) / sizeof(list_t)); a = &entries[0]; b = &entries[1]; c = &entries[2]; @@ -190,13 +190,13 @@ TEST_BEGIN(test_ql_insert) { ql_after_insert(c, d, link); ql_before_insert(&head, f, e, link); - test_entries_list(&head, entries, sizeof(entries)/sizeof(list_t)); + test_entries_list(&head, entries, sizeof(entries) / sizeof(list_t)); } TEST_END static void -test_concat_split_entries(list_t *entries, unsigned nentries_a, - unsigned nentries_b) { +test_concat_split_entries( + list_t *entries, unsigned nentries_a, unsigned nentries_b) { init_entries(entries, nentries_a + nentries_b); list_head_t head_a; @@ -253,8 +253,8 @@ TEST_BEGIN(test_ql_concat_split) { test_concat_split_entries(entries, 0, NENTRIES); test_concat_split_entries(entries, 1, NENTRIES - 1); - test_concat_split_entries(entries, NENTRIES / 2, - NENTRIES - NENTRIES / 2); + test_concat_split_entries( + entries, NENTRIES / 2, NENTRIES - NENTRIES / 2); test_concat_split_entries(entries, NENTRIES - 1, 1); test_concat_split_entries(entries, NENTRIES, 0); } @@ -262,11 +262,11 @@ TEST_END TEST_BEGIN(test_ql_rotate) { list_head_t head; - list_t entries[NENTRIES]; - unsigned i; + list_t entries[NENTRIES]; + unsigned i; ql_new(&head); - init_entries(entries, sizeof(entries)/sizeof(list_t)); + init_entries(entries, sizeof(entries) / sizeof(list_t)); for (i = 0; i < NENTRIES; i++) { ql_tail_insert(&head, &entries[i], link); } @@ -284,15 +284,15 @@ TEST_END TEST_BEGIN(test_ql_move) { list_head_t head_dest, head_src; - list_t entries[NENTRIES]; - unsigned i; + list_t entries[NENTRIES]; + unsigned i; ql_new(&head_src); ql_move(&head_dest, &head_src); test_empty_list(&head_src); test_empty_list(&head_dest); - 
init_entries(entries, sizeof(entries)/sizeof(list_t)); + init_entries(entries, sizeof(entries) / sizeof(list_t)); for (i = 0; i < NENTRIES; i++) { ql_tail_insert(&head_src, &entries[i], link); } @@ -304,14 +304,7 @@ TEST_END int main(void) { - return test( - test_ql_empty, - test_ql_tail_insert, - test_ql_tail_remove, - test_ql_head_insert, - test_ql_head_remove, - test_ql_insert, - test_ql_concat_split, - test_ql_rotate, - test_ql_move); + return test(test_ql_empty, test_ql_tail_insert, test_ql_tail_remove, + test_ql_head_insert, test_ql_head_remove, test_ql_insert, + test_ql_concat_split, test_ql_rotate, test_ql_move); } diff --git a/test/unit/qr.c b/test/unit/qr.c index 16eed0e9..3d8b164b 100644 --- a/test/unit/qr.c +++ b/test/unit/qr.c @@ -26,12 +26,12 @@ init_entries(ring_t *entries) { static void test_independent_entries(ring_t *entries) { - ring_t *t; + ring_t *t; unsigned i, j; for (i = 0; i < NENTRIES; i++) { j = 0; - qr_foreach(t, &entries[i], link) { + qr_foreach (t, &entries[i], link) { j++; } expect_u_eq(j, 1, @@ -71,13 +71,13 @@ TEST_END static void test_entries_ring(ring_t *entries) { - ring_t *t; + ring_t *t; unsigned i, j; for (i = 0; i < NENTRIES; i++) { j = 0; - qr_foreach(t, &entries[i], link) { - expect_c_eq(t->id, entries[(i+j) % NENTRIES].id, + qr_foreach (t, &entries[i], link) { + expect_c_eq(t->id, entries[(i + j) % NENTRIES].id, "Element id mismatch"); j++; } @@ -85,25 +85,26 @@ test_entries_ring(ring_t *entries) { for (i = 0; i < NENTRIES; i++) { j = 0; qr_reverse_foreach(t, &entries[i], link) { - expect_c_eq(t->id, entries[(NENTRIES+i-j-1) % - NENTRIES].id, "Element id mismatch"); + expect_c_eq(t->id, + entries[(NENTRIES + i - j - 1) % NENTRIES].id, + "Element id mismatch"); j++; } } for (i = 0; i < NENTRIES; i++) { t = qr_next(&entries[i], link); - expect_c_eq(t->id, entries[(i+1) % NENTRIES].id, + expect_c_eq(t->id, entries[(i + 1) % NENTRIES].id, "Element id mismatch"); } for (i = 0; i < NENTRIES; i++) { t = qr_prev(&entries[i], 
link); - expect_c_eq(t->id, entries[(NENTRIES+i-1) % NENTRIES].id, + expect_c_eq(t->id, entries[(NENTRIES + i - 1) % NENTRIES].id, "Element id mismatch"); } } TEST_BEGIN(test_qr_after_insert) { - ring_t entries[NENTRIES]; + ring_t entries[NENTRIES]; unsigned i; init_entries(entries); @@ -115,8 +116,8 @@ TEST_BEGIN(test_qr_after_insert) { TEST_END TEST_BEGIN(test_qr_remove) { - ring_t entries[NENTRIES]; - ring_t *t; + ring_t entries[NENTRIES]; + ring_t *t; unsigned i, j; init_entries(entries); @@ -126,15 +127,15 @@ TEST_BEGIN(test_qr_remove) { for (i = 0; i < NENTRIES; i++) { j = 0; - qr_foreach(t, &entries[i], link) { - expect_c_eq(t->id, entries[i+j].id, - "Element id mismatch"); + qr_foreach (t, &entries[i], link) { + expect_c_eq( + t->id, entries[i + j].id, "Element id mismatch"); j++; } j = 0; qr_reverse_foreach(t, &entries[i], link) { expect_c_eq(t->id, entries[NENTRIES - 1 - j].id, - "Element id mismatch"); + "Element id mismatch"); j++; } qr_remove(&entries[i], link); @@ -144,8 +145,8 @@ TEST_BEGIN(test_qr_remove) { TEST_END TEST_BEGIN(test_qr_before_insert) { - ring_t entries[NENTRIES]; - ring_t *t; + ring_t entries[NENTRIES]; + ring_t *t; unsigned i, j; init_entries(entries); @@ -154,28 +155,29 @@ TEST_BEGIN(test_qr_before_insert) { } for (i = 0; i < NENTRIES; i++) { j = 0; - qr_foreach(t, &entries[i], link) { - expect_c_eq(t->id, entries[(NENTRIES+i-j) % - NENTRIES].id, "Element id mismatch"); + qr_foreach (t, &entries[i], link) { + expect_c_eq(t->id, + entries[(NENTRIES + i - j) % NENTRIES].id, + "Element id mismatch"); j++; } } for (i = 0; i < NENTRIES; i++) { j = 0; qr_reverse_foreach(t, &entries[i], link) { - expect_c_eq(t->id, entries[(i+j+1) % NENTRIES].id, + expect_c_eq(t->id, entries[(i + j + 1) % NENTRIES].id, "Element id mismatch"); j++; } } for (i = 0; i < NENTRIES; i++) { t = qr_next(&entries[i], link); - expect_c_eq(t->id, entries[(NENTRIES+i-1) % NENTRIES].id, + expect_c_eq(t->id, entries[(NENTRIES + i - 1) % NENTRIES].id, "Element id 
mismatch"); } for (i = 0; i < NENTRIES; i++) { t = qr_prev(&entries[i], link); - expect_c_eq(t->id, entries[(i+1) % NENTRIES].id, + expect_c_eq(t->id, entries[(i + 1) % NENTRIES].id, "Element id mismatch"); } } @@ -183,19 +185,22 @@ TEST_END static void test_split_entries(ring_t *entries) { - ring_t *t; + ring_t *t; unsigned i, j; for (i = 0; i < NENTRIES; i++) { j = 0; - qr_foreach(t, &entries[i], link) { + qr_foreach (t, &entries[i], link) { if (i < SPLIT_INDEX) { expect_c_eq(t->id, - entries[(i+j) % SPLIT_INDEX].id, + entries[(i + j) % SPLIT_INDEX].id, "Element id mismatch"); } else { - expect_c_eq(t->id, entries[(i+j-SPLIT_INDEX) % - (NENTRIES-SPLIT_INDEX) + SPLIT_INDEX].id, + expect_c_eq(t->id, + entries[(i + j - SPLIT_INDEX) + % (NENTRIES - SPLIT_INDEX) + + SPLIT_INDEX] + .id, "Element id mismatch"); } j++; @@ -204,7 +209,7 @@ test_split_entries(ring_t *entries) { } TEST_BEGIN(test_qr_meld_split) { - ring_t entries[NENTRIES]; + ring_t entries[NENTRIES]; unsigned i; init_entries(entries); @@ -234,10 +239,6 @@ TEST_END int main(void) { - return test( - test_qr_one, - test_qr_after_insert, - test_qr_remove, - test_qr_before_insert, - test_qr_meld_split); + return test(test_qr_one, test_qr_after_insert, test_qr_remove, + test_qr_before_insert, test_qr_meld_split); } diff --git a/test/unit/rb.c b/test/unit/rb.c index 827ec510..790593e3 100644 --- a/test/unit/rb.c +++ b/test/unit/rb.c @@ -4,16 +4,17 @@ #include "jemalloc/internal/rb.h" -#define rbtn_black_height(a_type, a_field, a_rbt, r_height) do { \ - a_type *rbp_bh_t; \ - for (rbp_bh_t = (a_rbt)->rbt_root, (r_height) = 0; rbp_bh_t != \ - NULL; rbp_bh_t = rbtn_left_get(a_type, a_field, \ - rbp_bh_t)) { \ - if (!rbtn_red_get(a_type, a_field, rbp_bh_t)) { \ - (r_height)++; \ - } \ - } \ -} while (0) +#define rbtn_black_height(a_type, a_field, a_rbt, r_height) \ + do { \ + a_type *rbp_bh_t; \ + for (rbp_bh_t = (a_rbt)->rbt_root, (r_height) = 0; \ + rbp_bh_t != NULL; \ + rbp_bh_t = rbtn_left_get(a_type, a_field, 
rbp_bh_t)) { \ + if (!rbtn_red_get(a_type, a_field, rbp_bh_t)) { \ + (r_height)++; \ + } \ + } \ + } while (0) static bool summarize_always_returns_true = false; @@ -55,7 +56,7 @@ struct node_s { */ const node_t *summary_lchild; const node_t *summary_rchild; - uint64_t summary_max_specialness; + uint64_t summary_max_specialness; }; static int @@ -80,8 +81,8 @@ node_cmp(const node_t *a, const node_t *b) { } static uint64_t -node_subtree_specialness(node_t *n, const node_t *lchild, - const node_t *rchild) { +node_subtree_specialness( + node_t *n, const node_t *lchild, const node_t *rchild) { uint64_t subtree_specialness = n->specialness; if (lchild != NULL && lchild->summary_max_specialness > subtree_specialness) { @@ -109,8 +110,8 @@ node_summarize(node_t *a, const node_t *lchild, const node_t *rchild) { typedef rb_tree(node_t) tree_t; rb_summarized_proto(static, tree_, tree_t, node_t); -rb_summarized_gen(static, tree_, tree_t, node_t, link, node_cmp, - node_summarize); +rb_summarized_gen( + static, tree_, tree_t, node_t, link, node_cmp, node_summarize); static bool specialness_filter_node(void *ctx, node_t *node) { @@ -127,24 +128,24 @@ specialness_filter_subtree(void *ctx, node_t *node) { static node_t * tree_iterate_cb(tree_t *tree, node_t *node, void *data) { unsigned *i = (unsigned *)data; - node_t *search_node; + node_t *search_node; expect_u32_eq(node->magic, NODE_MAGIC, "Bad magic"); /* Test rb_search(). */ search_node = tree_search(tree, node); - expect_ptr_eq(search_node, node, - "tree_search() returned unexpected node"); + expect_ptr_eq( + search_node, node, "tree_search() returned unexpected node"); /* Test rb_nsearch(). */ search_node = tree_nsearch(tree, node); - expect_ptr_eq(search_node, node, - "tree_nsearch() returned unexpected node"); + expect_ptr_eq( + search_node, node, "tree_nsearch() returned unexpected node"); /* Test rb_psearch(). 
*/ search_node = tree_psearch(tree, node); - expect_ptr_eq(search_node, node, - "tree_psearch() returned unexpected node"); + expect_ptr_eq( + search_node, node, "tree_psearch() returned unexpected node"); (*i)++; @@ -174,38 +175,44 @@ TEST_BEGIN(test_rb_empty) { expect_ptr_null(tree_psearch(&tree, &key), "Unexpected node"); unsigned nodes = 0; - tree_iter_filtered(&tree, NULL, &tree_iterate_cb, - &nodes, &specialness_filter_node, &specialness_filter_subtree, - NULL); + tree_iter_filtered(&tree, NULL, &tree_iterate_cb, &nodes, + &specialness_filter_node, &specialness_filter_subtree, NULL); expect_u_eq(0, nodes, ""); nodes = 0; - tree_reverse_iter_filtered(&tree, NULL, &tree_iterate_cb, - &nodes, &specialness_filter_node, &specialness_filter_subtree, - NULL); + tree_reverse_iter_filtered(&tree, NULL, &tree_iterate_cb, &nodes, + &specialness_filter_node, &specialness_filter_subtree, NULL); expect_u_eq(0, nodes, ""); expect_ptr_null(tree_first_filtered(&tree, &specialness_filter_node, - &specialness_filter_subtree, NULL), ""); + &specialness_filter_subtree, NULL), + ""); expect_ptr_null(tree_last_filtered(&tree, &specialness_filter_node, - &specialness_filter_subtree, NULL), ""); + &specialness_filter_subtree, NULL), + ""); key.key = 0; key.magic = NODE_MAGIC; - expect_ptr_null(tree_search_filtered(&tree, &key, - &specialness_filter_node, &specialness_filter_subtree, NULL), ""); - expect_ptr_null(tree_nsearch_filtered(&tree, &key, - &specialness_filter_node, &specialness_filter_subtree, NULL), ""); - expect_ptr_null(tree_psearch_filtered(&tree, &key, - &specialness_filter_node, &specialness_filter_subtree, NULL), ""); + expect_ptr_null( + tree_search_filtered(&tree, &key, &specialness_filter_node, + &specialness_filter_subtree, NULL), + ""); + expect_ptr_null( + tree_nsearch_filtered(&tree, &key, &specialness_filter_node, + &specialness_filter_subtree, NULL), + ""); + expect_ptr_null( + tree_psearch_filtered(&tree, &key, &specialness_filter_node, + 
&specialness_filter_subtree, NULL), + ""); } TEST_END static unsigned tree_recurse(node_t *node, unsigned black_height, unsigned black_depth) { unsigned ret = 0; - node_t *left_node; - node_t *right_node; + node_t *left_node; + node_t *right_node; if (node == NULL) { return ret; @@ -214,13 +221,13 @@ tree_recurse(node_t *node, unsigned black_height, unsigned black_depth) { left_node = rbtn_left_get(node_t, link, node); right_node = rbtn_right_get(node_t, link, node); - expect_ptr_eq(left_node, node->summary_lchild, - "summary missed a tree update"); - expect_ptr_eq(right_node, node->summary_rchild, - "summary missed a tree update"); + expect_ptr_eq( + left_node, node->summary_lchild, "summary missed a tree update"); + expect_ptr_eq( + right_node, node->summary_rchild, "summary missed a tree update"); - uint64_t expected_subtree_specialness = node_subtree_specialness(node, - left_node, right_node); + uint64_t expected_subtree_specialness = node_subtree_specialness( + node, left_node, right_node); expect_u64_eq(expected_subtree_specialness, node->summary_max_specialness, "Incorrect summary"); @@ -232,7 +239,7 @@ tree_recurse(node_t *node, unsigned black_height, unsigned black_depth) { if (rbtn_red_get(node_t, link, node)) { if (left_node != NULL) { expect_false(rbtn_red_get(node_t, link, left_node), - "Node should be black"); + "Node should be black"); } if (right_node != NULL) { expect_false(rbtn_red_get(node_t, link, right_node), @@ -282,7 +289,7 @@ tree_iterate_reverse(tree_t *tree) { static void node_remove(tree_t *tree, node_t *node, unsigned nnodes) { - node_t *search_node; + node_t *search_node; unsigned black_height, imbalances; tree_remove(tree, node); @@ -290,15 +297,15 @@ node_remove(tree_t *tree, node_t *node, unsigned nnodes) { /* Test rb_nsearch(). 
*/ search_node = tree_nsearch(tree, node); if (search_node != NULL) { - expect_u64_ge(search_node->key, node->key, - "Key ordering error"); + expect_u64_ge( + search_node->key, node->key, "Key ordering error"); } /* Test rb_psearch(). */ search_node = tree_psearch(tree, node); if (search_node != NULL) { - expect_u64_le(search_node->key, node->key, - "Key ordering error"); + expect_u64_le( + search_node->key, node->key, "Key ordering error"); } node->magic = 0; @@ -306,16 +313,16 @@ node_remove(tree_t *tree, node_t *node, unsigned nnodes) { rbtn_black_height(node_t, link, tree, black_height); imbalances = tree_recurse(tree->rbt_root, black_height, 0); expect_u_eq(imbalances, 0, "Tree is unbalanced"); - expect_u_eq(tree_iterate(tree), nnodes-1, - "Unexpected node iteration count"); - expect_u_eq(tree_iterate_reverse(tree), nnodes-1, + expect_u_eq( + tree_iterate(tree), nnodes - 1, "Unexpected node iteration count"); + expect_u_eq(tree_iterate_reverse(tree), nnodes - 1, "Unexpected node iteration count"); } static node_t * remove_iterate_cb(tree_t *tree, node_t *node, void *data) { unsigned *nnodes = (unsigned *)data; - node_t *ret = tree_next(tree, node); + node_t *ret = tree_next(tree, node); node_remove(tree, node, *nnodes); @@ -325,7 +332,7 @@ remove_iterate_cb(tree_t *tree, node_t *node, void *data) { static node_t * remove_reverse_iterate_cb(tree_t *tree, node_t *node, void *data) { unsigned *nnodes = (unsigned *)data; - node_t *ret = tree_prev(tree, node); + node_t *ret = tree_prev(tree, node); node_remove(tree, node, *nnodes); @@ -341,15 +348,11 @@ destroy_cb(node_t *node, void *data) { } TEST_BEGIN(test_rb_random) { - enum { - NNODES = 25, - NBAGS = 500, - SEED = 42 - }; - sfmt_t *sfmt; + enum { NNODES = 25, NBAGS = 500, SEED = 42 }; + sfmt_t *sfmt; uint64_t bag[NNODES]; - tree_t tree; - node_t nodes[NNODES]; + tree_t tree; + node_t nodes[NNODES]; unsigned i, j, k, black_height, imbalances; sfmt = init_gen_rand(SEED); @@ -386,8 +389,8 @@ 
TEST_BEGIN(test_rb_random) { for (k = 0; k < j; k++) { nodes[k].magic = NODE_MAGIC; nodes[k].key = bag[k]; - nodes[k].specialness = gen_rand64_range(sfmt, - NNODES); + nodes[k].specialness = gen_rand64_range( + sfmt, NNODES); nodes[k].mid_remove = false; nodes[k].allow_duplicates = false; nodes[k].summary_lchild = NULL; @@ -399,16 +402,16 @@ TEST_BEGIN(test_rb_random) { for (k = 0; k < j; k++) { tree_insert(&tree, &nodes[k]); - rbtn_black_height(node_t, link, &tree, - black_height); - imbalances = tree_recurse(tree.rbt_root, - black_height, 0); - expect_u_eq(imbalances, 0, - "Tree is unbalanced"); + rbtn_black_height( + node_t, link, &tree, black_height); + imbalances = tree_recurse( + tree.rbt_root, black_height, 0); + expect_u_eq( + imbalances, 0, "Tree is unbalanced"); - expect_u_eq(tree_iterate(&tree), k+1, + expect_u_eq(tree_iterate(&tree), k + 1, "Unexpected node iteration count"); - expect_u_eq(tree_iterate_reverse(&tree), k+1, + expect_u_eq(tree_iterate_reverse(&tree), k + 1, "Unexpected node iteration count"); expect_false(tree_empty(&tree), @@ -431,11 +434,11 @@ TEST_BEGIN(test_rb_random) { break; case 1: for (k = j; k > 0; k--) { - node_remove(&tree, &nodes[k-1], k); + node_remove(&tree, &nodes[k - 1], k); } break; case 2: { - node_t *start; + node_t *start; unsigned nnodes = j; start = NULL; @@ -444,11 +447,12 @@ TEST_BEGIN(test_rb_random) { remove_iterate_cb, (void *)&nnodes); nnodes--; } while (start != NULL); - expect_u_eq(nnodes, 0, - "Removal terminated early"); + expect_u_eq( + nnodes, 0, "Removal terminated early"); break; - } case 3: { - node_t *start; + } + case 3: { + node_t *start; unsigned nnodes = j; start = NULL; @@ -458,16 +462,18 @@ TEST_BEGIN(test_rb_random) { (void *)&nnodes); nnodes--; } while (start != NULL); - expect_u_eq(nnodes, 0, - "Removal terminated early"); + expect_u_eq( + nnodes, 0, "Removal terminated early"); break; - } case 4: { + } + case 4: { unsigned nnodes = j; tree_destroy(&tree, destroy_cb, &nnodes); - 
expect_u_eq(nnodes, 0, - "Destruction terminated early"); + expect_u_eq( + nnodes, 0, "Destruction terminated early"); break; - } default: + } + default: not_reached(); } } @@ -479,7 +485,7 @@ TEST_END static void expect_simple_consistency(tree_t *tree, uint64_t specialness, bool expected_empty, node_t *expected_first, node_t *expected_last) { - bool empty; + bool empty; node_t *first; node_t *last; @@ -487,19 +493,17 @@ expect_simple_consistency(tree_t *tree, uint64_t specialness, &specialness_filter_subtree, &specialness); expect_b_eq(expected_empty, empty, ""); - first = tree_first_filtered(tree, - &specialness_filter_node, &specialness_filter_subtree, - (void *)&specialness); + first = tree_first_filtered(tree, &specialness_filter_node, + &specialness_filter_subtree, (void *)&specialness); expect_ptr_eq(expected_first, first, ""); - last = tree_last_filtered(tree, - &specialness_filter_node, &specialness_filter_subtree, - (void *)&specialness); + last = tree_last_filtered(tree, &specialness_filter_node, + &specialness_filter_subtree, (void *)&specialness); expect_ptr_eq(expected_last, last, ""); } TEST_BEGIN(test_rb_filter_simple) { - enum {FILTER_NODES = 10}; + enum { FILTER_NODES = 10 }; node_t nodes[FILTER_NODES]; for (unsigned i = 0; i < FILTER_NODES; i++) { nodes[i].magic = NODE_MAGIC; @@ -583,10 +587,10 @@ TEST_END typedef struct iter_ctx_s iter_ctx_t; struct iter_ctx_s { - int ncalls; + int ncalls; node_t *last_node; - int ncalls_max; + int ncalls_max; bool forward; }; @@ -624,8 +628,8 @@ static void check_consistency(tree_t *tree, node_t nodes[UPDATE_TEST_MAX], int nnodes) { uint64_t specialness = 1; - bool empty; - bool real_empty = true; + bool empty; + bool real_empty = true; node_t *first; node_t *real_first = NULL; node_t *last; @@ -667,12 +671,14 @@ check_consistency(tree_t *tree, node_t nodes[UPDATE_TEST_MAX], int nnodes) { } if (node_cmp(&nodes[j], &nodes[i]) < 0 && (real_prev_filtered == NULL - || node_cmp(&nodes[j], real_prev_filtered) > 0)) { 
+ || node_cmp(&nodes[j], real_prev_filtered) + > 0)) { real_prev_filtered = &nodes[j]; } if (node_cmp(&nodes[j], &nodes[i]) > 0 && (real_next_filtered == NULL - || node_cmp(&nodes[j], real_next_filtered) < 0)) { + || node_cmp(&nodes[j], real_next_filtered) + < 0)) { real_next_filtered = &nodes[j]; } } @@ -707,8 +713,9 @@ check_consistency(tree_t *tree, node_t nodes[UPDATE_TEST_MAX], int nnodes) { &specialness); expect_ptr_eq(real_search_filtered, search_filtered, ""); - real_nsearch_filtered = (nodes[i].specialness >= specialness ? - &nodes[i] : real_next_filtered); + real_nsearch_filtered = (nodes[i].specialness >= specialness + ? &nodes[i] + : real_next_filtered); nsearch_filtered = tree_nsearch_filtered(tree, &before, &specialness_filter_node, &specialness_filter_subtree, &specialness); @@ -721,22 +728,25 @@ check_consistency(tree_t *tree, node_t nodes[UPDATE_TEST_MAX], int nnodes) { expect_ptr_eq(real_psearch_filtered, psearch_filtered, ""); /* search, nsearch, psearch from nodes[i] */ - real_search_filtered = (nodes[i].specialness >= specialness ? - &nodes[i] : NULL); + real_search_filtered = (nodes[i].specialness >= specialness + ? &nodes[i] + : NULL); search_filtered = tree_search_filtered(tree, &nodes[i], &specialness_filter_node, &specialness_filter_subtree, &specialness); expect_ptr_eq(real_search_filtered, search_filtered, ""); - real_nsearch_filtered = (nodes[i].specialness >= specialness ? - &nodes[i] : real_next_filtered); + real_nsearch_filtered = (nodes[i].specialness >= specialness + ? &nodes[i] + : real_next_filtered); nsearch_filtered = tree_nsearch_filtered(tree, &nodes[i], &specialness_filter_node, &specialness_filter_subtree, &specialness); expect_ptr_eq(real_nsearch_filtered, nsearch_filtered, ""); - real_psearch_filtered = (nodes[i].specialness >= specialness ? - &nodes[i] : real_prev_filtered); + real_psearch_filtered = (nodes[i].specialness >= specialness + ? 
&nodes[i] + : real_prev_filtered); psearch_filtered = tree_psearch_filtered(tree, &nodes[i], &specialness_filter_node, &specialness_filter_subtree, &specialness); @@ -750,22 +760,25 @@ check_consistency(tree_t *tree, node_t nodes[UPDATE_TEST_MAX], int nnodes) { equiv.magic = NODE_MAGIC; equiv.key = nodes[i].key; equiv.allow_duplicates = true; - real_search_filtered = (nodes[i].specialness >= specialness ? - &nodes[i] : NULL); + real_search_filtered = (nodes[i].specialness >= specialness + ? &nodes[i] + : NULL); search_filtered = tree_search_filtered(tree, &equiv, &specialness_filter_node, &specialness_filter_subtree, &specialness); expect_ptr_eq(real_search_filtered, search_filtered, ""); - real_nsearch_filtered = (nodes[i].specialness >= specialness ? - &nodes[i] : real_next_filtered); + real_nsearch_filtered = (nodes[i].specialness >= specialness + ? &nodes[i] + : real_next_filtered); nsearch_filtered = tree_nsearch_filtered(tree, &equiv, &specialness_filter_node, &specialness_filter_subtree, &specialness); expect_ptr_eq(real_nsearch_filtered, nsearch_filtered, ""); - real_psearch_filtered = (nodes[i].specialness >= specialness ? - &nodes[i] : real_prev_filtered); + real_psearch_filtered = (nodes[i].specialness >= specialness + ? &nodes[i] + : real_prev_filtered); psearch_filtered = tree_psearch_filtered(tree, &equiv, &specialness_filter_node, &specialness_filter_subtree, &specialness); @@ -791,8 +804,9 @@ check_consistency(tree_t *tree, node_t nodes[UPDATE_TEST_MAX], int nnodes) { &specialness); expect_ptr_eq(real_nsearch_filtered, nsearch_filtered, ""); - real_psearch_filtered = (nodes[i].specialness >= specialness ? - &nodes[i] : real_prev_filtered); + real_psearch_filtered = (nodes[i].specialness >= specialness + ? 
&nodes[i] + : real_prev_filtered); psearch_filtered = tree_psearch_filtered(tree, &after, &specialness_filter_node, &specialness_filter_subtree, &specialness); @@ -800,7 +814,7 @@ check_consistency(tree_t *tree, node_t nodes[UPDATE_TEST_MAX], int nnodes) { } /* Filtered iteration test setup. */ - int nspecial = 0; + int nspecial = 0; node_t *sorted_nodes[UPDATE_TEST_MAX]; node_t *sorted_filtered_nodes[UPDATE_TEST_MAX]; for (int i = 0; i < nnodes; i++) { @@ -862,8 +876,9 @@ check_consistency(tree_t *tree, node_t nodes[UPDATE_TEST_MAX], int nnodes) { &specialness_filter_node, &specialness_filter_subtree, &specialness); expect_d_eq(j + 1, ctx.ncalls, ""); - expect_ptr_eq(sorted_filtered_nodes[ - nodes[i].filtered_rank + j], iter_result, ""); + expect_ptr_eq( + sorted_filtered_nodes[nodes[i].filtered_rank + j], + iter_result, ""); } } @@ -888,8 +903,8 @@ check_consistency(tree_t *tree, node_t nodes[UPDATE_TEST_MAX], int nnodes) { &specialness_filter_subtree, &specialness); expect_ptr_null(iter_result, ""); int surplus_rank = (nodes[i].specialness >= 1 ? 1 : 0); - expect_d_eq(nodes[i].filtered_rank + surplus_rank, ctx.ncalls, - ""); + expect_d_eq( + nodes[i].filtered_rank + surplus_rank, ctx.ncalls, ""); } /* Filtered backward iteration from the end, with stopping */ for (int i = 0; i < nspecial; i++) { @@ -899,15 +914,15 @@ check_consistency(tree_t *tree, node_t nodes[UPDATE_TEST_MAX], int nnodes) { iter_result = tree_reverse_iter_filtered(tree, NULL, &tree_iterate_filtered_cb, &ctx, &specialness_filter_node, &specialness_filter_subtree, &specialness); - expect_ptr_eq(sorted_filtered_nodes[nspecial - i - 1], - iter_result, ""); + expect_ptr_eq( + sorted_filtered_nodes[nspecial - i - 1], iter_result, ""); expect_d_eq(ctx.ncalls, i + 1, ""); } /* Filtered backward iteration from a starting point, with stopping. */ for (int i = 0; i < nnodes; i++) { int surplus_rank = (nodes[i].specialness >= 1 ? 
1 : 0); for (int j = 0; j < nodes[i].filtered_rank + surplus_rank; - j++) { + j++) { ctx.ncalls = 0; ctx.last_node = NULL; ctx.ncalls_max = j + 1; @@ -916,16 +931,16 @@ check_consistency(tree_t *tree, node_t nodes[UPDATE_TEST_MAX], int nnodes) { &specialness_filter_node, &specialness_filter_subtree, &specialness); expect_d_eq(j + 1, ctx.ncalls, ""); - expect_ptr_eq(sorted_filtered_nodes[ - nodes[i].filtered_rank - j - 1 + surplus_rank], + expect_ptr_eq( + sorted_filtered_nodes[nodes[i].filtered_rank - j - 1 + + surplus_rank], iter_result, ""); } } } static void -do_update_search_test(int nnodes, int ntrees, int nremovals, - int nupdates) { +do_update_search_test(int nnodes, int ntrees, int nremovals, int nupdates) { node_t nodes[UPDATE_TEST_MAX]; assert(nnodes <= UPDATE_TEST_MAX); @@ -987,8 +1002,8 @@ rb_gen(static UNUSED, unsummarized_tree_, unsummarized_tree_t, node_t, link, node_cmp); static node_t * -unsummarized_tree_iterate_cb(unsummarized_tree_t *tree, node_t *node, - void *data) { +unsummarized_tree_iterate_cb( + unsummarized_tree_t *tree, node_t *node, void *data) { unsigned *i = (unsigned *)data; (*i)++; return NULL; @@ -1002,18 +1017,14 @@ TEST_BEGIN(test_rb_unsummarized) { unsummarized_tree_t tree; unsummarized_tree_new(&tree); unsigned nnodes = 0; - unsummarized_tree_iter(&tree, NULL, &unsummarized_tree_iterate_cb, - &nnodes); + unsummarized_tree_iter( + &tree, NULL, &unsummarized_tree_iterate_cb, &nnodes); expect_u_eq(0, nnodes, ""); } TEST_END int main(void) { - return test_no_reentrancy( - test_rb_empty, - test_rb_random, - test_rb_filter_simple, - test_rb_update_search, - test_rb_unsummarized); + return test_no_reentrancy(test_rb_empty, test_rb_random, + test_rb_filter_simple, test_rb_update_search, test_rb_unsummarized); } diff --git a/test/unit/retained.c b/test/unit/retained.c index 40cbb0cd..687701c7 100644 --- a/test/unit/retained.c +++ b/test/unit/retained.c @@ -3,21 +3,22 @@ #include "jemalloc/internal/san.h" #include 
"jemalloc/internal/spin.h" -static unsigned arena_ind; -static size_t sz; -static size_t esz; -#define NEPOCHS 8 -#define PER_THD_NALLOCS 1 -static atomic_u_t epoch; -static atomic_u_t nfinished; +static unsigned arena_ind; +static size_t sz; +static size_t esz; +#define NEPOCHS 8 +#define PER_THD_NALLOCS 1 +static atomic_u_t epoch; +static atomic_u_t nfinished; static unsigned do_arena_create(extent_hooks_t *h) { unsigned new_arena_ind; - size_t ind_sz = sizeof(unsigned); - expect_d_eq(mallctl("arenas.create", (void *)&new_arena_ind, &ind_sz, - (void *)(h != NULL ? &h : NULL), (h != NULL ? sizeof(h) : 0)), 0, - "Unexpected mallctl() failure"); + size_t ind_sz = sizeof(unsigned); + expect_d_eq( + mallctl("arenas.create", (void *)&new_arena_ind, &ind_sz, + (void *)(h != NULL ? &h : NULL), (h != NULL ? sizeof(h) : 0)), + 0, "Unexpected mallctl() failure"); return new_arena_ind; } @@ -26,7 +27,7 @@ do_arena_destroy(unsigned ind) { size_t mib[3]; size_t miblen; - miblen = sizeof(mib)/sizeof(size_t); + miblen = sizeof(mib) / sizeof(size_t); expect_d_eq(mallctlnametomib("arena.0.destroy", mib, &miblen), 0, "Unexpected mallctlnametomib() failure"); mib[1] = (size_t)ind; @@ -38,7 +39,8 @@ static void do_refresh(void) { uint64_t refresh_epoch = 1; expect_d_eq(mallctl("epoch", NULL, NULL, (void *)&refresh_epoch, - sizeof(refresh_epoch)), 0, "Unexpected mallctl() failure"); + sizeof(refresh_epoch)), + 0, "Unexpected mallctl() failure"); } static size_t @@ -47,12 +49,12 @@ do_get_size_impl(const char *cmd, unsigned ind) { size_t miblen = sizeof(mib) / sizeof(size_t); size_t z = sizeof(size_t); - expect_d_eq(mallctlnametomib(cmd, mib, &miblen), - 0, "Unexpected mallctlnametomib(\"%s\", ...) failure", cmd); + expect_d_eq(mallctlnametomib(cmd, mib, &miblen), 0, + "Unexpected mallctlnametomib(\"%s\", ...) failure", cmd); mib[2] = ind; size_t size; - expect_d_eq(mallctlbymib(mib, miblen, (void *)&size, &z, NULL, 0), - 0, "Unexpected mallctlbymib([\"%s\"], ...) 
failure", cmd); + expect_d_eq(mallctlbymib(mib, miblen, (void *)&size, &z, NULL, 0), 0, + "Unexpected mallctlbymib([\"%s\"], ...) failure", cmd); return size; } @@ -72,9 +74,9 @@ thd_start(void *arg) { for (unsigned next_epoch = 1; next_epoch < NEPOCHS; next_epoch++) { /* Busy-wait for next epoch. */ unsigned cur_epoch; - spin_t spinner = SPIN_INITIALIZER; - while ((cur_epoch = atomic_load_u(&epoch, ATOMIC_ACQUIRE)) != - next_epoch) { + spin_t spinner = SPIN_INITIALIZER; + while ((cur_epoch = atomic_load_u(&epoch, ATOMIC_ACQUIRE)) + != next_epoch) { spin_adaptive(&spinner); } expect_u_eq(cur_epoch, next_epoch, "Unexpected epoch"); @@ -84,11 +86,10 @@ thd_start(void *arg) { * no need to deallocate. */ for (unsigned i = 0; i < PER_THD_NALLOCS; i++) { - void *p = mallocx(sz, MALLOCX_ARENA(arena_ind) | - MALLOCX_TCACHE_NONE - ); - expect_ptr_not_null(p, - "Unexpected mallocx() failure\n"); + void *p = mallocx( + sz, MALLOCX_ARENA(arena_ind) | MALLOCX_TCACHE_NONE); + expect_ptr_not_null( + p, "Unexpected mallocx() failure\n"); } /* Let the main thread know we've finished this iteration. 
*/ @@ -142,17 +143,17 @@ TEST_BEGIN(test_retained) { */ do_refresh(); - size_t allocated = (esz - guard_sz) * nthreads * - PER_THD_NALLOCS; + size_t allocated = (esz - guard_sz) * nthreads + * PER_THD_NALLOCS; size_t active = do_get_active(arena_ind); expect_zu_le(allocated, active, "Unexpected active memory"); size_t mapped = do_get_mapped(arena_ind); expect_zu_le(active, mapped, "Unexpected mapped memory"); arena_t *arena = arena_get(tsdn_fetch(), arena_ind, false); - size_t usable = 0; - for (pszind_t pind = sz_psz2ind(HUGEPAGE); pind < - arena->pa_shard.pac.exp_grow.next; pind++) { + size_t usable = 0; + for (pszind_t pind = sz_psz2ind(HUGEPAGE); + pind < arena->pa_shard.pac.exp_grow.next; pind++) { size_t psz = sz_pind2sz(pind); size_t psz_fragmented = psz % esz; size_t psz_usable = psz - psz_fragmented; @@ -162,8 +163,8 @@ TEST_BEGIN(test_retained) { if (psz_usable > 0) { expect_zu_lt(usable, allocated, "Excessive retained memory " - "(%#zx[+%#zx] > %#zx)", usable, psz_usable, - allocated); + "(%#zx[+%#zx] > %#zx)", + usable, psz_usable, allocated); usable += psz_usable; } } @@ -174,8 +175,8 @@ TEST_BEGIN(test_retained) { * (rather than retaining) during reset. 
*/ do_arena_destroy(arena_ind); - expect_u_eq(do_arena_create(NULL), arena_ind, - "Unexpected arena index"); + expect_u_eq( + do_arena_create(NULL), arena_ind, "Unexpected arena index"); } for (unsigned i = 0; i < nthreads; i++) { @@ -188,6 +189,5 @@ TEST_END int main(void) { - return test( - test_retained); + return test(test_retained); } diff --git a/test/unit/rtree.c b/test/unit/rtree.c index 4101b72b..284c3eae 100644 --- a/test/unit/rtree.c +++ b/test/unit/rtree.c @@ -16,14 +16,15 @@ TEST_BEGIN(test_rtree_read_empty) { /* metadata_use_hooks */ true); expect_ptr_not_null(base, "Unexpected base_new failure"); - rtree_t *rtree = &test_rtree; + rtree_t *rtree = &test_rtree; rtree_ctx_t rtree_ctx; rtree_ctx_data_init(&rtree_ctx); - expect_false(rtree_new(rtree, base, false), - "Unexpected rtree_new() failure"); + expect_false( + rtree_new(rtree, base, false), "Unexpected rtree_new() failure"); rtree_contents_t contents; - expect_true(rtree_read_independent(tsdn, rtree, &rtree_ctx, PAGE, - &contents), "rtree_read_independent() should fail on empty rtree."); + expect_true( + rtree_read_independent(tsdn, rtree, &rtree_ctx, PAGE, &contents), + "rtree_read_independent() should fail on empty rtree."); base_delete(tsdn, base); } @@ -45,9 +46,9 @@ TEST_BEGIN(test_rtree_extrema) { edata_t *edata_a, *edata_b; edata_a = alloc_edata(); edata_b = alloc_edata(); - edata_init(edata_a, INVALID_ARENA_IND, NULL, SC_LARGE_MINCLASS, - false, sz_size2index(SC_LARGE_MINCLASS), 0, - extent_state_active, false, false, EXTENT_PAI_PAC, EXTENT_NOT_HEAD); + edata_init(edata_a, INVALID_ARENA_IND, NULL, SC_LARGE_MINCLASS, false, + sz_size2index(SC_LARGE_MINCLASS), 0, extent_state_active, false, + false, EXTENT_PAI_PAC, EXTENT_NOT_HEAD); edata_init(edata_b, INVALID_ARENA_IND, NULL, 0, false, SC_NSIZES, 0, extent_state_active, false, false, EXTENT_PAI_PAC, EXTENT_NOT_HEAD); @@ -57,11 +58,11 @@ TEST_BEGIN(test_rtree_extrema) { /* metadata_use_hooks */ true); expect_ptr_not_null(base, "Unexpected 
base_new failure"); - rtree_t *rtree = &test_rtree; + rtree_t *rtree = &test_rtree; rtree_ctx_t rtree_ctx; rtree_ctx_data_init(&rtree_ctx); - expect_false(rtree_new(rtree, base, false), - "Unexpected rtree_new() failure"); + expect_false( + rtree_new(rtree, base, false), "Unexpected rtree_new() failure"); rtree_contents_t contents_a; contents_a.edata = edata_a; @@ -73,13 +74,14 @@ TEST_BEGIN(test_rtree_extrema) { "Unexpected rtree_write() failure"); expect_false(rtree_write(tsdn, rtree, &rtree_ctx, PAGE, contents_a), "Unexpected rtree_write() failure"); - rtree_contents_t read_contents_a = rtree_read(tsdn, rtree, &rtree_ctx, - PAGE); + rtree_contents_t read_contents_a = rtree_read( + tsdn, rtree, &rtree_ctx, PAGE); expect_true(contents_a.edata == read_contents_a.edata - && contents_a.metadata.szind == read_contents_a.metadata.szind - && contents_a.metadata.slab == read_contents_a.metadata.slab - && contents_a.metadata.is_head == read_contents_a.metadata.is_head - && contents_a.metadata.state == read_contents_a.metadata.state, + && contents_a.metadata.szind == read_contents_a.metadata.szind + && contents_a.metadata.slab == read_contents_a.metadata.slab + && contents_a.metadata.is_head + == read_contents_a.metadata.is_head + && contents_a.metadata.state == read_contents_a.metadata.state, "rtree_read() should return previously set value"); rtree_contents_t contents_b; @@ -88,15 +90,17 @@ TEST_BEGIN(test_rtree_extrema) { contents_b.metadata.slab = edata_slab_get(edata_b); contents_b.metadata.is_head = edata_is_head_get(edata_b); contents_b.metadata.state = edata_state_get(edata_b); - expect_false(rtree_write(tsdn, rtree, &rtree_ctx, ~((uintptr_t)0), - contents_b), "Unexpected rtree_write() failure"); - rtree_contents_t read_contents_b = rtree_read(tsdn, rtree, &rtree_ctx, - ~((uintptr_t)0)); + expect_false( + rtree_write(tsdn, rtree, &rtree_ctx, ~((uintptr_t)0), contents_b), + "Unexpected rtree_write() failure"); + rtree_contents_t read_contents_b = rtree_read( + tsdn, 
rtree, &rtree_ctx, ~((uintptr_t)0)); assert_true(contents_b.edata == read_contents_b.edata - && contents_b.metadata.szind == read_contents_b.metadata.szind - && contents_b.metadata.slab == read_contents_b.metadata.slab - && contents_b.metadata.is_head == read_contents_b.metadata.is_head - && contents_b.metadata.state == read_contents_b.metadata.state, + && contents_b.metadata.szind == read_contents_b.metadata.szind + && contents_b.metadata.slab == read_contents_b.metadata.slab + && contents_b.metadata.is_head + == read_contents_b.metadata.is_head + && contents_b.metadata.state == read_contents_b.metadata.state, "rtree_read() should return previously set value"); base_delete(tsdn, base); @@ -109,19 +113,19 @@ TEST_BEGIN(test_rtree_bits) { /* metadata_use_hooks */ true); expect_ptr_not_null(base, "Unexpected base_new failure"); - uintptr_t keys[] = {PAGE, PAGE + 1, - PAGE + (((uintptr_t)1) << LG_PAGE) - 1}; + uintptr_t keys[] = { + PAGE, PAGE + 1, PAGE + (((uintptr_t)1) << LG_PAGE) - 1}; edata_t *edata_c = alloc_edata(); edata_init(edata_c, INVALID_ARENA_IND, NULL, 0, false, SC_NSIZES, 0, extent_state_active, false, false, EXTENT_PAI_PAC, EXTENT_NOT_HEAD); - rtree_t *rtree = &test_rtree; + rtree_t *rtree = &test_rtree; rtree_ctx_t rtree_ctx; rtree_ctx_data_init(&rtree_ctx); - expect_false(rtree_new(rtree, base, false), - "Unexpected rtree_new() failure"); + expect_false( + rtree_new(rtree, base, false), "Unexpected rtree_new() failure"); - for (unsigned i = 0; i < sizeof(keys)/sizeof(uintptr_t); i++) { + for (unsigned i = 0; i < sizeof(keys) / sizeof(uintptr_t); i++) { rtree_contents_t contents; contents.edata = edata_c; contents.metadata.szind = SC_NSIZES; @@ -129,18 +133,22 @@ TEST_BEGIN(test_rtree_bits) { contents.metadata.is_head = false; contents.metadata.state = extent_state_active; - expect_false(rtree_write(tsdn, rtree, &rtree_ctx, keys[i], - contents), "Unexpected rtree_write() failure"); - for (unsigned j = 0; j < sizeof(keys)/sizeof(uintptr_t); j++) { - 
expect_ptr_eq(rtree_read(tsdn, rtree, &rtree_ctx, - keys[j]).edata, edata_c, + expect_false( + rtree_write(tsdn, rtree, &rtree_ctx, keys[i], contents), + "Unexpected rtree_write() failure"); + for (unsigned j = 0; j < sizeof(keys) / sizeof(uintptr_t); + j++) { + expect_ptr_eq( + rtree_read(tsdn, rtree, &rtree_ctx, keys[j]).edata, + edata_c, "rtree_edata_read() should return previously set " "value and ignore insignificant key bits; i=%u, " - "j=%u, set key=%#"FMTxPTR", get key=%#"FMTxPTR, i, - j, keys[i], keys[j]); + "j=%u, set key=%#" FMTxPTR ", get key=%#" FMTxPTR, + i, j, keys[i], keys[j]); } expect_ptr_null(rtree_read(tsdn, rtree, &rtree_ctx, - (((uintptr_t)2) << LG_PAGE)).edata, + (((uintptr_t)2) << LG_PAGE)) + .edata, "Only leftmost rtree leaf should be set; i=%u", i); rtree_clear(tsdn, rtree, &rtree_ctx, keys[i]); } @@ -159,8 +167,8 @@ TEST_BEGIN(test_rtree_random) { /* metadata_use_hooks */ true); expect_ptr_not_null(base, "Unexpected base_new failure"); - uintptr_t keys[NSET]; - rtree_t *rtree = &test_rtree; + uintptr_t keys[NSET]; + rtree_t *rtree = &test_rtree; rtree_ctx_t rtree_ctx; rtree_ctx_data_init(&rtree_ctx); @@ -168,15 +176,15 @@ TEST_BEGIN(test_rtree_random) { edata_init(edata_d, INVALID_ARENA_IND, NULL, 0, false, SC_NSIZES, 0, extent_state_active, false, false, EXTENT_PAI_PAC, EXTENT_NOT_HEAD); - expect_false(rtree_new(rtree, base, false), - "Unexpected rtree_new() failure"); + expect_false( + rtree_new(rtree, base, false), "Unexpected rtree_new() failure"); for (unsigned i = 0; i < NSET; i++) { keys[i] = (uintptr_t)gen_rand64(sfmt); - rtree_leaf_elm_t *elm = rtree_leaf_elm_lookup(tsdn, rtree, - &rtree_ctx, keys[i], false, true); - expect_ptr_not_null(elm, - "Unexpected rtree_leaf_elm_lookup() failure"); + rtree_leaf_elm_t *elm = rtree_leaf_elm_lookup( + tsdn, rtree, &rtree_ctx, keys[i], false, true); + expect_ptr_not_null( + elm, "Unexpected rtree_leaf_elm_lookup() failure"); rtree_contents_t contents; contents.edata = edata_d; 
contents.metadata.szind = SC_NSIZES; @@ -184,26 +192,27 @@ TEST_BEGIN(test_rtree_random) { contents.metadata.is_head = false; contents.metadata.state = edata_state_get(edata_d); rtree_leaf_elm_write(tsdn, rtree, elm, contents); - expect_ptr_eq(rtree_read(tsdn, rtree, &rtree_ctx, - keys[i]).edata, edata_d, + expect_ptr_eq( + rtree_read(tsdn, rtree, &rtree_ctx, keys[i]).edata, edata_d, "rtree_edata_read() should return previously set value"); } for (unsigned i = 0; i < NSET; i++) { - expect_ptr_eq(rtree_read(tsdn, rtree, &rtree_ctx, - keys[i]).edata, edata_d, + expect_ptr_eq( + rtree_read(tsdn, rtree, &rtree_ctx, keys[i]).edata, edata_d, "rtree_edata_read() should return previously set value, " - "i=%u", i); + "i=%u", + i); } for (unsigned i = 0; i < NSET; i++) { rtree_clear(tsdn, rtree, &rtree_ctx, keys[i]); - expect_ptr_null(rtree_read(tsdn, rtree, &rtree_ctx, - keys[i]).edata, - "rtree_edata_read() should return previously set value"); + expect_ptr_null( + rtree_read(tsdn, rtree, &rtree_ctx, keys[i]).edata, + "rtree_edata_read() should return previously set value"); } for (unsigned i = 0; i < NSET; i++) { - expect_ptr_null(rtree_read(tsdn, rtree, &rtree_ctx, - keys[i]).edata, + expect_ptr_null( + rtree_read(tsdn, rtree, &rtree_ctx, keys[i]).edata, "rtree_edata_read() should return previously set value"); } @@ -215,8 +224,8 @@ TEST_BEGIN(test_rtree_random) { TEST_END static void -test_rtree_range_write(tsdn_t *tsdn, rtree_t *rtree, uintptr_t start, - uintptr_t end) { +test_rtree_range_write( + tsdn_t *tsdn, rtree_t *rtree, uintptr_t start, uintptr_t end) { rtree_ctx_t rtree_ctx; rtree_ctx_data_init(&rtree_ctx); @@ -230,15 +239,17 @@ test_rtree_range_write(tsdn_t *tsdn, rtree_t *rtree, uintptr_t start, contents.metadata.is_head = false; contents.metadata.state = extent_state_active; - expect_false(rtree_write(tsdn, rtree, &rtree_ctx, start, - contents), "Unexpected rtree_write() failure"); - expect_false(rtree_write(tsdn, rtree, &rtree_ctx, end, - contents), 
"Unexpected rtree_write() failure"); + expect_false(rtree_write(tsdn, rtree, &rtree_ctx, start, contents), + "Unexpected rtree_write() failure"); + expect_false(rtree_write(tsdn, rtree, &rtree_ctx, end, contents), + "Unexpected rtree_write() failure"); rtree_write_range(tsdn, rtree, &rtree_ctx, start, end, contents); for (uintptr_t i = 0; i < ((end - start) >> LG_PAGE); i++) { - expect_ptr_eq(rtree_read(tsdn, rtree, &rtree_ctx, - start + (i << LG_PAGE)).edata, edata_e, + expect_ptr_eq( + rtree_read(tsdn, rtree, &rtree_ctx, start + (i << LG_PAGE)) + .edata, + edata_e, "rtree_edata_read() should return previously set value"); } rtree_clear_range(tsdn, rtree, &rtree_ctx, start, end); @@ -247,8 +258,9 @@ test_rtree_range_write(tsdn_t *tsdn, rtree_t *rtree, uintptr_t start, elm = rtree_leaf_elm_lookup(tsdn, rtree, &rtree_ctx, start + (i << LG_PAGE), false, false); expect_ptr_not_null(elm, "Should have been initialized."); - expect_ptr_null(rtree_leaf_elm_read(tsdn, rtree, elm, - false).edata, "Should have been cleared."); + expect_ptr_null( + rtree_leaf_elm_read(tsdn, rtree, elm, false).edata, + "Should have been cleared."); } } @@ -259,8 +271,8 @@ TEST_BEGIN(test_rtree_range) { expect_ptr_not_null(base, "Unexpected base_new failure"); rtree_t *rtree = &test_rtree; - expect_false(rtree_new(rtree, base, false), - "Unexpected rtree_new() failure"); + expect_false( + rtree_new(rtree, base, false), "Unexpected rtree_new() failure"); /* Not crossing rtree node boundary first. 
*/ uintptr_t start = ZU(1) << rtree_leaf_maskbits(); @@ -280,10 +292,6 @@ TEST_END int main(void) { - return test( - test_rtree_read_empty, - test_rtree_extrema, - test_rtree_bits, - test_rtree_random, - test_rtree_range); + return test(test_rtree_read_empty, test_rtree_extrema, test_rtree_bits, + test_rtree_random, test_rtree_range); } diff --git a/test/unit/safety_check.c b/test/unit/safety_check.c index 84726675..558797c0 100644 --- a/test/unit/safety_check.c +++ b/test/unit/safety_check.c @@ -8,7 +8,8 @@ */ bool fake_abort_called; -void fake_abort(const char *message) { +void +fake_abort(const char *message) { (void)message; fake_abort_called = true; } @@ -26,7 +27,7 @@ TEST_BEGIN(test_malloc_free_overflow) { safety_check_set_abort(&fake_abort); /* Buffer overflow! */ - char* ptr = malloc(128); + char *ptr = malloc(128); buffer_overflow_write(ptr, 128); free(ptr); safety_check_set_abort(NULL); @@ -42,7 +43,7 @@ TEST_BEGIN(test_mallocx_dallocx_overflow) { safety_check_set_abort(&fake_abort); /* Buffer overflow! */ - char* ptr = mallocx(128, 0); + char *ptr = mallocx(128, 0); buffer_overflow_write(ptr, 128); dallocx(ptr, 0); safety_check_set_abort(NULL); @@ -58,7 +59,7 @@ TEST_BEGIN(test_malloc_sdallocx_overflow) { safety_check_set_abort(&fake_abort); /* Buffer overflow! */ - char* ptr = malloc(128); + char *ptr = malloc(128); buffer_overflow_write(ptr, 128); sdallocx(ptr, 128, 0); safety_check_set_abort(NULL); @@ -74,7 +75,7 @@ TEST_BEGIN(test_realloc_overflow) { safety_check_set_abort(&fake_abort); /* Buffer overflow! */ - char* ptr = malloc(128); + char *ptr = malloc(128); buffer_overflow_write(ptr, 128); ptr = realloc(ptr, 129); safety_check_set_abort(NULL); @@ -91,7 +92,7 @@ TEST_BEGIN(test_rallocx_overflow) { safety_check_set_abort(&fake_abort); /* Buffer overflow! 
*/ - char* ptr = malloc(128); + char *ptr = malloc(128); buffer_overflow_write(ptr, 128); ptr = rallocx(ptr, 129, 0); safety_check_set_abort(NULL); @@ -108,7 +109,7 @@ TEST_BEGIN(test_xallocx_overflow) { safety_check_set_abort(&fake_abort); /* Buffer overflow! */ - char* ptr = malloc(128); + char *ptr = malloc(128); buffer_overflow_write(ptr, 128); size_t result = xallocx(ptr, 129, 0, 0); expect_zu_eq(result, 128, ""); @@ -120,7 +121,7 @@ TEST_BEGIN(test_xallocx_overflow) { TEST_END TEST_BEGIN(test_realloc_no_overflow) { - char* ptr = malloc(128); + char *ptr = malloc(128); ptr = realloc(ptr, 256); ptr[128] = 0; ptr[255] = 0; @@ -135,7 +136,7 @@ TEST_BEGIN(test_realloc_no_overflow) { TEST_END TEST_BEGIN(test_rallocx_no_overflow) { - char* ptr = malloc(128); + char *ptr = malloc(128); ptr = rallocx(ptr, 256, 0); ptr[128] = 0; ptr[255] = 0; @@ -151,13 +152,8 @@ TEST_END int main(void) { - return test( - test_malloc_free_overflow, - test_mallocx_dallocx_overflow, - test_malloc_sdallocx_overflow, - test_realloc_overflow, - test_rallocx_overflow, - test_xallocx_overflow, - test_realloc_no_overflow, - test_rallocx_no_overflow); + return test(test_malloc_free_overflow, test_mallocx_dallocx_overflow, + test_malloc_sdallocx_overflow, test_realloc_overflow, + test_rallocx_overflow, test_xallocx_overflow, + test_realloc_no_overflow, test_rallocx_no_overflow); } diff --git a/test/unit/san.c b/test/unit/san.c index 5b98f52e..2c7f1ec5 100644 --- a/test/unit/san.c +++ b/test/unit/san.c @@ -6,8 +6,8 @@ static void verify_extent_guarded(tsdn_t *tsdn, void *ptr) { - expect_true(extent_is_guarded(tsdn, ptr), - "All extents should be guarded."); + expect_true( + extent_is_guarded(tsdn, ptr), "All extents should be guarded."); } #define MAX_SMALL_ALLOCATIONS 4096 @@ -21,13 +21,13 @@ void *small_alloc[MAX_SMALL_ALLOCATIONS]; TEST_BEGIN(test_guarded_small) { test_skip_if(opt_prof); - tsdn_t *tsdn = tsd_tsdn(tsd_fetch()); + tsdn_t *tsdn = tsd_tsdn(tsd_fetch()); unsigned npages = 16, 
pages_found = 0, ends_found = 0; VARIABLE_ARRAY(uintptr_t, pages, npages); /* Allocate to get sanitized pointers. */ - size_t slab_sz = PAGE; - size_t sz = slab_sz / 8; + size_t slab_sz = PAGE; + size_t sz = slab_sz / 8; unsigned n_alloc = 0; while (n_alloc < MAX_SMALL_ALLOCATIONS) { void *ptr = malloc(sz); @@ -54,8 +54,9 @@ TEST_BEGIN(test_guarded_small) { /* Verify the pages are not continuous, i.e. separated by guards. */ for (unsigned i = 0; i < npages - 1; i++) { for (unsigned j = i + 1; j < npages; j++) { - uintptr_t ptr_diff = pages[i] > pages[j] ? - pages[i] - pages[j] : pages[j] - pages[i]; + uintptr_t ptr_diff = pages[i] > pages[j] + ? pages[i] - pages[j] + : pages[j] - pages[i]; expect_zu_ge((size_t)ptr_diff, slab_sz + PAGE, "There should be at least one pages between " "guarded slabs"); @@ -69,7 +70,7 @@ TEST_BEGIN(test_guarded_small) { TEST_END TEST_BEGIN(test_guarded_large) { - tsdn_t *tsdn = tsd_tsdn(tsd_fetch()); + tsdn_t *tsdn = tsd_tsdn(tsd_fetch()); unsigned nlarge = 32; VARIABLE_ARRAY(uintptr_t, large, nlarge); @@ -85,8 +86,9 @@ TEST_BEGIN(test_guarded_large) { /* Verify the pages are not continuous, i.e. separated by guards. */ for (unsigned i = 0; i < nlarge; i++) { for (unsigned j = i + 1; j < nlarge; j++) { - uintptr_t ptr_diff = large[i] > large[j] ? - large[i] - large[j] : large[j] - large[i]; + uintptr_t ptr_diff = large[i] > large[j] + ? 
large[i] - large[j] + : large[j] - large[i]; expect_zu_ge((size_t)ptr_diff, large_sz + 2 * PAGE, "There should be at least two pages between " " guarded large allocations"); @@ -102,15 +104,13 @@ TEST_END static void verify_pdirty(unsigned arena_ind, uint64_t expected) { uint64_t pdirty = get_arena_pdirty(arena_ind); - expect_u64_eq(pdirty, expected / PAGE, - "Unexpected dirty page amount."); + expect_u64_eq(pdirty, expected / PAGE, "Unexpected dirty page amount."); } static void verify_pmuzzy(unsigned arena_ind, uint64_t expected) { uint64_t pmuzzy = get_arena_pmuzzy(arena_ind); - expect_u64_eq(pmuzzy, expected / PAGE, - "Unexpected muzzy page amount."); + expect_u64_eq(pmuzzy, expected / PAGE, "Unexpected muzzy page amount."); } TEST_BEGIN(test_guarded_decay) { @@ -140,7 +140,7 @@ TEST_BEGIN(test_guarded_decay) { verify_pmuzzy(arena_ind, 0); tsdn_t *tsdn = tsd_tsdn(tsd_fetch()); - int flags = MALLOCX_ARENA(arena_ind) | MALLOCX_TCACHE_NONE; + int flags = MALLOCX_ARENA(arena_ind) | MALLOCX_TCACHE_NONE; /* Should reuse dirty extents for the two mallocx. 
*/ void *p1 = do_mallocx(sz1, flags); @@ -200,8 +200,5 @@ TEST_END int main(void) { - return test( - test_guarded_small, - test_guarded_large, - test_guarded_decay); + return test(test_guarded_small, test_guarded_large, test_guarded_decay); } diff --git a/test/unit/san_bump.c b/test/unit/san_bump.c index cafa37fe..9aa0210e 100644 --- a/test/unit/san_bump.c +++ b/test/unit/san_bump.c @@ -16,12 +16,12 @@ TEST_BEGIN(test_san_bump_alloc) { assert_u_ne(arena_ind, UINT_MAX, "Failed to create an arena"); arena_t *arena = arena_get(tsdn, arena_ind, false); - pac_t *pac = &arena->pa_shard.pac; + pac_t *pac = &arena->pa_shard.pac; - size_t alloc_size = PAGE * 16; - size_t alloc_n = alloc_size / sizeof(unsigned); - edata_t* edata = san_bump_alloc(tsdn, &sba, pac, pac_ehooks_get(pac), - alloc_size, /* zero */ false); + size_t alloc_size = PAGE * 16; + size_t alloc_n = alloc_size / sizeof(unsigned); + edata_t *edata = san_bump_alloc( + tsdn, &sba, pac, pac_ehooks_get(pac), alloc_size, /* zero */ false); expect_ptr_not_null(edata, "Failed to allocate edata"); expect_u_eq(edata_arena_ind_get(edata), arena_ind, @@ -39,10 +39,10 @@ TEST_BEGIN(test_san_bump_alloc) { ((unsigned *)ptr)[i] = 1; } - size_t alloc_size2 = PAGE * 28; - size_t alloc_n2 = alloc_size / sizeof(unsigned); - edata_t *edata2 = san_bump_alloc(tsdn, &sba, pac, pac_ehooks_get(pac), - alloc_size2, /* zero */ true); + size_t alloc_size2 = PAGE * 28; + size_t alloc_n2 = alloc_size / sizeof(unsigned); + edata_t *edata2 = san_bump_alloc( + tsdn, &sba, pac, pac_ehooks_get(pac), alloc_size2, /* zero */ true); expect_ptr_not_null(edata2, "Failed to allocate edata"); expect_u_eq(edata_arena_ind_get(edata2), arena_ind, @@ -57,11 +57,11 @@ TEST_BEGIN(test_san_bump_alloc) { expect_ptr_not_null(ptr, "Edata was assigned an invalid address"); uintptr_t ptrdiff = ptr2 > ptr ? 
(uintptr_t)ptr2 - (uintptr_t)ptr - : (uintptr_t)ptr - (uintptr_t)ptr2; - size_t between_allocs = (size_t)ptrdiff - alloc_size; + : (uintptr_t)ptr - (uintptr_t)ptr2; + size_t between_allocs = (size_t)ptrdiff - alloc_size; - expect_zu_ge(between_allocs, PAGE, - "Guard page between allocs is missing"); + expect_zu_ge( + between_allocs, PAGE, "Guard page between allocs is missing"); for (unsigned i = 0; i < alloc_n2; ++i) { expect_u_eq(((unsigned *)ptr2)[i], 0, "Memory is not zeroed"); @@ -81,11 +81,11 @@ TEST_BEGIN(test_large_alloc_size) { assert_u_ne(arena_ind, UINT_MAX, "Failed to create an arena"); arena_t *arena = arena_get(tsdn, arena_ind, false); - pac_t *pac = &arena->pa_shard.pac; + pac_t *pac = &arena->pa_shard.pac; - size_t alloc_size = SBA_RETAINED_ALLOC_SIZE * 2; - edata_t* edata = san_bump_alloc(tsdn, &sba, pac, pac_ehooks_get(pac), - alloc_size, /* zero */ false); + size_t alloc_size = SBA_RETAINED_ALLOC_SIZE * 2; + edata_t *edata = san_bump_alloc( + tsdn, &sba, pac, pac_ehooks_get(pac), alloc_size, /* zero */ false); expect_u_eq(edata_arena_ind_get(edata), arena_ind, "Edata was assigned an incorrect arena id"); expect_zu_eq(edata_size_get(edata), alloc_size, @@ -105,7 +105,5 @@ TEST_END int main(void) { - return test( - test_san_bump_alloc, - test_large_alloc_size); + return test(test_san_bump_alloc, test_large_alloc_size); } diff --git a/test/unit/sc.c b/test/unit/sc.c index d207481c..725ede0e 100644 --- a/test/unit/sc.c +++ b/test/unit/sc.c @@ -4,7 +4,7 @@ TEST_BEGIN(test_update_slab_size) { sc_data_t data; memset(&data, 0, sizeof(data)); sc_data_init(&data); - sc_t *tiny = &data.sc[0]; + sc_t *tiny = &data.sc[0]; size_t tiny_size = (ZU(1) << tiny->lg_base) + (ZU(tiny->ndelta) << tiny->lg_delta); size_t pgs_too_big = (tiny_size * BITMAP_MAXBITS + PAGE - 1) / PAGE + 1; @@ -13,14 +13,14 @@ TEST_BEGIN(test_update_slab_size) { sc_data_update_slab_size(&data, 1, 10 * PAGE, 1); for (int i = 0; i < data.nbins; i++) { - sc_t *sc = &data.sc[i]; + sc_t *sc = 
&data.sc[i]; size_t reg_size = (ZU(1) << sc->lg_base) + (ZU(sc->ndelta) << sc->lg_delta); if (reg_size <= PAGE) { expect_d_eq(sc->pgs, 1, "Ignored valid page size hint"); } else { - expect_d_gt(sc->pgs, 1, - "Allowed invalid page size hint"); + expect_d_gt( + sc->pgs, 1, "Allowed invalid page size hint"); } } } @@ -28,6 +28,5 @@ TEST_END int main(void) { - return test( - test_update_slab_size); + return test(test_update_slab_size); } diff --git a/test/unit/sec.c b/test/unit/sec.c index cfef043f..d57c66ec 100644 --- a/test/unit/sec.c +++ b/test/unit/sec.c @@ -4,8 +4,8 @@ typedef struct pai_test_allocator_s pai_test_allocator_t; struct pai_test_allocator_s { - pai_t pai; - bool alloc_fail; + pai_t pai; + bool alloc_fail; size_t alloc_count; size_t alloc_batch_count; size_t dalloc_count; @@ -17,10 +17,10 @@ struct pai_test_allocator_s { * pointers it gets back; this is mostly just helpful for debugging. */ uintptr_t next_ptr; - size_t expand_count; - bool expand_return_value; - size_t shrink_count; - bool shrink_return_value; + size_t expand_count; + bool expand_return_value; + size_t shrink_count; + bool shrink_return_value; }; static void @@ -82,8 +82,7 @@ pai_test_allocator_alloc_batch(tsdn_t *tsdn, pai_t *self, size_t size, for (size_t i = 0; i < nallocs; i++) { edata_t *edata = malloc(sizeof(edata_t)); assert_ptr_not_null(edata, ""); - edata_init(edata, /* arena_ind */ 0, - (void *)ta->next_ptr, size, + edata_init(edata, /* arena_ind */ 0, (void *)ta->next_ptr, size, /* slab */ false, /* szind */ 0, /* sn */ 1, extent_state_active, /* zero */ false, /* comitted */ true, /* ranged */ false, EXTENT_NOT_HEAD); @@ -112,8 +111,8 @@ pai_test_allocator_shrink(tsdn_t *tsdn, pai_t *self, edata_t *edata, } static void -pai_test_allocator_dalloc(tsdn_t *tsdn, pai_t *self, edata_t *edata, - bool *deferred_work_generated) { +pai_test_allocator_dalloc( + tsdn_t *tsdn, pai_t *self, edata_t *edata, bool *deferred_work_generated) { pai_test_allocator_t *ta = (pai_test_allocator_t 
*)self; ta->dalloc_count++; free(edata); @@ -174,7 +173,7 @@ TEST_BEGIN(test_reuse) { enum { NALLOCS = 11 }; edata_t *one_page[NALLOCS]; edata_t *two_page[NALLOCS]; - bool deferred_work_generated = false; + bool deferred_work_generated = false; test_sec_init(&sec, &ta.pai, /* nshards */ 1, /* max_alloc */ 2 * PAGE, /* max_bytes */ 2 * (NALLOCS * PAGE + NALLOCS * 2 * PAGE)); for (int i = 0; i < NALLOCS; i++) { @@ -189,26 +188,24 @@ TEST_BEGIN(test_reuse) { } expect_zu_eq(0, ta.alloc_count, "Should be using batch allocs"); size_t max_allocs = ta.alloc_count + ta.alloc_batch_count; - expect_zu_le(2 * NALLOCS, max_allocs, - "Incorrect number of allocations"); - expect_zu_eq(0, ta.dalloc_count, - "Incorrect number of allocations"); + expect_zu_le( + 2 * NALLOCS, max_allocs, "Incorrect number of allocations"); + expect_zu_eq(0, ta.dalloc_count, "Incorrect number of allocations"); /* * Free in a different order than we allocated, to make sure free-list * separation works correctly. */ for (int i = NALLOCS - 1; i >= 0; i--) { - pai_dalloc(tsdn, &sec.pai, one_page[i], - &deferred_work_generated); + pai_dalloc( + tsdn, &sec.pai, one_page[i], &deferred_work_generated); } for (int i = NALLOCS - 1; i >= 0; i--) { - pai_dalloc(tsdn, &sec.pai, two_page[i], - &deferred_work_generated); + pai_dalloc( + tsdn, &sec.pai, two_page[i], &deferred_work_generated); } expect_zu_eq(max_allocs, ta.alloc_count + ta.alloc_batch_count, "Incorrect number of allocations"); - expect_zu_eq(0, ta.dalloc_count, - "Incorrect number of allocations"); + expect_zu_eq(0, ta.dalloc_count, "Incorrect number of allocations"); /* * Check that the n'th most recent deallocated extent is returned for * the n'th alloc request of a given size. 
@@ -220,19 +217,15 @@ TEST_BEGIN(test_reuse) { edata_t *alloc2 = pai_alloc(tsdn, &sec.pai, 2 * PAGE, PAGE, /* zero */ false, /* guarded */ false, /* frequent_reuse */ false, &deferred_work_generated); - expect_ptr_eq(one_page[i], alloc1, - "Got unexpected allocation"); - expect_ptr_eq(two_page[i], alloc2, - "Got unexpected allocation"); + expect_ptr_eq(one_page[i], alloc1, "Got unexpected allocation"); + expect_ptr_eq(two_page[i], alloc2, "Got unexpected allocation"); } expect_zu_eq(max_allocs, ta.alloc_count + ta.alloc_batch_count, "Incorrect number of allocations"); - expect_zu_eq(0, ta.dalloc_count, - "Incorrect number of allocations"); + expect_zu_eq(0, ta.dalloc_count, "Incorrect number of allocations"); } TEST_END - TEST_BEGIN(test_auto_flush) { pai_test_allocator_t ta; pai_test_allocator_init(&ta); @@ -251,7 +244,7 @@ TEST_BEGIN(test_auto_flush) { enum { NALLOCS = 10 }; edata_t *extra_alloc; edata_t *allocs[NALLOCS]; - bool deferred_work_generated = false; + bool deferred_work_generated = false; test_sec_init(&sec, &ta.pai, /* nshards */ 1, /* max_alloc */ PAGE, /* max_bytes */ NALLOCS * PAGE); for (int i = 0; i < NALLOCS; i++) { @@ -265,18 +258,16 @@ TEST_BEGIN(test_auto_flush) { &deferred_work_generated); expect_ptr_not_null(extra_alloc, "Unexpected alloc failure"); size_t max_allocs = ta.alloc_count + ta.alloc_batch_count; - expect_zu_le(NALLOCS + 1, max_allocs, - "Incorrect number of allocations"); - expect_zu_eq(0, ta.dalloc_count, - "Incorrect number of allocations"); + expect_zu_le( + NALLOCS + 1, max_allocs, "Incorrect number of allocations"); + expect_zu_eq(0, ta.dalloc_count, "Incorrect number of allocations"); /* Free until the SEC is full, but should not have flushed yet. 
*/ for (int i = 0; i < NALLOCS; i++) { pai_dalloc(tsdn, &sec.pai, allocs[i], &deferred_work_generated); } - expect_zu_le(NALLOCS + 1, max_allocs, - "Incorrect number of allocations"); - expect_zu_eq(0, ta.dalloc_count, - "Incorrect number of allocations"); + expect_zu_le( + NALLOCS + 1, max_allocs, "Incorrect number of allocations"); + expect_zu_eq(0, ta.dalloc_count, "Incorrect number of allocations"); /* * Free the extra allocation; this should trigger a flush. The internal * flushing logic is allowed to get complicated; for now, we rely on our @@ -308,7 +299,7 @@ do_disable_flush_test(bool is_disable) { enum { NALLOCS = 11 }; edata_t *allocs[NALLOCS]; - bool deferred_work_generated = false; + bool deferred_work_generated = false; test_sec_init(&sec, &ta.pai, /* nshards */ 1, /* max_alloc */ PAGE, /* max_bytes */ NALLOCS * PAGE); for (int i = 0; i < NALLOCS; i++) { @@ -324,8 +315,7 @@ do_disable_flush_test(bool is_disable) { size_t max_allocs = ta.alloc_count + ta.alloc_batch_count; expect_zu_le(NALLOCS, max_allocs, "Incorrect number of allocations"); - expect_zu_eq(0, ta.dalloc_count, - "Incorrect number of allocations"); + expect_zu_eq(0, ta.dalloc_count, "Incorrect number of allocations"); if (is_disable) { sec_disable(tsdn, &sec); @@ -345,8 +335,8 @@ do_disable_flush_test(bool is_disable) { * If we free into a disabled SEC, it should forward to the fallback. * Otherwise, the SEC should accept the allocation. 
*/ - pai_dalloc(tsdn, &sec.pai, allocs[NALLOCS - 1], - &deferred_work_generated); + pai_dalloc( + tsdn, &sec.pai, allocs[NALLOCS - 1], &deferred_work_generated); expect_zu_eq(max_allocs, ta.alloc_count + ta.alloc_batch_count, "Incorrect number of allocations"); @@ -382,18 +372,18 @@ TEST_BEGIN(test_max_alloc_respected) { /* max_bytes */ 1000 * PAGE); for (size_t i = 0; i < 100; i++) { - expect_zu_eq(i, ta.alloc_count, - "Incorrect number of allocations"); - expect_zu_eq(i, ta.dalloc_count, - "Incorrect number of deallocations"); + expect_zu_eq( + i, ta.alloc_count, "Incorrect number of allocations"); + expect_zu_eq( + i, ta.dalloc_count, "Incorrect number of deallocations"); edata_t *edata = pai_alloc(tsdn, &sec.pai, attempted_alloc, PAGE, /* zero */ false, /* guarded */ false, /* frequent_reuse */ false, &deferred_work_generated); expect_ptr_not_null(edata, "Unexpected alloc failure"); - expect_zu_eq(i + 1, ta.alloc_count, - "Incorrect number of allocations"); - expect_zu_eq(i, ta.dalloc_count, - "Incorrect number of deallocations"); + expect_zu_eq( + i + 1, ta.alloc_count, "Incorrect number of allocations"); + expect_zu_eq( + i, ta.dalloc_count, "Incorrect number of deallocations"); pai_dalloc(tsdn, &sec.pai, edata, &deferred_work_generated); } } @@ -435,8 +425,8 @@ TEST_BEGIN(test_expand_shrink_delegate) { expect_false(err, "Unexpected shrink failure"); expect_zu_eq(1, ta.shrink_count, ""); ta.shrink_return_value = true; - err = pai_shrink(tsdn, &sec.pai, edata, 2 * PAGE, PAGE, - &deferred_work_generated); + err = pai_shrink( + tsdn, &sec.pai, edata, 2 * PAGE, PAGE, &deferred_work_generated); expect_true(err, "Unexpected shrink success"); expect_zu_eq(2, ta.shrink_count, ""); } @@ -455,7 +445,7 @@ TEST_BEGIN(test_nshards_0) { opts.nshards = 0; sec_init(TSDN_NULL, &sec, base, &ta.pai, &opts); - bool deferred_work_generated = false; + bool deferred_work_generated = false; edata_t *edata = pai_alloc(tsdn, &sec.pai, PAGE, PAGE, /* zero */ false, /* guarded */ false, 
/* frequent_reuse */ false, &deferred_work_generated); @@ -570,8 +560,9 @@ TEST_BEGIN(test_stats_auto_flush) { pai_dalloc(tsdn, &sec.pai, extra_alloc1, &deferred_work_generated); - expect_stats_pages(tsdn, &sec, ta.alloc_count + ta.alloc_batch_count - - ta.dalloc_count - ta.dalloc_batch_count); + expect_stats_pages(tsdn, &sec, + ta.alloc_count + ta.alloc_batch_count - ta.dalloc_count + - ta.dalloc_batch_count); } TEST_END @@ -590,7 +581,7 @@ TEST_BEGIN(test_stats_manual_flush) { test_sec_init(&sec, &ta.pai, /* nshards */ 1, /* max_alloc */ PAGE, /* max_bytes */ FLUSH_PAGES * PAGE); - bool deferred_work_generated = false; + bool deferred_work_generated = false; edata_t *allocs[FLUSH_PAGES]; for (size_t i = 0; i < FLUSH_PAGES; i++) { allocs[i] = pai_alloc(tsdn, &sec.pai, PAGE, PAGE, @@ -621,15 +612,8 @@ TEST_END int main(void) { - return test( - test_reuse, - test_auto_flush, - test_disable, - test_flush, - test_max_alloc_respected, - test_expand_shrink_delegate, - test_nshards_0, - test_stats_simple, - test_stats_auto_flush, + return test(test_reuse, test_auto_flush, test_disable, test_flush, + test_max_alloc_respected, test_expand_shrink_delegate, + test_nshards_0, test_stats_simple, test_stats_auto_flush, test_stats_manual_flush); } diff --git a/test/unit/seq.c b/test/unit/seq.c index 06ed6834..ca6c74b1 100644 --- a/test/unit/seq.c +++ b/test/unit/seq.c @@ -24,7 +24,7 @@ expect_data(data_t *data) { seq_define(data_t, data) -typedef struct thd_data_s thd_data_t; + typedef struct thd_data_s thd_data_t; struct thd_data_s { seq_data_t data; }; @@ -32,8 +32,8 @@ struct thd_data_s { static void * seq_reader_thd(void *arg) { thd_data_t *thd_data = (thd_data_t *)arg; - int iter = 0; - data_t local_data; + int iter = 0; + data_t local_data; while (iter < 1000 * 1000 - 1) { bool success = seq_try_load_data(&local_data, &thd_data->data); if (success) { @@ -49,7 +49,7 @@ seq_reader_thd(void *arg) { static void * seq_writer_thd(void *arg) { thd_data_t *thd_data = (thd_data_t 
*)arg; - data_t local_data; + data_t local_data; memset(&local_data, 0, sizeof(local_data)); for (int i = 0; i < 1000 * 1000; i++) { set_data(&local_data, i); @@ -74,7 +74,7 @@ TEST_BEGIN(test_seq_threaded) { TEST_END TEST_BEGIN(test_seq_simple) { - data_t data; + data_t data; seq_data_t seq; memset(&seq, 0, sizeof(seq)); for (int i = 0; i < 1000 * 1000; i++) { @@ -88,8 +88,7 @@ TEST_BEGIN(test_seq_simple) { } TEST_END -int main(void) { - return test_no_reentrancy( - test_seq_simple, - test_seq_threaded); +int +main(void) { + return test_no_reentrancy(test_seq_simple, test_seq_threaded); } diff --git a/test/unit/size_check.c b/test/unit/size_check.c index 3cb3bc9c..a31578bf 100644 --- a/test/unit/size_check.c +++ b/test/unit/size_check.c @@ -3,7 +3,8 @@ #include "jemalloc/internal/safety_check.h" bool fake_abort_called; -void fake_abort(const char *message) { +void +fake_abort(const char *message) { (void)message; fake_abort_called = true; } @@ -72,8 +73,7 @@ TEST_END int main(void) { - return test( - test_invalid_size_sdallocx, + return test(test_invalid_size_sdallocx, test_invalid_size_sdallocx_nonzero_flag, test_invalid_size_sdallocx_noflags); } diff --git a/test/unit/size_classes.c b/test/unit/size_classes.c index c373829c..5379047c 100644 --- a/test/unit/size_classes.c +++ b/test/unit/size_classes.c @@ -3,12 +3,13 @@ static size_t get_max_size_class(void) { unsigned nlextents; - size_t mib[4]; - size_t sz, miblen, max_size_class; + size_t mib[4]; + size_t sz, miblen, max_size_class; sz = sizeof(unsigned); - expect_d_eq(mallctl("arenas.nlextents", (void *)&nlextents, &sz, NULL, - 0), 0, "Unexpected mallctl() error"); + expect_d_eq( + mallctl("arenas.nlextents", (void *)&nlextents, &sz, NULL, 0), 0, + "Unexpected mallctl() error"); miblen = sizeof(mib) / sizeof(size_t); expect_d_eq(mallctlnametomib("arenas.lextent.0.size", mib, &miblen), 0, @@ -16,30 +17,34 @@ get_max_size_class(void) { mib[2] = nlextents - 1; sz = sizeof(size_t); - expect_d_eq(mallctlbymib(mib, 
miblen, (void *)&max_size_class, &sz, - NULL, 0), 0, "Unexpected mallctlbymib() error"); + expect_d_eq( + mallctlbymib(mib, miblen, (void *)&max_size_class, &sz, NULL, 0), 0, + "Unexpected mallctlbymib() error"); return max_size_class; } TEST_BEGIN(test_size_classes) { - size_t size_class, max_size_class; + size_t size_class, max_size_class; szind_t index, gen_index, max_index; - max_size_class = sz_large_size_classes_disabled()? SC_SMALL_MAXCLASS: - get_max_size_class(); + max_size_class = sz_large_size_classes_disabled() + ? SC_SMALL_MAXCLASS + : get_max_size_class(); max_index = sz_size2index(max_size_class); - for (index = 0, size_class = sz_index2size(index); index < max_index || - size_class < max_size_class; index++, size_class = - sz_index2size(index)) { + for (index = 0, size_class = sz_index2size(index); + index < max_index || size_class < max_size_class; + index++, size_class = sz_index2size(index)) { gen_index = sz_size2index(size_class); expect_true(index < max_index, "Loop conditionals should be equivalent; index=%u, " - "size_class=%zu (%#zx)", index, size_class, size_class); + "size_class=%zu (%#zx)", + index, size_class, size_class); expect_true(size_class < max_size_class, "Loop conditionals should be equivalent; index=%u, " - "size_class=%zu (%#zx)", index, size_class, size_class); + "size_class=%zu (%#zx)", + index, size_class, size_class); expect_u_eq(index, gen_index, "sz_size2index() does not reverse sz_index2size(): index=%u" @@ -51,29 +56,30 @@ TEST_BEGIN(test_size_classes) { " --> size_class=%zu --> index=%u --> size_class=%zu", index, size_class, gen_index, sz_index2size(gen_index)); - expect_u_eq(index+1, sz_size2index(size_class+1), + expect_u_eq(index + 1, sz_size2index(size_class + 1), "Next size_class does not round up properly"); - expect_zu_eq(size_class, (index > 0) ? - sz_s2u(sz_index2size(index-1)+1) : sz_s2u(1), + expect_zu_eq(size_class, + (index > 0) ? 
sz_s2u(sz_index2size(index - 1) + 1) + : sz_s2u(1), "sz_s2u() does not round up to size class"); - expect_zu_eq(size_class, sz_s2u(size_class-1), + expect_zu_eq(size_class, sz_s2u(size_class - 1), "sz_s2u() does not round up to size class"); expect_zu_eq(size_class, sz_s2u(size_class), "sz_s2u() does not compute same size class"); - expect_zu_eq(sz_s2u(size_class+1), sz_index2size(index+1), + expect_zu_eq(sz_s2u(size_class + 1), sz_index2size(index + 1), "sz_s2u() does not round up to next size class"); } expect_u_eq(index, sz_size2index(sz_index2size(index)), "sz_size2index() does not reverse sz_index2size()"); - expect_zu_eq(max_size_class, sz_index2size( - sz_size2index(max_size_class)), + expect_zu_eq(max_size_class, + sz_index2size(sz_size2index(max_size_class)), "sz_index2size() does not reverse sz_size2index()"); - expect_zu_eq(size_class, sz_s2u(sz_index2size(index-1)+1), + expect_zu_eq(size_class, sz_s2u(sz_index2size(index - 1) + 1), "sz_s2u() does not round up to size class"); - expect_zu_eq(size_class, sz_s2u(size_class-1), + expect_zu_eq(size_class, sz_s2u(size_class - 1), "sz_s2u() does not round up to size class"); expect_zu_eq(size_class, sz_s2u(size_class), "sz_s2u() does not compute same size class"); @@ -115,31 +121,33 @@ TEST_BEGIN(test_grow_slow_size_classes) { TEST_END TEST_BEGIN(test_psize_classes) { - size_t size_class, max_psz; + size_t size_class, max_psz; pszind_t pind, max_pind; max_psz = get_max_size_class() + PAGE; max_pind = sz_psz2ind(max_psz); for (pind = 0, size_class = sz_pind2sz(pind); - pind < max_pind || size_class < max_psz; - pind++, size_class = sz_pind2sz(pind)) { + pind < max_pind || size_class < max_psz; + pind++, size_class = sz_pind2sz(pind)) { expect_true(pind < max_pind, "Loop conditionals should be equivalent; pind=%u, " - "size_class=%zu (%#zx)", pind, size_class, size_class); + "size_class=%zu (%#zx)", + pind, size_class, size_class); expect_true(size_class < max_psz, "Loop conditionals should be equivalent; 
pind=%u, " - "size_class=%zu (%#zx)", pind, size_class, size_class); + "size_class=%zu (%#zx)", + pind, size_class, size_class); expect_u_eq(pind, sz_psz2ind(size_class), "sz_psz2ind() does not reverse sz_pind2sz(): pind=%u -->" - " size_class=%zu --> pind=%u --> size_class=%zu", pind, - size_class, sz_psz2ind(size_class), + " size_class=%zu --> pind=%u --> size_class=%zu", + pind, size_class, sz_psz2ind(size_class), sz_pind2sz(sz_psz2ind(size_class))); expect_zu_eq(size_class, sz_pind2sz(sz_psz2ind(size_class)), "sz_pind2sz() does not reverse sz_psz2ind(): pind=%u -->" - " size_class=%zu --> pind=%u --> size_class=%zu", pind, - size_class, sz_psz2ind(size_class), + " size_class=%zu --> pind=%u --> size_class=%zu", + pind, size_class, sz_psz2ind(size_class), sz_pind2sz(sz_psz2ind(size_class))); if (size_class == SC_LARGE_MAXCLASS) { @@ -150,14 +158,15 @@ TEST_BEGIN(test_psize_classes) { "Next size_class does not round up properly"); } - expect_zu_eq(size_class, (pind > 0) ? - sz_psz2u(sz_pind2sz(pind-1)+1) : sz_psz2u(1), + expect_zu_eq(size_class, + (pind > 0) ? 
sz_psz2u(sz_pind2sz(pind - 1) + 1) + : sz_psz2u(1), "sz_psz2u() does not round up to size class"); - expect_zu_eq(size_class, sz_psz2u(size_class-1), + expect_zu_eq(size_class, sz_psz2u(size_class - 1), "sz_psz2u() does not round up to size class"); expect_zu_eq(size_class, sz_psz2u(size_class), "sz_psz2u() does not compute same size class"); - expect_zu_eq(sz_psz2u(size_class+1), sz_pind2sz(pind+1), + expect_zu_eq(sz_psz2u(size_class + 1), sz_pind2sz(pind + 1), "sz_psz2u() does not round up to next size class"); } @@ -166,9 +175,9 @@ TEST_BEGIN(test_psize_classes) { expect_zu_eq(max_psz, sz_pind2sz(sz_psz2ind(max_psz)), "sz_pind2sz() does not reverse sz_psz2ind()"); - expect_zu_eq(size_class, sz_psz2u(sz_pind2sz(pind-1)+1), + expect_zu_eq(size_class, sz_psz2u(sz_pind2sz(pind - 1) + 1), "sz_psz2u() does not round up to size class"); - expect_zu_eq(size_class, sz_psz2u(size_class-1), + expect_zu_eq(size_class, sz_psz2u(size_class - 1), "sz_psz2u() does not round up to size class"); expect_zu_eq(size_class, sz_psz2u(size_class), "sz_psz2u() does not compute same size class"); @@ -181,31 +190,31 @@ TEST_BEGIN(test_overflow) { max_size_class = get_max_size_class(); max_psz = max_size_class + PAGE; - expect_u_eq(sz_size2index(max_size_class+1), SC_NSIZES, + expect_u_eq(sz_size2index(max_size_class + 1), SC_NSIZES, "sz_size2index() should return NSIZES on overflow"); - expect_u_eq(sz_size2index(ZU(PTRDIFF_MAX)+1), SC_NSIZES, + expect_u_eq(sz_size2index(ZU(PTRDIFF_MAX) + 1), SC_NSIZES, "sz_size2index() should return NSIZES on overflow"); expect_u_eq(sz_size2index(SIZE_T_MAX), SC_NSIZES, "sz_size2index() should return NSIZES on overflow"); - expect_zu_eq(sz_s2u(max_size_class+1), 0, + expect_zu_eq(sz_s2u(max_size_class + 1), 0, "sz_s2u() should return 0 for unsupported size"); - expect_zu_eq(sz_s2u(ZU(PTRDIFF_MAX)+1), 0, + expect_zu_eq(sz_s2u(ZU(PTRDIFF_MAX) + 1), 0, "sz_s2u() should return 0 for unsupported size"); - expect_zu_eq(sz_s2u(SIZE_T_MAX), 0, - "sz_s2u() should 
return 0 on overflow"); + expect_zu_eq( + sz_s2u(SIZE_T_MAX), 0, "sz_s2u() should return 0 on overflow"); - expect_u_eq(sz_psz2ind(max_size_class+1), SC_NPSIZES, + expect_u_eq(sz_psz2ind(max_size_class + 1), SC_NPSIZES, "sz_psz2ind() should return NPSIZES on overflow"); - expect_u_eq(sz_psz2ind(ZU(PTRDIFF_MAX)+1), SC_NPSIZES, + expect_u_eq(sz_psz2ind(ZU(PTRDIFF_MAX) + 1), SC_NPSIZES, "sz_psz2ind() should return NPSIZES on overflow"); expect_u_eq(sz_psz2ind(SIZE_T_MAX), SC_NPSIZES, "sz_psz2ind() should return NPSIZES on overflow"); - expect_zu_eq(sz_psz2u(max_size_class+1), max_psz, + expect_zu_eq(sz_psz2u(max_size_class + 1), max_psz, "sz_psz2u() should return (LARGE_MAXCLASS + PAGE) for unsupported" " size"); - expect_zu_eq(sz_psz2u(ZU(PTRDIFF_MAX)+1), max_psz, + expect_zu_eq(sz_psz2u(ZU(PTRDIFF_MAX) + 1), max_psz, "sz_psz2u() should return (LARGE_MAXCLASS + PAGE) for unsupported " "size"); expect_zu_eq(sz_psz2u(SIZE_T_MAX), max_psz, @@ -215,9 +224,6 @@ TEST_END int main(void) { - return test( - test_size_classes, - test_grow_slow_size_classes, - test_psize_classes, - test_overflow); + return test(test_size_classes, test_grow_slow_size_classes, + test_psize_classes, test_overflow); } diff --git a/test/unit/slab.c b/test/unit/slab.c index 70fc5c7d..5c48e762 100644 --- a/test/unit/slab.c +++ b/test/unit/slab.c @@ -6,23 +6,22 @@ TEST_BEGIN(test_arena_slab_regind) { szind_t binind; for (binind = 0; binind < SC_NBINS; binind++) { - size_t regind; - edata_t slab; + size_t regind; + edata_t slab; const bin_info_t *bin_info = &bin_infos[binind]; edata_init(&slab, INVALID_ARENA_IND, mallocx(bin_info->slab_size, MALLOCX_LG_ALIGN(LG_PAGE)), - bin_info->slab_size, true, - binind, 0, extent_state_active, false, true, EXTENT_PAI_PAC, - EXTENT_NOT_HEAD); - expect_ptr_not_null(edata_addr_get(&slab), - "Unexpected malloc() failure"); + bin_info->slab_size, true, binind, 0, extent_state_active, + false, true, EXTENT_PAI_PAC, EXTENT_NOT_HEAD); + expect_ptr_not_null( + 
edata_addr_get(&slab), "Unexpected malloc() failure"); arena_dalloc_bin_locked_info_t dalloc_info; arena_dalloc_bin_locked_begin(&dalloc_info, binind); for (regind = 0; regind < bin_info->nregs; regind++) { - void *reg = (void *)((uintptr_t)edata_addr_get(&slab) + - (bin_info->reg_size * regind)); - expect_zu_eq(arena_slab_regind(&dalloc_info, binind, - &slab, reg), + void *reg = (void *)((uintptr_t)edata_addr_get(&slab) + + (bin_info->reg_size * regind)); + expect_zu_eq( + arena_slab_regind(&dalloc_info, binind, &slab, reg), regind, "Incorrect region index computed for size %zu", bin_info->reg_size); @@ -34,6 +33,5 @@ TEST_END int main(void) { - return test( - test_arena_slab_regind); + return test(test_arena_slab_regind); } diff --git a/test/unit/smoothstep.c b/test/unit/smoothstep.c index 588c9f44..3686ca74 100644 --- a/test/unit/smoothstep.c +++ b/test/unit/smoothstep.c @@ -1,9 +1,8 @@ #include "test/jemalloc_test.h" static const uint64_t smoothstep_tab[] = { -#define STEP(step, h, x, y) \ - h, - SMOOTHSTEP +#define STEP(step, h, x, y) h, + SMOOTHSTEP #undef STEP }; @@ -23,14 +22,14 @@ TEST_BEGIN(test_smoothstep_integral) { sum += smoothstep_tab[i]; } - max = (KQU(1) << (SMOOTHSTEP_BFP-1)) * (SMOOTHSTEP_NSTEPS+1); + max = (KQU(1) << (SMOOTHSTEP_BFP - 1)) * (SMOOTHSTEP_NSTEPS + 1); min = max - SMOOTHSTEP_NSTEPS; - expect_u64_ge(sum, min, - "Integral too small, even accounting for truncation"); + expect_u64_ge( + sum, min, "Integral too small, even accounting for truncation"); expect_u64_le(sum, max, "Integral exceeds 1/2"); if (false) { - malloc_printf("%"FMTu64" ulps under 1/2 (limit %d)\n", + malloc_printf("%" FMTu64 " ulps under 1/2 (limit %d)\n", max - sum, SMOOTHSTEP_NSTEPS); } } @@ -52,7 +51,7 @@ TEST_BEGIN(test_smoothstep_monotonic) { expect_u64_ge(h, prev_h, "Piecewise non-monotonic, i=%u", i); prev_h = h; } - expect_u64_eq(smoothstep_tab[SMOOTHSTEP_NSTEPS-1], + expect_u64_eq(smoothstep_tab[SMOOTHSTEP_NSTEPS - 1], (KQU(1) << SMOOTHSTEP_BFP), "Last step 
must equal 1"); } TEST_END @@ -74,19 +73,21 @@ TEST_BEGIN(test_smoothstep_slope) { uint64_t delta = h - prev_h; expect_u64_ge(delta, prev_delta, "Slope must monotonically increase in 0.0 <= x <= 0.5, " - "i=%u", i); + "i=%u", + i); prev_h = h; prev_delta = delta; } prev_h = KQU(1) << SMOOTHSTEP_BFP; prev_delta = 0; - for (i = SMOOTHSTEP_NSTEPS-1; i >= SMOOTHSTEP_NSTEPS / 2; i--) { + for (i = SMOOTHSTEP_NSTEPS - 1; i >= SMOOTHSTEP_NSTEPS / 2; i--) { uint64_t h = smoothstep_tab[i]; uint64_t delta = prev_h - h; expect_u64_ge(delta, prev_delta, "Slope must monotonically decrease in 0.5 <= x <= 1.0, " - "i=%u", i); + "i=%u", + i); prev_h = h; prev_delta = delta; } @@ -95,8 +96,6 @@ TEST_END int main(void) { - return test( - test_smoothstep_integral, - test_smoothstep_monotonic, + return test(test_smoothstep_integral, test_smoothstep_monotonic, test_smoothstep_slope); } diff --git a/test/unit/spin.c b/test/unit/spin.c index b965f742..6dbd0dd1 100644 --- a/test/unit/spin.c +++ b/test/unit/spin.c @@ -13,6 +13,5 @@ TEST_END int main(void) { - return test( - test_spin); + return test(test_spin); } diff --git a/test/unit/stats.c b/test/unit/stats.c index 584a582f..26516fa8 100644 --- a/test/unit/stats.c +++ b/test/unit/stats.c @@ -4,13 +4,14 @@ #define STRINGIFY(x) STRINGIFY_HELPER(x) TEST_BEGIN(test_stats_summary) { - size_t sz, allocated, active, resident, mapped, - metadata, metadata_edata, metadata_rtree; + size_t sz, allocated, active, resident, mapped, metadata, + metadata_edata, metadata_rtree; int expected = config_stats ? 
0 : ENOENT; sz = sizeof(size_t); - expect_d_eq(mallctl("stats.allocated", (void *)&allocated, &sz, NULL, - 0), expected, "Unexpected mallctl() result"); + expect_d_eq( + mallctl("stats.allocated", (void *)&allocated, &sz, NULL, 0), + expected, "Unexpected mallctl() result"); expect_d_eq(mallctl("stats.active", (void *)&active, &sz, NULL, 0), expected, "Unexpected mallctl() result"); expect_d_eq(mallctl("stats.resident", (void *)&resident, &sz, NULL, 0), @@ -21,17 +22,19 @@ TEST_BEGIN(test_stats_summary) { expect_d_eq(mallctl("stats.metadata", (void *)&metadata, &sz, NULL, 0), expected, "Unexpected mallctl() result"); expect_d_eq(mallctl("stats.metadata_edata", (void *)&metadata_edata, - &sz, NULL, 0), expected, "Unexpected mallctl() result"); + &sz, NULL, 0), + expected, "Unexpected mallctl() result"); expect_d_eq(mallctl("stats.metadata_rtree", (void *)&metadata_rtree, - &sz, NULL, 0), expected, "Unexpected mallctl() result"); + &sz, NULL, 0), + expected, "Unexpected mallctl() result"); if (config_stats) { expect_zu_le(allocated, active, "allocated should be no larger than active"); - expect_zu_lt(active, resident, - "active should be less than resident"); - expect_zu_lt(active, mapped, - "active should be less than mapped"); + expect_zu_lt( + active, resident, "active should be less than resident"); + expect_zu_lt( + active, mapped, "active should be less than mapped"); expect_zu_le(metadata_edata + metadata_rtree, metadata, "the sum of metadata_edata and metadata_rtree " "should be no larger than metadata"); @@ -40,12 +43,12 @@ TEST_BEGIN(test_stats_summary) { TEST_END TEST_BEGIN(test_stats_large) { - void *p; + void *p; uint64_t epoch; - size_t allocated; + size_t allocated; uint64_t nmalloc, ndalloc, nrequests; - size_t sz; - int expected = config_stats ? 0 : ENOENT; + size_t sz; + int expected = config_stats ? 
0 : ENOENT; p = mallocx(SC_SMALL_MAXCLASS + 1, MALLOCX_ARENA(0)); expect_ptr_not_null(p, "Unexpected mallocx() failure"); @@ -55,20 +58,22 @@ TEST_BEGIN(test_stats_large) { sz = sizeof(size_t); expect_d_eq(mallctl("stats.arenas.0.large.allocated", - (void *)&allocated, &sz, NULL, 0), expected, - "Unexpected mallctl() result"); + (void *)&allocated, &sz, NULL, 0), + expected, "Unexpected mallctl() result"); sz = sizeof(uint64_t); expect_d_eq(mallctl("stats.arenas.0.large.nmalloc", (void *)&nmalloc, - &sz, NULL, 0), expected, "Unexpected mallctl() result"); + &sz, NULL, 0), + expected, "Unexpected mallctl() result"); expect_d_eq(mallctl("stats.arenas.0.large.ndalloc", (void *)&ndalloc, - &sz, NULL, 0), expected, "Unexpected mallctl() result"); + &sz, NULL, 0), + expected, "Unexpected mallctl() result"); expect_d_eq(mallctl("stats.arenas.0.large.nrequests", - (void *)&nrequests, &sz, NULL, 0), expected, - "Unexpected mallctl() result"); + (void *)&nrequests, &sz, NULL, 0), + expected, "Unexpected mallctl() result"); if (config_stats) { - expect_zu_gt(allocated, 0, - "allocated should be greater than zero"); + expect_zu_gt( + allocated, 0, "allocated should be greater than zero"); expect_u64_ge(nmalloc, ndalloc, "nmalloc should be at least as large as ndalloc"); expect_u64_le(nmalloc, nrequests, @@ -80,18 +85,17 @@ TEST_BEGIN(test_stats_large) { TEST_END TEST_BEGIN(test_stats_arenas_summary) { - void *little, *large; + void *little, *large; uint64_t epoch; - size_t sz; - int expected = config_stats ? 0 : ENOENT; - size_t mapped; + size_t sz; + int expected = config_stats ? 
0 : ENOENT; + size_t mapped; uint64_t dirty_npurge, dirty_nmadvise, dirty_purged; uint64_t muzzy_npurge, muzzy_nmadvise, muzzy_purged; little = mallocx(SC_SMALL_MAXCLASS, MALLOCX_ARENA(0)); expect_ptr_not_null(little, "Unexpected mallocx() failure"); - large = mallocx((1U << SC_LG_LARGE_MINCLASS), - MALLOCX_ARENA(0)); + large = mallocx((1U << SC_LG_LARGE_MINCLASS), MALLOCX_ARENA(0)); expect_ptr_not_null(large, "Unexpected mallocx() failure"); dallocx(little, 0); @@ -106,28 +110,29 @@ TEST_BEGIN(test_stats_arenas_summary) { 0, "Unexpected mallctl() failure"); sz = sizeof(size_t); - expect_d_eq(mallctl("stats.arenas.0.mapped", (void *)&mapped, &sz, NULL, - 0), expected, "Unexepected mallctl() result"); + expect_d_eq( + mallctl("stats.arenas.0.mapped", (void *)&mapped, &sz, NULL, 0), + expected, "Unexepected mallctl() result"); sz = sizeof(uint64_t); expect_d_eq(mallctl("stats.arenas.0.dirty_npurge", - (void *)&dirty_npurge, &sz, NULL, 0), expected, - "Unexepected mallctl() result"); + (void *)&dirty_npurge, &sz, NULL, 0), + expected, "Unexepected mallctl() result"); expect_d_eq(mallctl("stats.arenas.0.dirty_nmadvise", - (void *)&dirty_nmadvise, &sz, NULL, 0), expected, - "Unexepected mallctl() result"); + (void *)&dirty_nmadvise, &sz, NULL, 0), + expected, "Unexepected mallctl() result"); expect_d_eq(mallctl("stats.arenas.0.dirty_purged", - (void *)&dirty_purged, &sz, NULL, 0), expected, - "Unexepected mallctl() result"); + (void *)&dirty_purged, &sz, NULL, 0), + expected, "Unexepected mallctl() result"); expect_d_eq(mallctl("stats.arenas.0.muzzy_npurge", - (void *)&muzzy_npurge, &sz, NULL, 0), expected, - "Unexepected mallctl() result"); + (void *)&muzzy_npurge, &sz, NULL, 0), + expected, "Unexepected mallctl() result"); expect_d_eq(mallctl("stats.arenas.0.muzzy_nmadvise", - (void *)&muzzy_nmadvise, &sz, NULL, 0), expected, - "Unexepected mallctl() result"); + (void *)&muzzy_nmadvise, &sz, NULL, 0), + expected, "Unexepected mallctl() result"); 
expect_d_eq(mallctl("stats.arenas.0.muzzy_purged", - (void *)&muzzy_purged, &sz, NULL, 0), expected, - "Unexepected mallctl() result"); + (void *)&muzzy_purged, &sz, NULL, 0), + expected, "Unexepected mallctl() result"); if (config_stats) { if (!is_background_thread_enabled() && !opt_hpa) { @@ -156,10 +161,10 @@ no_lazy_lock(void) { } TEST_BEGIN(test_stats_arenas_small) { - void *p; - size_t sz, allocated; + void *p; + size_t sz, allocated; uint64_t epoch, nmalloc, ndalloc, nrequests; - int expected = config_stats ? 0 : ENOENT; + int expected = config_stats ? 0 : ENOENT; no_lazy_lock(); /* Lazy locking would dodge tcache testing. */ @@ -174,26 +179,28 @@ TEST_BEGIN(test_stats_arenas_small) { sz = sizeof(size_t); expect_d_eq(mallctl("stats.arenas.0.small.allocated", - (void *)&allocated, &sz, NULL, 0), expected, - "Unexpected mallctl() result"); + (void *)&allocated, &sz, NULL, 0), + expected, "Unexpected mallctl() result"); sz = sizeof(uint64_t); expect_d_eq(mallctl("stats.arenas.0.small.nmalloc", (void *)&nmalloc, - &sz, NULL, 0), expected, "Unexpected mallctl() result"); + &sz, NULL, 0), + expected, "Unexpected mallctl() result"); expect_d_eq(mallctl("stats.arenas.0.small.ndalloc", (void *)&ndalloc, - &sz, NULL, 0), expected, "Unexpected mallctl() result"); + &sz, NULL, 0), + expected, "Unexpected mallctl() result"); expect_d_eq(mallctl("stats.arenas.0.small.nrequests", - (void *)&nrequests, &sz, NULL, 0), expected, - "Unexpected mallctl() result"); + (void *)&nrequests, &sz, NULL, 0), + expected, "Unexpected mallctl() result"); if (config_stats) { - expect_zu_gt(allocated, 0, - "allocated should be greater than zero"); - expect_u64_gt(nmalloc, 0, - "nmalloc should be no greater than zero"); + expect_zu_gt( + allocated, 0, "allocated should be greater than zero"); + expect_u64_gt( + nmalloc, 0, "nmalloc should be no greater than zero"); expect_u64_ge(nmalloc, ndalloc, "nmalloc should be at least as large as ndalloc"); - expect_u64_gt(nrequests, 0, - "nrequests 
should be greater than zero"); + expect_u64_gt( + nrequests, 0, "nrequests should be greater than zero"); } dallocx(p, 0); @@ -201,16 +208,16 @@ TEST_BEGIN(test_stats_arenas_small) { TEST_END TEST_BEGIN(test_stats_arenas_large) { - void *p; - size_t sz, allocated, allocated_before; + void *p; + size_t sz, allocated, allocated_before; uint64_t epoch, nmalloc, ndalloc; - size_t malloc_size = (1U << (SC_LG_LARGE_MINCLASS + 1)) + 1; - int expected = config_stats ? 0 : ENOENT; + size_t malloc_size = (1U << (SC_LG_LARGE_MINCLASS + 1)) + 1; + int expected = config_stats ? 0 : ENOENT; sz = sizeof(size_t); expect_d_eq(mallctl("stats.arenas.0.large.allocated", - (void *)&allocated_before, &sz, NULL, 0), expected, - "Unexpected mallctl() result"); + (void *)&allocated_before, &sz, NULL, 0), + expected, "Unexpected mallctl() result"); p = mallocx(malloc_size, MALLOCX_ARENA(0)); expect_ptr_not_null(p, "Unexpected mallocx() failure"); @@ -219,21 +226,23 @@ TEST_BEGIN(test_stats_arenas_large) { 0, "Unexpected mallctl() failure"); expect_d_eq(mallctl("stats.arenas.0.large.allocated", - (void *)&allocated, &sz, NULL, 0), expected, - "Unexpected mallctl() result"); + (void *)&allocated, &sz, NULL, 0), + expected, "Unexpected mallctl() result"); sz = sizeof(uint64_t); expect_d_eq(mallctl("stats.arenas.0.large.nmalloc", (void *)&nmalloc, - &sz, NULL, 0), expected, "Unexpected mallctl() result"); + &sz, NULL, 0), + expected, "Unexpected mallctl() result"); expect_d_eq(mallctl("stats.arenas.0.large.ndalloc", (void *)&ndalloc, - &sz, NULL, 0), expected, "Unexpected mallctl() result"); + &sz, NULL, 0), + expected, "Unexpected mallctl() result"); if (config_stats) { expect_zu_ge(allocated_before, 0, "allocated should be greater than zero"); expect_zu_ge(allocated - allocated_before, sz_s2u(malloc_size), "the diff between allocated should be greater than the allocation made"); - expect_u64_gt(nmalloc, 0, - "nmalloc should be greater than zero"); + expect_u64_gt( + nmalloc, 0, "nmalloc 
should be greater than zero"); expect_u64_ge(nmalloc, ndalloc, "nmalloc should be at least as large as ndalloc"); } @@ -248,11 +257,11 @@ gen_mallctl_str(char *cmd, char *name, unsigned arena_ind) { } TEST_BEGIN(test_stats_arenas_bins) { - void *p; - size_t sz, curslabs, curregs, nonfull_slabs; + void *p; + size_t sz, curslabs, curregs, nonfull_slabs; uint64_t epoch, nmalloc, ndalloc, nrequests, nfills, nflushes; uint64_t nslabs, nreslabs; - int expected = config_stats ? 0 : ENOENT; + int expected = config_stats ? 0 : ENOENT; /* Make sure allocation below isn't satisfied by tcache. */ expect_d_eq(mallctl("thread.tcache.flush", NULL, NULL, NULL, 0), @@ -264,8 +273,8 @@ TEST_BEGIN(test_stats_arenas_bins) { 0, "Arena creation failure"); sz = sizeof(arena_ind); expect_d_eq(mallctl("thread.arena", (void *)&old_arena_ind, &sz, - (void *)&arena_ind, sizeof(arena_ind)), 0, - "Unexpected mallctl() failure"); + (void *)&arena_ind, sizeof(arena_ind)), + 0, "Unexpected mallctl() failure"); p = malloc(bin_infos[0].reg_size); expect_ptr_not_null(p, "Unexpected malloc() failure"); @@ -315,26 +324,25 @@ TEST_BEGIN(test_stats_arenas_bins) { expected, "Unexpected mallctl() result"); if (config_stats) { - expect_u64_gt(nmalloc, 0, - "nmalloc should be greater than zero"); + expect_u64_gt( + nmalloc, 0, "nmalloc should be greater than zero"); expect_u64_ge(nmalloc, ndalloc, "nmalloc should be at least as large as ndalloc"); - expect_u64_gt(nrequests, 0, - "nrequests should be greater than zero"); - expect_zu_gt(curregs, 0, - "allocated should be greater than zero"); + expect_u64_gt( + nrequests, 0, "nrequests should be greater than zero"); + expect_zu_gt( + curregs, 0, "allocated should be greater than zero"); if (opt_tcache) { expect_u64_gt(nfills, 0, "At least one fill should have occurred"); expect_u64_gt(nflushes, 0, "At least one flush should have occurred"); } - expect_u64_gt(nslabs, 0, - "At least one slab should have been allocated"); + expect_u64_gt( + nslabs, 0, "At least 
one slab should have been allocated"); expect_zu_gt(curslabs, 0, "At least one slab should be currently allocated"); - expect_zu_eq(nonfull_slabs, 0, - "slabs_nonfull should be empty"); + expect_zu_eq(nonfull_slabs, 0, "slabs_nonfull should be empty"); } dallocx(p, 0); @@ -342,14 +350,15 @@ TEST_BEGIN(test_stats_arenas_bins) { TEST_END TEST_BEGIN(test_stats_arenas_lextents) { - void *p; + void *p; uint64_t epoch, nmalloc, ndalloc; - size_t curlextents, sz, hsize; - int expected = config_stats ? 0 : ENOENT; + size_t curlextents, sz, hsize; + int expected = config_stats ? 0 : ENOENT; sz = sizeof(size_t); - expect_d_eq(mallctl("arenas.lextent.0.size", (void *)&hsize, &sz, NULL, - 0), 0, "Unexpected mallctl() failure"); + expect_d_eq( + mallctl("arenas.lextent.0.size", (void *)&hsize, &sz, NULL, 0), 0, + "Unexpected mallctl() failure"); p = mallocx(hsize, MALLOCX_ARENA(0)); expect_ptr_not_null(p, "Unexpected mallocx() failure"); @@ -359,19 +368,19 @@ TEST_BEGIN(test_stats_arenas_lextents) { sz = sizeof(uint64_t); expect_d_eq(mallctl("stats.arenas.0.lextents.0.nmalloc", - (void *)&nmalloc, &sz, NULL, 0), expected, - "Unexpected mallctl() result"); + (void *)&nmalloc, &sz, NULL, 0), + expected, "Unexpected mallctl() result"); expect_d_eq(mallctl("stats.arenas.0.lextents.0.ndalloc", - (void *)&ndalloc, &sz, NULL, 0), expected, - "Unexpected mallctl() result"); + (void *)&ndalloc, &sz, NULL, 0), + expected, "Unexpected mallctl() result"); sz = sizeof(size_t); expect_d_eq(mallctl("stats.arenas.0.lextents.0.curlextents", - (void *)&curlextents, &sz, NULL, 0), expected, - "Unexpected mallctl() result"); + (void *)&curlextents, &sz, NULL, 0), + expected, "Unexpected mallctl() result"); if (config_stats) { - expect_u64_gt(nmalloc, 0, - "nmalloc should be greater than zero"); + expect_u64_gt( + nmalloc, 0, "nmalloc should be greater than zero"); expect_u64_ge(nmalloc, ndalloc, "nmalloc should be at least as large as ndalloc"); expect_u64_gt(curlextents, 0, @@ -385,35 +394,37 @@ 
TEST_END static void test_tcache_bytes_for_usize(size_t usize) { uint64_t epoch; - size_t tcache_bytes, tcache_stashed_bytes; - size_t sz = sizeof(tcache_bytes); + size_t tcache_bytes, tcache_stashed_bytes; + size_t sz = sizeof(tcache_bytes); void *ptr = mallocx(usize, 0); expect_d_eq(mallctl("epoch", NULL, NULL, (void *)&epoch, sizeof(epoch)), 0, "Unexpected mallctl() failure"); - assert_d_eq(mallctl( - "stats.arenas." STRINGIFY(MALLCTL_ARENAS_ALL) ".tcache_bytes", - &tcache_bytes, &sz, NULL, 0), 0, "Unexpected mallctl failure"); - assert_d_eq(mallctl( - "stats.arenas." STRINGIFY(MALLCTL_ARENAS_ALL) - ".tcache_stashed_bytes", &tcache_stashed_bytes, &sz, NULL, 0), 0, - "Unexpected mallctl failure"); + assert_d_eq(mallctl("stats.arenas." STRINGIFY( + MALLCTL_ARENAS_ALL) ".tcache_bytes", + &tcache_bytes, &sz, NULL, 0), + 0, "Unexpected mallctl failure"); + assert_d_eq(mallctl("stats.arenas." STRINGIFY( + MALLCTL_ARENAS_ALL) ".tcache_stashed_bytes", + &tcache_stashed_bytes, &sz, NULL, 0), + 0, "Unexpected mallctl failure"); size_t tcache_bytes_before = tcache_bytes + tcache_stashed_bytes; dallocx(ptr, 0); expect_d_eq(mallctl("epoch", NULL, NULL, (void *)&epoch, sizeof(epoch)), 0, "Unexpected mallctl() failure"); - assert_d_eq(mallctl( - "stats.arenas." STRINGIFY(MALLCTL_ARENAS_ALL) ".tcache_bytes", - &tcache_bytes, &sz, NULL, 0), 0, "Unexpected mallctl failure"); - assert_d_eq(mallctl( - "stats.arenas." STRINGIFY(MALLCTL_ARENAS_ALL) - ".tcache_stashed_bytes", &tcache_stashed_bytes, &sz, NULL, 0), 0, - "Unexpected mallctl failure"); + assert_d_eq(mallctl("stats.arenas." STRINGIFY( + MALLCTL_ARENAS_ALL) ".tcache_bytes", + &tcache_bytes, &sz, NULL, 0), + 0, "Unexpected mallctl failure"); + assert_d_eq(mallctl("stats.arenas." 
STRINGIFY( + MALLCTL_ARENAS_ALL) ".tcache_stashed_bytes", + &tcache_stashed_bytes, &sz, NULL, 0), + 0, "Unexpected mallctl failure"); size_t tcache_bytes_after = tcache_bytes + tcache_stashed_bytes; - assert_zu_eq(tcache_bytes_after - tcache_bytes_before, - usize, "Incorrectly attributed a free"); + assert_zu_eq(tcache_bytes_after - tcache_bytes_before, usize, + "Incorrectly attributed a free"); } TEST_BEGIN(test_stats_tcache_bytes_small) { @@ -436,14 +447,9 @@ TEST_END int main(void) { - return test_no_reentrancy( - test_stats_summary, - test_stats_large, - test_stats_arenas_summary, - test_stats_arenas_small, - test_stats_arenas_large, - test_stats_arenas_bins, - test_stats_arenas_lextents, - test_stats_tcache_bytes_small, + return test_no_reentrancy(test_stats_summary, test_stats_large, + test_stats_arenas_summary, test_stats_arenas_small, + test_stats_arenas_large, test_stats_arenas_bins, + test_stats_arenas_lextents, test_stats_tcache_bytes_small, test_stats_tcache_bytes_large); } diff --git a/test/unit/stats_print.c b/test/unit/stats_print.c index 3b317753..e611369c 100644 --- a/test/unit/stats_print.c +++ b/test/unit/stats_print.c @@ -21,22 +21,22 @@ typedef enum { typedef struct parser_s parser_t; typedef struct { - parser_t *parser; - token_type_t token_type; - size_t pos; - size_t len; - size_t line; - size_t col; + parser_t *parser; + token_type_t token_type; + size_t pos; + size_t len; + size_t line; + size_t col; } token_t; struct parser_s { - bool verbose; - char *buf; /* '\0'-terminated. */ - size_t len; /* Number of characters preceding '\0' in buf. */ - size_t pos; - size_t line; - size_t col; - token_t token; + bool verbose; + char *buf; /* '\0'-terminated. */ + size_t len; /* Number of characters preceding '\0' in buf. 
*/ + size_t pos; + size_t line; + size_t col; + token_t token; }; static void @@ -63,12 +63,12 @@ token_error(token_t *token) { token->line, token->col); break; default: - malloc_printf("%zu:%zu: Unexpected token: ", token->line, - token->col); + malloc_printf( + "%zu:%zu: Unexpected token: ", token->line, token->col); break; } - UNUSED ssize_t err = malloc_write_fd(STDERR_FILENO, - &token->parser->buf[token->pos], token->len); + UNUSED ssize_t err = malloc_write_fd( + STDERR_FILENO, &token->parser->buf[token->pos], token->len); malloc_printf("\n"); } @@ -92,9 +92,9 @@ parser_fini(parser_t *parser) { static bool parser_append(parser_t *parser, const char *str) { size_t len = strlen(str); - char *buf = (parser->buf == NULL) ? mallocx(len + 1, - MALLOCX_TCACHE_NONE) : rallocx(parser->buf, parser->len + len + 1, - MALLOCX_TCACHE_NONE); + char *buf = (parser->buf == NULL) + ? mallocx(len + 1, MALLOCX_TCACHE_NONE) + : rallocx(parser->buf, parser->len + len + 1, MALLOCX_TCACHE_NONE); if (buf == NULL) { return true; } @@ -109,9 +109,19 @@ parser_tokenize(parser_t *parser) { enum { STATE_START, STATE_EOI, - STATE_N, STATE_NU, STATE_NUL, STATE_NULL, - STATE_F, STATE_FA, STATE_FAL, STATE_FALS, STATE_FALSE, - STATE_T, STATE_TR, STATE_TRU, STATE_TRUE, + STATE_N, + STATE_NU, + STATE_NUL, + STATE_NULL, + STATE_F, + STATE_FA, + STATE_FAL, + STATE_FALS, + STATE_FALSE, + STATE_T, + STATE_TR, + STATE_TRU, + STATE_TRUE, STATE_LBRACKET, STATE_RBRACKET, STATE_LBRACE, @@ -120,7 +130,10 @@ parser_tokenize(parser_t *parser) { STATE_COMMA, STATE_CHARS, STATE_CHAR_ESCAPE, - STATE_CHAR_U, STATE_CHAR_UD, STATE_CHAR_UDD, STATE_CHAR_UDDD, + STATE_CHAR_U, + STATE_CHAR_UD, + STATE_CHAR_UDD, + STATE_CHAR_UDDD, STATE_STRING, STATE_MINUS, STATE_LEADING_ZERO, @@ -132,12 +145,12 @@ parser_tokenize(parser_t *parser) { STATE_EXP_DIGITS, STATE_ACCEPT } state = STATE_START; - size_t token_pos JEMALLOC_CC_SILENCE_INIT(0); + size_t token_pos JEMALLOC_CC_SILENCE_INIT(0); size_t token_line 
JEMALLOC_CC_SILENCE_INIT(1); - size_t token_col JEMALLOC_CC_SILENCE_INIT(0); + size_t token_col JEMALLOC_CC_SILENCE_INIT(0); - expect_zu_le(parser->pos, parser->len, - "Position is past end of buffer"); + expect_zu_le( + parser->pos, parser->len, "Position is past end of buffer"); while (state != STATE_ACCEPT) { char c = parser->buf[parser->pos]; @@ -148,7 +161,11 @@ parser_tokenize(parser_t *parser) { token_line = parser->line; token_col = parser->col; switch (c) { - case ' ': case '\b': case '\n': case '\r': case '\t': + case ' ': + case '\b': + case '\n': + case '\r': + case '\t': break; case '\0': state = STATE_EOI; @@ -189,21 +206,29 @@ parser_tokenize(parser_t *parser) { case '0': state = STATE_LEADING_ZERO; break; - case '1': case '2': case '3': case '4': - case '5': case '6': case '7': case '8': case '9': + case '1': + case '2': + case '3': + case '4': + case '5': + case '6': + case '7': + case '8': + case '9': state = STATE_DIGITS; break; default: token_init(&parser->token, parser, - TOKEN_TYPE_ERROR, token_pos, parser->pos + 1 - - token_pos, token_line, token_col); + TOKEN_TYPE_ERROR, token_pos, + parser->pos + 1 - token_pos, token_line, + token_col); return true; } break; case STATE_EOI: - token_init(&parser->token, parser, - TOKEN_TYPE_EOI, token_pos, parser->pos - - token_pos, token_line, token_col); + token_init(&parser->token, parser, TOKEN_TYPE_EOI, + token_pos, parser->pos - token_pos, token_line, + token_col); state = STATE_ACCEPT; break; case STATE_N: @@ -213,8 +238,9 @@ parser_tokenize(parser_t *parser) { break; default: token_init(&parser->token, parser, - TOKEN_TYPE_ERROR, token_pos, parser->pos + 1 - - token_pos, token_line, token_col); + TOKEN_TYPE_ERROR, token_pos, + parser->pos + 1 - token_pos, token_line, + token_col); return true; } break; @@ -225,8 +251,9 @@ parser_tokenize(parser_t *parser) { break; default: token_init(&parser->token, parser, - TOKEN_TYPE_ERROR, token_pos, parser->pos + 1 - - token_pos, token_line, token_col); + 
TOKEN_TYPE_ERROR, token_pos, + parser->pos + 1 - token_pos, token_line, + token_col); return true; } break; @@ -237,22 +264,32 @@ parser_tokenize(parser_t *parser) { break; default: token_init(&parser->token, parser, - TOKEN_TYPE_ERROR, token_pos, parser->pos + 1 - - token_pos, token_line, token_col); + TOKEN_TYPE_ERROR, token_pos, + parser->pos + 1 - token_pos, token_line, + token_col); return true; } break; case STATE_NULL: switch (c) { - case ' ': case '\b': case '\n': case '\r': case '\t': + case ' ': + case '\b': + case '\n': + case '\r': + case '\t': case '\0': - case '[': case ']': case '{': case '}': case ':': + case '[': + case ']': + case '{': + case '}': + case ':': case ',': break; default: token_init(&parser->token, parser, - TOKEN_TYPE_ERROR, token_pos, parser->pos + 1 - - token_pos, token_line, token_col); + TOKEN_TYPE_ERROR, token_pos, + parser->pos + 1 - token_pos, token_line, + token_col); return true; } token_init(&parser->token, parser, TOKEN_TYPE_NULL, @@ -267,8 +304,9 @@ parser_tokenize(parser_t *parser) { break; default: token_init(&parser->token, parser, - TOKEN_TYPE_ERROR, token_pos, parser->pos + 1 - - token_pos, token_line, token_col); + TOKEN_TYPE_ERROR, token_pos, + parser->pos + 1 - token_pos, token_line, + token_col); return true; } break; @@ -279,8 +317,9 @@ parser_tokenize(parser_t *parser) { break; default: token_init(&parser->token, parser, - TOKEN_TYPE_ERROR, token_pos, parser->pos + 1 - - token_pos, token_line, token_col); + TOKEN_TYPE_ERROR, token_pos, + parser->pos + 1 - token_pos, token_line, + token_col); return true; } break; @@ -291,8 +330,9 @@ parser_tokenize(parser_t *parser) { break; default: token_init(&parser->token, parser, - TOKEN_TYPE_ERROR, token_pos, parser->pos + 1 - - token_pos, token_line, token_col); + TOKEN_TYPE_ERROR, token_pos, + parser->pos + 1 - token_pos, token_line, + token_col); return true; } break; @@ -303,27 +343,37 @@ parser_tokenize(parser_t *parser) { break; default: token_init(&parser->token, 
parser, - TOKEN_TYPE_ERROR, token_pos, parser->pos + 1 - - token_pos, token_line, token_col); + TOKEN_TYPE_ERROR, token_pos, + parser->pos + 1 - token_pos, token_line, + token_col); return true; } break; case STATE_FALSE: switch (c) { - case ' ': case '\b': case '\n': case '\r': case '\t': + case ' ': + case '\b': + case '\n': + case '\r': + case '\t': case '\0': - case '[': case ']': case '{': case '}': case ':': + case '[': + case ']': + case '{': + case '}': + case ':': case ',': break; default: token_init(&parser->token, parser, - TOKEN_TYPE_ERROR, token_pos, parser->pos + 1 - - token_pos, token_line, token_col); + TOKEN_TYPE_ERROR, token_pos, + parser->pos + 1 - token_pos, token_line, + token_col); return true; } - token_init(&parser->token, parser, - TOKEN_TYPE_FALSE, token_pos, parser->pos - - token_pos, token_line, token_col); + token_init(&parser->token, parser, TOKEN_TYPE_FALSE, + token_pos, parser->pos - token_pos, token_line, + token_col); state = STATE_ACCEPT; break; case STATE_T: @@ -333,8 +383,9 @@ parser_tokenize(parser_t *parser) { break; default: token_init(&parser->token, parser, - TOKEN_TYPE_ERROR, token_pos, parser->pos + 1 - - token_pos, token_line, token_col); + TOKEN_TYPE_ERROR, token_pos, + parser->pos + 1 - token_pos, token_line, + token_col); return true; } break; @@ -345,8 +396,9 @@ parser_tokenize(parser_t *parser) { break; default: token_init(&parser->token, parser, - TOKEN_TYPE_ERROR, token_pos, parser->pos + 1 - - token_pos, token_line, token_col); + TOKEN_TYPE_ERROR, token_pos, + parser->pos + 1 - token_pos, token_line, + token_col); return true; } break; @@ -357,22 +409,32 @@ parser_tokenize(parser_t *parser) { break; default: token_init(&parser->token, parser, - TOKEN_TYPE_ERROR, token_pos, parser->pos + 1 - - token_pos, token_line, token_col); + TOKEN_TYPE_ERROR, token_pos, + parser->pos + 1 - token_pos, token_line, + token_col); return true; } break; case STATE_TRUE: switch (c) { - case ' ': case '\b': case '\n': case '\r': case 
'\t': + case ' ': + case '\b': + case '\n': + case '\r': + case '\t': case '\0': - case '[': case ']': case '{': case '}': case ':': + case '[': + case ']': + case '{': + case '}': + case ':': case ',': break; default: token_init(&parser->token, parser, - TOKEN_TYPE_ERROR, token_pos, parser->pos + 1 - - token_pos, token_line, token_col); + TOKEN_TYPE_ERROR, token_pos, + parser->pos + 1 - token_pos, token_line, + token_col); return true; } token_init(&parser->token, parser, TOKEN_TYPE_TRUE, @@ -424,16 +486,42 @@ parser_tokenize(parser_t *parser) { case '"': state = STATE_STRING; break; - case 0x00: case 0x01: case 0x02: case 0x03: case 0x04: - case 0x05: case 0x06: case 0x07: case 0x08: case 0x09: - case 0x0a: case 0x0b: case 0x0c: case 0x0d: case 0x0e: - case 0x0f: case 0x10: case 0x11: case 0x12: case 0x13: - case 0x14: case 0x15: case 0x16: case 0x17: case 0x18: - case 0x19: case 0x1a: case 0x1b: case 0x1c: case 0x1d: - case 0x1e: case 0x1f: + case 0x00: + case 0x01: + case 0x02: + case 0x03: + case 0x04: + case 0x05: + case 0x06: + case 0x07: + case 0x08: + case 0x09: + case 0x0a: + case 0x0b: + case 0x0c: + case 0x0d: + case 0x0e: + case 0x0f: + case 0x10: + case 0x11: + case 0x12: + case 0x13: + case 0x14: + case 0x15: + case 0x16: + case 0x17: + case 0x18: + case 0x19: + case 0x1a: + case 0x1b: + case 0x1c: + case 0x1d: + case 0x1e: + case 0x1f: token_init(&parser->token, parser, - TOKEN_TYPE_ERROR, token_pos, parser->pos + 1 - - token_pos, token_line, token_col); + TOKEN_TYPE_ERROR, token_pos, + parser->pos + 1 - token_pos, token_line, + token_col); return true; default: break; @@ -441,8 +529,13 @@ parser_tokenize(parser_t *parser) { break; case STATE_CHAR_ESCAPE: switch (c) { - case '"': case '\\': case '/': case 'b': case 'n': - case 'r': case 't': + case '"': + case '\\': + case '/': + case 'b': + case 'n': + case 'r': + case 't': state = STATE_CHARS; break; case 'u': @@ -450,76 +543,145 @@ parser_tokenize(parser_t *parser) { break; default: 
token_init(&parser->token, parser, - TOKEN_TYPE_ERROR, token_pos, parser->pos + 1 - - token_pos, token_line, token_col); + TOKEN_TYPE_ERROR, token_pos, + parser->pos + 1 - token_pos, token_line, + token_col); return true; } break; case STATE_CHAR_U: switch (c) { - case '0': case '1': case '2': case '3': case '4': - case '5': case '6': case '7': case '8': case '9': - case 'a': case 'b': case 'c': case 'd': case 'e': + case '0': + case '1': + case '2': + case '3': + case '4': + case '5': + case '6': + case '7': + case '8': + case '9': + case 'a': + case 'b': + case 'c': + case 'd': + case 'e': case 'f': - case 'A': case 'B': case 'C': case 'D': case 'E': + case 'A': + case 'B': + case 'C': + case 'D': + case 'E': case 'F': state = STATE_CHAR_UD; break; default: token_init(&parser->token, parser, - TOKEN_TYPE_ERROR, token_pos, parser->pos + 1 - - token_pos, token_line, token_col); + TOKEN_TYPE_ERROR, token_pos, + parser->pos + 1 - token_pos, token_line, + token_col); return true; } break; case STATE_CHAR_UD: switch (c) { - case '0': case '1': case '2': case '3': case '4': - case '5': case '6': case '7': case '8': case '9': - case 'a': case 'b': case 'c': case 'd': case 'e': + case '0': + case '1': + case '2': + case '3': + case '4': + case '5': + case '6': + case '7': + case '8': + case '9': + case 'a': + case 'b': + case 'c': + case 'd': + case 'e': case 'f': - case 'A': case 'B': case 'C': case 'D': case 'E': + case 'A': + case 'B': + case 'C': + case 'D': + case 'E': case 'F': state = STATE_CHAR_UDD; break; default: token_init(&parser->token, parser, - TOKEN_TYPE_ERROR, token_pos, parser->pos + 1 - - token_pos, token_line, token_col); + TOKEN_TYPE_ERROR, token_pos, + parser->pos + 1 - token_pos, token_line, + token_col); return true; } break; case STATE_CHAR_UDD: switch (c) { - case '0': case '1': case '2': case '3': case '4': - case '5': case '6': case '7': case '8': case '9': - case 'a': case 'b': case 'c': case 'd': case 'e': + case '0': + case '1': + case '2': 
+ case '3': + case '4': + case '5': + case '6': + case '7': + case '8': + case '9': + case 'a': + case 'b': + case 'c': + case 'd': + case 'e': case 'f': - case 'A': case 'B': case 'C': case 'D': case 'E': + case 'A': + case 'B': + case 'C': + case 'D': + case 'E': case 'F': state = STATE_CHAR_UDDD; break; default: token_init(&parser->token, parser, - TOKEN_TYPE_ERROR, token_pos, parser->pos + 1 - - token_pos, token_line, token_col); + TOKEN_TYPE_ERROR, token_pos, + parser->pos + 1 - token_pos, token_line, + token_col); return true; } break; case STATE_CHAR_UDDD: switch (c) { - case '0': case '1': case '2': case '3': case '4': - case '5': case '6': case '7': case '8': case '9': - case 'a': case 'b': case 'c': case 'd': case 'e': + case '0': + case '1': + case '2': + case '3': + case '4': + case '5': + case '6': + case '7': + case '8': + case '9': + case 'a': + case 'b': + case 'c': + case 'd': + case 'e': case 'f': - case 'A': case 'B': case 'C': case 'D': case 'E': + case 'A': + case 'B': + case 'C': + case 'D': + case 'E': case 'F': state = STATE_CHARS; break; default: token_init(&parser->token, parser, - TOKEN_TYPE_ERROR, token_pos, parser->pos + 1 - - token_pos, token_line, token_col); + TOKEN_TYPE_ERROR, token_pos, + parser->pos + 1 - token_pos, token_line, + token_col); return true; } break; @@ -534,14 +696,22 @@ parser_tokenize(parser_t *parser) { case '0': state = STATE_LEADING_ZERO; break; - case '1': case '2': case '3': case '4': - case '5': case '6': case '7': case '8': case '9': + case '1': + case '2': + case '3': + case '4': + case '5': + case '6': + case '7': + case '8': + case '9': state = STATE_DIGITS; break; default: token_init(&parser->token, parser, - TOKEN_TYPE_ERROR, token_pos, parser->pos + 1 - - token_pos, token_line, token_col); + TOKEN_TYPE_ERROR, token_pos, + parser->pos + 1 - token_pos, token_line, + token_col); return true; } break; @@ -552,95 +722,152 @@ parser_tokenize(parser_t *parser) { break; default: token_init(&parser->token, 
parser, - TOKEN_TYPE_NUMBER, token_pos, parser->pos - - token_pos, token_line, token_col); + TOKEN_TYPE_NUMBER, token_pos, + parser->pos - token_pos, token_line, + token_col); state = STATE_ACCEPT; break; } break; case STATE_DIGITS: switch (c) { - case '0': case '1': case '2': case '3': case '4': - case '5': case '6': case '7': case '8': case '9': + case '0': + case '1': + case '2': + case '3': + case '4': + case '5': + case '6': + case '7': + case '8': + case '9': break; case '.': state = STATE_DECIMAL; break; default: token_init(&parser->token, parser, - TOKEN_TYPE_NUMBER, token_pos, parser->pos - - token_pos, token_line, token_col); + TOKEN_TYPE_NUMBER, token_pos, + parser->pos - token_pos, token_line, + token_col); state = STATE_ACCEPT; break; } break; case STATE_DECIMAL: switch (c) { - case '0': case '1': case '2': case '3': case '4': - case '5': case '6': case '7': case '8': case '9': + case '0': + case '1': + case '2': + case '3': + case '4': + case '5': + case '6': + case '7': + case '8': + case '9': state = STATE_FRAC_DIGITS; break; default: token_init(&parser->token, parser, - TOKEN_TYPE_ERROR, token_pos, parser->pos + 1 - - token_pos, token_line, token_col); + TOKEN_TYPE_ERROR, token_pos, + parser->pos + 1 - token_pos, token_line, + token_col); return true; } break; case STATE_FRAC_DIGITS: switch (c) { - case '0': case '1': case '2': case '3': case '4': - case '5': case '6': case '7': case '8': case '9': + case '0': + case '1': + case '2': + case '3': + case '4': + case '5': + case '6': + case '7': + case '8': + case '9': break; - case 'e': case 'E': + case 'e': + case 'E': state = STATE_EXP; break; default: token_init(&parser->token, parser, - TOKEN_TYPE_NUMBER, token_pos, parser->pos - - token_pos, token_line, token_col); + TOKEN_TYPE_NUMBER, token_pos, + parser->pos - token_pos, token_line, + token_col); state = STATE_ACCEPT; break; } break; case STATE_EXP: switch (c) { - case '-': case '+': + case '-': + case '+': state = STATE_EXP_SIGN; break; - 
case '0': case '1': case '2': case '3': case '4': - case '5': case '6': case '7': case '8': case '9': + case '0': + case '1': + case '2': + case '3': + case '4': + case '5': + case '6': + case '7': + case '8': + case '9': state = STATE_EXP_DIGITS; break; default: token_init(&parser->token, parser, - TOKEN_TYPE_ERROR, token_pos, parser->pos + 1 - - token_pos, token_line, token_col); + TOKEN_TYPE_ERROR, token_pos, + parser->pos + 1 - token_pos, token_line, + token_col); return true; } break; case STATE_EXP_SIGN: switch (c) { - case '0': case '1': case '2': case '3': case '4': - case '5': case '6': case '7': case '8': case '9': + case '0': + case '1': + case '2': + case '3': + case '4': + case '5': + case '6': + case '7': + case '8': + case '9': state = STATE_EXP_DIGITS; break; default: token_init(&parser->token, parser, - TOKEN_TYPE_ERROR, token_pos, parser->pos + 1 - - token_pos, token_line, token_col); + TOKEN_TYPE_ERROR, token_pos, + parser->pos + 1 - token_pos, token_line, + token_col); return true; } break; case STATE_EXP_DIGITS: switch (c) { - case '0': case '1': case '2': case '3': case '4': - case '5': case '6': case '7': case '8': case '9': + case '0': + case '1': + case '2': + case '3': + case '4': + case '5': + case '6': + case '7': + case '8': + case '9': break; default: token_init(&parser->token, parser, - TOKEN_TYPE_NUMBER, token_pos, parser->pos - - token_pos, token_line, token_col); + TOKEN_TYPE_NUMBER, token_pos, + parser->pos - token_pos, token_line, + token_col); state = STATE_ACCEPT; break; } @@ -662,8 +889,8 @@ parser_tokenize(parser_t *parser) { return false; } -static bool parser_parse_array(parser_t *parser); -static bool parser_parse_object(parser_t *parser); +static bool parser_parse_array(parser_t *parser); +static bool parser_parse_object(parser_t *parser); static bool parser_parse_value(parser_t *parser) { @@ -824,80 +1051,80 @@ label_error: } TEST_BEGIN(test_json_parser) { - size_t i; + size_t i; const char *invalid_inputs[] = { - /* 
Tokenizer error case tests. */ - "{ \"string\": X }", - "{ \"string\": nXll }", - "{ \"string\": nuXl }", - "{ \"string\": nulX }", - "{ \"string\": nullX }", - "{ \"string\": fXlse }", - "{ \"string\": faXse }", - "{ \"string\": falXe }", - "{ \"string\": falsX }", - "{ \"string\": falseX }", - "{ \"string\": tXue }", - "{ \"string\": trXe }", - "{ \"string\": truX }", - "{ \"string\": trueX }", - "{ \"string\": \"\n\" }", - "{ \"string\": \"\\z\" }", - "{ \"string\": \"\\uX000\" }", - "{ \"string\": \"\\u0X00\" }", - "{ \"string\": \"\\u00X0\" }", - "{ \"string\": \"\\u000X\" }", - "{ \"string\": -X }", - "{ \"string\": 0.X }", - "{ \"string\": 0.0eX }", - "{ \"string\": 0.0e+X }", + /* Tokenizer error case tests. */ + "{ \"string\": X }", + "{ \"string\": nXll }", + "{ \"string\": nuXl }", + "{ \"string\": nulX }", + "{ \"string\": nullX }", + "{ \"string\": fXlse }", + "{ \"string\": faXse }", + "{ \"string\": falXe }", + "{ \"string\": falsX }", + "{ \"string\": falseX }", + "{ \"string\": tXue }", + "{ \"string\": trXe }", + "{ \"string\": truX }", + "{ \"string\": trueX }", + "{ \"string\": \"\n\" }", + "{ \"string\": \"\\z\" }", + "{ \"string\": \"\\uX000\" }", + "{ \"string\": \"\\u0X00\" }", + "{ \"string\": \"\\u00X0\" }", + "{ \"string\": \"\\u000X\" }", + "{ \"string\": -X }", + "{ \"string\": 0.X }", + "{ \"string\": 0.0eX }", + "{ \"string\": 0.0e+X }", - /* Parser error test cases. */ - "{\"string\": }", - "{\"string\" }", - "{\"string\": [ 0 }", - "{\"string\": {\"a\":0, 1 } }", - "{\"string\": {\"a\":0: } }", - "{", - "{}{", + /* Parser error test cases. */ + "{\"string\": }", + "{\"string\" }", + "{\"string\": [ 0 }", + "{\"string\": {\"a\":0, 1 } }", + "{\"string\": {\"a\":0: } }", + "{", + "{}{", }; const char *valid_inputs[] = { - /* Token tests. 
*/ - "null", - "false", - "true", - "{}", - "{\"a\": 0}", - "[]", - "[0, 1]", - "0", - "1", - "10", - "-10", - "10.23", - "10.23e4", - "10.23e-4", - "10.23e+4", - "10.23E4", - "10.23E-4", - "10.23E+4", - "-10.23", - "-10.23e4", - "-10.23e-4", - "-10.23e+4", - "-10.23E4", - "-10.23E-4", - "-10.23E+4", - "\"value\"", - "\" \\\" \\/ \\b \\n \\r \\t \\u0abc \\u1DEF \"", + /* Token tests. */ + "null", + "false", + "true", + "{}", + "{\"a\": 0}", + "[]", + "[0, 1]", + "0", + "1", + "10", + "-10", + "10.23", + "10.23e4", + "10.23e-4", + "10.23e+4", + "10.23E4", + "10.23E-4", + "10.23E+4", + "-10.23", + "-10.23e4", + "-10.23e-4", + "-10.23e+4", + "-10.23E4", + "-10.23E-4", + "-10.23E+4", + "\"value\"", + "\" \\\" \\/ \\b \\n \\r \\t \\u0abc \\u1DEF \"", - /* Parser test with various nesting. */ - "{\"a\":null, \"b\":[1,[{\"c\":2},3]], \"d\":{\"e\":true}}", + /* Parser test with various nesting. */ + "{\"a\":null, \"b\":[1,[{\"c\":2},3]], \"d\":{\"e\":true}}", }; - for (i = 0; i < sizeof(invalid_inputs)/sizeof(const char *); i++) { + for (i = 0; i < sizeof(invalid_inputs) / sizeof(const char *); i++) { const char *input = invalid_inputs[i]; - parser_t parser; + parser_t parser; parser_init(&parser, false); expect_false(parser_append(&parser, input), "Unexpected input appending failure"); @@ -906,9 +1133,9 @@ TEST_BEGIN(test_json_parser) { parser_fini(&parser); } - for (i = 0; i < sizeof(valid_inputs)/sizeof(const char *); i++) { + for (i = 0; i < sizeof(valid_inputs) / sizeof(const char *); i++) { const char *input = valid_inputs[i]; - parser_t parser; + parser_t parser; parser_init(&parser, true); expect_false(parser_append(&parser, input), "Unexpected input appending failure"); @@ -929,27 +1156,27 @@ write_cb(void *opaque, const char *str) { TEST_BEGIN(test_stats_print_json) { const char *opts[] = { - "J", - "Jg", - "Jm", - "Jd", - "Jmd", - "Jgd", - "Jgm", - "Jgmd", - "Ja", - "Jb", - "Jl", - "Jx", - "Jbl", - "Jal", - "Jab", - "Jabl", - "Jax", - "Jbx", - "Jlx", - "Jablx", 
- "Jgmdablx", + "J", + "Jg", + "Jm", + "Jd", + "Jmd", + "Jgd", + "Jgm", + "Jgmd", + "Ja", + "Jb", + "Jl", + "Jx", + "Jbl", + "Jal", + "Jab", + "Jabl", + "Jax", + "Jbx", + "Jlx", + "Jablx", + "Jgmdablx", }; unsigned arena_ind, i; @@ -962,23 +1189,27 @@ TEST_BEGIN(test_stats_print_json) { case 1: { size_t sz = sizeof(arena_ind); expect_d_eq(mallctl("arenas.create", (void *)&arena_ind, - &sz, NULL, 0), 0, "Unexpected mallctl failure"); + &sz, NULL, 0), + 0, "Unexpected mallctl failure"); break; - } case 2: { + } + case 2: { size_t mib[3]; - size_t miblen = sizeof(mib)/sizeof(size_t); - expect_d_eq(mallctlnametomib("arena.0.destroy", - mib, &miblen), 0, - "Unexpected mallctlnametomib failure"); + size_t miblen = sizeof(mib) / sizeof(size_t); + expect_d_eq( + mallctlnametomib("arena.0.destroy", mib, &miblen), + 0, "Unexpected mallctlnametomib failure"); mib[1] = arena_ind; - expect_d_eq(mallctlbymib(mib, miblen, NULL, NULL, NULL, - 0), 0, "Unexpected mallctlbymib failure"); + expect_d_eq( + mallctlbymib(mib, miblen, NULL, NULL, NULL, 0), 0, + "Unexpected mallctlbymib failure"); break; - } default: + } + default: not_reached(); } - for (j = 0; j < sizeof(opts)/sizeof(const char *); j++) { + for (j = 0; j < sizeof(opts) / sizeof(const char *); j++) { parser_t parser; parser_init(&parser, true); @@ -993,7 +1224,5 @@ TEST_END int main(void) { - return test( - test_json_parser, - test_stats_print_json); + return test(test_json_parser, test_stats_print_json); } diff --git a/test/unit/sz.c b/test/unit/sz.c index 8ae04b92..fa2b8dc0 100644 --- a/test/unit/sz.c +++ b/test/unit/sz.c @@ -10,8 +10,8 @@ TEST_BEGIN(test_sz_psz2ind) { for (size_t i = 0; i < SC_NGROUP; i++) { for (size_t psz = i * PAGE + 1; psz <= (i + 1) * PAGE; psz++) { pszind_t ind = sz_psz2ind(psz); - expect_zu_eq(ind, i, "Got %u as sz_psz2ind of %zu", ind, - psz); + expect_zu_eq( + ind, i, "Got %u as sz_psz2ind of %zu", ind, psz); } } @@ -25,15 +25,14 @@ TEST_BEGIN(test_sz_psz2ind) { */ size_t base_psz = 1 << 
(SC_LG_NGROUP + LG_PAGE); size_t base_ind = 0; - while (base_ind < SC_NSIZES && - reg_size_compute(data.sc[base_ind].lg_base, - data.sc[base_ind].lg_delta, - data.sc[base_ind].ndelta) < base_psz) { + while (base_ind < SC_NSIZES + && reg_size_compute(data.sc[base_ind].lg_base, + data.sc[base_ind].lg_delta, data.sc[base_ind].ndelta) + < base_psz) { base_ind++; } - expect_zu_eq( - reg_size_compute(data.sc[base_ind].lg_base, - data.sc[base_ind].lg_delta, data.sc[base_ind].ndelta), + expect_zu_eq(reg_size_compute(data.sc[base_ind].lg_base, + data.sc[base_ind].lg_delta, data.sc[base_ind].ndelta), base_psz, "Size class equal to %zu not found", base_psz); /* * Test different sizes falling into groups after the 'base'. The @@ -42,21 +41,21 @@ TEST_BEGIN(test_sz_psz2ind) { base_ind -= SC_NGROUP; for (size_t psz = base_psz; psz <= 64 * 1024 * 1024; psz += PAGE / 3) { pszind_t ind = sz_psz2ind(psz); - sc_t gt_sc = data.sc[ind + base_ind]; + sc_t gt_sc = data.sc[ind + base_ind]; expect_zu_gt(psz, - reg_size_compute(gt_sc.lg_base, gt_sc.lg_delta, - gt_sc.ndelta), + reg_size_compute( + gt_sc.lg_base, gt_sc.lg_delta, gt_sc.ndelta), "Got %u as sz_psz2ind of %zu", ind, psz); sc_t le_sc = data.sc[ind + base_ind + 1]; expect_zu_le(psz, - reg_size_compute(le_sc.lg_base, le_sc.lg_delta, - le_sc.ndelta), + reg_size_compute( + le_sc.lg_base, le_sc.lg_delta, le_sc.ndelta), "Got %u as sz_psz2ind of %zu", ind, psz); } pszind_t max_ind = sz_psz2ind(SC_LARGE_MAXCLASS + 1); - expect_lu_eq(max_ind, SC_NPSIZES, - "Got %u as sz_psz2ind of %llu", max_ind, SC_LARGE_MAXCLASS); + expect_lu_eq(max_ind, SC_NPSIZES, "Got %u as sz_psz2ind of %llu", + max_ind, SC_LARGE_MAXCLASS); } TEST_END diff --git a/test/unit/tcache_max.c b/test/unit/tcache_max.c index 884ee7fe..d57b2d3b 100644 --- a/test/unit/tcache_max.c +++ b/test/unit/tcache_max.c @@ -69,8 +69,8 @@ tcache_bytes_read_global(void) { static size_t tcache_bytes_read_local(void) { - size_t tcache_bytes = 0; - tsd_t *tsd = tsd_fetch(); + size_t 
tcache_bytes = 0; + tsd_t *tsd = tsd_fetch(); tcache_t *tcache = tcache_get(tsd); for (szind_t i = 0; i < tcache_nbins_get(tcache->tcache_slow); i++) { cache_bin_t *cache_bin = &tcache->bins[i]; @@ -98,7 +98,7 @@ test_tcache_bytes_alloc(size_t alloc_size, size_t tcache_max, size_t usize = sz_s2u(alloc_size); /* No change is expected if usize is outside of tcache_max range. */ - bool cached = (usize <= tcache_max); + bool cached = (usize <= tcache_max); ssize_t diff = cached ? usize : 0; void *ptr1 = alloc_func(alloc_size, alloc_option); @@ -186,7 +186,7 @@ TEST_BEGIN(test_tcache_max) { test_skip_if(san_uaf_detection_enabled()); unsigned arena_ind, alloc_option, dalloc_option; - size_t sz = sizeof(arena_ind); + size_t sz = sizeof(arena_ind); expect_d_eq(mallctl("arenas.create", (void *)&arena_ind, &sz, NULL, 0), 0, "Unexpected mallctl() failure"); expect_d_eq( @@ -215,12 +215,12 @@ static void validate_tcache_stack(tcache_t *tcache) { /* Assume bins[0] is enabled. */ void *tcache_stack = tcache->bins[0].stack_head; - bool expect_found = cache_bin_stack_use_thp() ? true : false; + bool expect_found = cache_bin_stack_use_thp() ? true : false; /* Walk through all blocks to see if the stack is within range. 
*/ - base_t *base = b0get(); + base_t *base = b0get(); base_block_t *next = base->blocks; - bool found = false; + bool found = false; do { base_block_t *block = next; if ((byte_t *)tcache_stack >= (byte_t *)block @@ -237,10 +237,10 @@ validate_tcache_stack(tcache_t *tcache) { static void * tcache_check(void *arg) { - size_t old_tcache_max, new_tcache_max, min_tcache_max, sz; - unsigned tcache_nbins; - tsd_t *tsd = tsd_fetch(); - tcache_t *tcache = tsd_tcachep_get(tsd); + size_t old_tcache_max, new_tcache_max, min_tcache_max, sz; + unsigned tcache_nbins; + tsd_t *tsd = tsd_fetch(); + tcache_t *tcache = tsd_tcachep_get(tsd); tcache_slow_t *tcache_slow = tcache->tcache_slow; sz = sizeof(size_t); new_tcache_max = *(size_t *)arg; @@ -263,7 +263,7 @@ tcache_check(void *arg) { * Test an input that is not a valid size class, it should be ceiled * to a valid size class. */ - bool e0 = false, e1; + bool e0 = false, e1; size_t bool_sz = sizeof(bool); expect_d_eq(mallctl("thread.tcache.enabled", (void *)&e1, &bool_sz, (void *)&e0, bool_sz), diff --git a/test/unit/test_hooks.c b/test/unit/test_hooks.c index 41e7bf35..47e5fa9e 100644 --- a/test/unit/test_hooks.c +++ b/test/unit/test_hooks.c @@ -32,7 +32,5 @@ TEST_END int main(void) { - return test( - unhooked_call, - hooked_call); + return test(unhooked_call, hooked_call); } diff --git a/test/unit/thread_event.c b/test/unit/thread_event.c index 66d61cd2..d886c998 100644 --- a/test/unit/thread_event.c +++ b/test/unit/thread_event.c @@ -1,20 +1,17 @@ #include "test/jemalloc_test.h" static uint32_t nuser_hook_calls; -static bool is_registered = false; +static bool is_registered = false; static void test_cb(bool is_alloc, uint64_t tallocated, uint64_t tdallocated) { ++nuser_hook_calls; } static user_hook_object_t tobj = { - .callback = &test_cb, - .interval = 10, - .is_alloc_only = false -}; + .callback = &test_cb, .interval = 10, .is_alloc_only = false}; TEST_BEGIN(test_next_event_fast) { - tsd_t *tsd = tsd_fetch(); + tsd_t *tsd = 
tsd_fetch(); te_ctx_t ctx; te_ctx_get(tsd, &ctx, true); @@ -23,7 +20,8 @@ TEST_BEGIN(test_next_event_fast) { te_ctx_next_event_set(tsd, &ctx, TE_NEXT_EVENT_FAST_MAX); if (!is_registered) { - is_registered = 0 == te_register_user_handler(tsd_tsdn(tsd), &tobj); + is_registered = 0 + == te_register_user_handler(tsd_tsdn(tsd), &tobj); } assert_true(is_registered || !config_stats, "Register user handler"); nuser_hook_calls = 0; @@ -35,7 +33,8 @@ TEST_BEGIN(test_next_event_fast) { /* Test next_event_fast rolling back to 0. */ void *p = malloc(16U); - assert_true(nuser_hook_calls == 1 || !config_stats, "Expected alloc call"); + assert_true( + nuser_hook_calls == 1 || !config_stats, "Expected alloc call"); assert_ptr_not_null(p, "malloc() failed"); free(p); @@ -48,6 +47,5 @@ TEST_END int main(void) { - return test( - test_next_event_fast); + return test(test_next_event_fast); } diff --git a/test/unit/ticker.c b/test/unit/ticker.c index c4147a0c..31a2b8e0 100644 --- a/test/unit/ticker.c +++ b/test/unit/ticker.c @@ -6,7 +6,7 @@ TEST_BEGIN(test_ticker_tick) { #define NREPS 2 #define NTICKS 3 ticker_t ticker; - int32_t i, j; + int32_t i, j; ticker_init(&ticker, NTICKS); for (i = 0; i < NREPS; i++) { @@ -16,12 +16,12 @@ TEST_BEGIN(test_ticker_tick) { expect_false(ticker_tick(&ticker, false), "Unexpected ticker fire (i=%d, j=%d)", i, j); } - expect_u32_eq(ticker_read(&ticker), 0, - "Expected ticker depletion"); + expect_u32_eq( + ticker_read(&ticker), 0, "Expected ticker depletion"); expect_true(ticker_tick(&ticker, false), "Expected ticker fire (i=%d)", i); - expect_u32_eq(ticker_read(&ticker), NTICKS, - "Expected ticker reset"); + expect_u32_eq( + ticker_read(&ticker), NTICKS, "Expected ticker reset"); } #undef NTICKS } @@ -34,15 +34,15 @@ TEST_BEGIN(test_ticker_ticks) { ticker_init(&ticker, NTICKS); expect_u_eq(ticker_read(&ticker), NTICKS, "Unexpected ticker value"); - expect_false(ticker_ticks(&ticker, NTICKS, false), - "Unexpected ticker fire"); + expect_false( + 
ticker_ticks(&ticker, NTICKS, false), "Unexpected ticker fire"); expect_u_eq(ticker_read(&ticker), 0, "Unexpected ticker value"); - expect_true(ticker_ticks(&ticker, NTICKS, false), - "Expected ticker fire"); + expect_true( + ticker_ticks(&ticker, NTICKS, false), "Expected ticker fire"); expect_u_eq(ticker_read(&ticker), NTICKS, "Unexpected ticker value"); - expect_true(ticker_ticks(&ticker, NTICKS + 1, false), - "Expected ticker fire"); + expect_true( + ticker_ticks(&ticker, NTICKS + 1, false), "Expected ticker fire"); expect_u_eq(ticker_read(&ticker), NTICKS, "Unexpected ticker value"); #undef NTICKS } @@ -55,8 +55,8 @@ TEST_BEGIN(test_ticker_copy) { ticker_init(&ta, NTICKS); ticker_copy(&tb, &ta); expect_u_eq(ticker_read(&tb), NTICKS, "Unexpected ticker value"); - expect_true(ticker_ticks(&tb, NTICKS + 1, false), - "Expected ticker fire"); + expect_true( + ticker_ticks(&tb, NTICKS + 1, false), "Expected ticker fire"); expect_u_eq(ticker_read(&tb), NTICKS, "Unexpected ticker value"); ticker_tick(&ta, false); @@ -69,7 +69,7 @@ TEST_BEGIN(test_ticker_copy) { TEST_END TEST_BEGIN(test_ticker_geom) { - const int32_t ticks = 100; + const int32_t ticks = 100; const uint64_t niters = 100 * 1000; ticker_geom_t ticker; @@ -78,7 +78,7 @@ TEST_BEGIN(test_ticker_geom) { /* Just some random constant. */ uint64_t prng_state = 0x343219f93496db9fULL; for (uint64_t i = 0; i < niters; i++) { - while(!ticker_geom_tick(&ticker, &prng_state, false)) { + while (!ticker_geom_tick(&ticker, &prng_state, false)) { total_ticks++; } } @@ -87,15 +87,15 @@ TEST_BEGIN(test_ticker_geom) { * used at the time this was tested, total_ticks is 95.1% of the * expected ticks. 
*/ - expect_u64_ge(total_ticks , niters * ticks * 9 / 10, - "Mean off by > 10%%"); - expect_u64_le(total_ticks , niters * ticks * 11 / 10, - "Mean off by > 10%%"); + expect_u64_ge( + total_ticks, niters * ticks * 9 / 10, "Mean off by > 10%%"); + expect_u64_le( + total_ticks, niters * ticks * 11 / 10, "Mean off by > 10%%"); } TEST_END TEST_BEGIN(test_ticker_delay) { - const int32_t ticks = 1000; + const int32_t ticks = 1000; const uint64_t niters = 10000; ticker_t t1; @@ -120,22 +120,19 @@ TEST_BEGIN(test_ticker_delay) { expect_false(ticker_geom_tick(&t2, &prng_state, delay), "Unexpected ticker fire"); expect_d_eq(ticker_read(&t1), 0, "Unexpected ticker value"); - expect_d_eq(ticker_geom_read(&t2), 0, "Unexpected ticker value"); + expect_d_eq( + ticker_geom_read(&t2), 0, "Unexpected ticker value"); } delay = false; expect_true(ticker_tick(&t1, delay), "Expected ticker fire"); - expect_true(ticker_geom_tick(&t2, &prng_state, delay), - "Expected ticker fire"); + expect_true( + ticker_geom_tick(&t2, &prng_state, delay), "Expected ticker fire"); } TEST_END int main(void) { - return test( - test_ticker_tick, - test_ticker_ticks, - test_ticker_copy, - test_ticker_geom, - test_ticker_delay); + return test(test_ticker_tick, test_ticker_ticks, test_ticker_copy, + test_ticker_geom, test_ticker_delay); } diff --git a/test/unit/tsd.c b/test/unit/tsd.c index bb5cd9f6..9610ceac 100644 --- a/test/unit/tsd.c +++ b/test/unit/tsd.c @@ -5,7 +5,7 @@ * be asserting that we're on one. */ static bool originally_fast; -static int data_cleanup_count; +static int data_cleanup_count; void data_cleanup(int *data) { @@ -45,7 +45,7 @@ data_cleanup(int *data) { static void * thd_start(void *arg) { - int d = (int)(uintptr_t)arg; + int d = (int)(uintptr_t)arg; void *p; /* @@ -105,11 +105,10 @@ thd_start_reincarnated(void *arg) { expect_ptr_not_null(p, "Unexpected malloc() failure"); /* Manually trigger reincarnation. 
*/ - expect_ptr_not_null(tsd_arena_get(tsd), - "Should have tsd arena set."); + expect_ptr_not_null(tsd_arena_get(tsd), "Should have tsd arena set."); tsd_cleanup((void *)tsd); - expect_ptr_null(*tsd_arenap_get_unsafe(tsd), - "TSD arena should have been cleared."); + expect_ptr_null( + *tsd_arenap_get_unsafe(tsd), "TSD arena should have been cleared."); expect_u_eq(tsd_state_get(tsd), tsd_state_purgatory, "TSD state should be purgatory\n"); @@ -193,7 +192,7 @@ TEST_END typedef struct { atomic_u32_t phase; - atomic_b_t error; + atomic_b_t error; } global_slow_data_t; static void * @@ -207,8 +206,8 @@ thd_start_global_slow(void *arg) { * No global slowness has happened yet; there was an error if we were * originally fast but aren't now. */ - atomic_store_b(&data->error, originally_fast && !tsd_fast(tsd), - ATOMIC_SEQ_CST); + atomic_store_b( + &data->error, originally_fast && !tsd_fast(tsd), ATOMIC_SEQ_CST); atomic_store_u32(&data->phase, 1, ATOMIC_SEQ_CST); /* PHASE 2 */ @@ -241,8 +240,8 @@ thd_start_global_slow(void *arg) { * Both decrements happened; we should be fast again (if we ever * were) */ - atomic_store_b(&data->error, originally_fast && !tsd_fast(tsd), - ATOMIC_SEQ_CST); + atomic_store_b( + &data->error, originally_fast && !tsd_fast(tsd), ATOMIC_SEQ_CST); atomic_store_u32(&data->phase, 9, ATOMIC_SEQ_CST); return NULL; @@ -321,10 +320,7 @@ main(void) { return test_status_fail; } - return test_no_reentrancy( - test_tsd_main_thread, - test_tsd_sub_thread, - test_tsd_sub_thread_dalloc_only, - test_tsd_reincarnation, + return test_no_reentrancy(test_tsd_main_thread, test_tsd_sub_thread, + test_tsd_sub_thread_dalloc_only, test_tsd_reincarnation, test_tsd_global_slow); } diff --git a/test/unit/uaf.c b/test/unit/uaf.c index a8433c29..25399ed0 100644 --- a/test/unit/uaf.c +++ b/test/unit/uaf.c @@ -11,7 +11,8 @@ const char *malloc_conf = TEST_SAN_UAF_ALIGN_ENABLE; static size_t san_uaf_align; static bool fake_abort_called; -void fake_abort(const char *message) { 
+void +fake_abort(const char *message) { (void)message; fake_abort_called = true; } @@ -24,8 +25,8 @@ test_write_after_free_pre(void) { static void test_write_after_free_post(void) { - assert_d_eq(mallctl("thread.tcache.flush", NULL, NULL, NULL, 0), - 0, "Unexpected tcache flush failure"); + assert_d_eq(mallctl("thread.tcache.flush", NULL, NULL, NULL, 0), 0, + "Unexpected tcache flush failure"); expect_true(fake_abort_called, "Use-after-free check didn't fire."); safety_check_set_abort(NULL); } @@ -37,9 +38,10 @@ uaf_detection_enabled(void) { } ssize_t lg_san_uaf_align; - size_t sz = sizeof(lg_san_uaf_align); - assert_d_eq(mallctl("opt.lg_san_uaf_align", &lg_san_uaf_align, &sz, - NULL, 0), 0, "Unexpected mallctl failure"); + size_t sz = sizeof(lg_san_uaf_align); + assert_d_eq( + mallctl("opt.lg_san_uaf_align", &lg_san_uaf_align, &sz, NULL, 0), 0, + "Unexpected mallctl failure"); if (lg_san_uaf_align < 0) { return false; } @@ -48,8 +50,9 @@ uaf_detection_enabled(void) { bool tcache_enabled; sz = sizeof(tcache_enabled); - assert_d_eq(mallctl("thread.tcache.enabled", &tcache_enabled, &sz, NULL, - 0), 0, "Unexpected mallctl failure"); + assert_d_eq( + mallctl("thread.tcache.enabled", &tcache_enabled, &sz, NULL, 0), 0, + "Unexpected mallctl failure"); if (!tcache_enabled) { return false; } @@ -69,10 +72,10 @@ read_tcache_stashed_bytes(unsigned arena_ind) { size_t tcache_stashed_bytes; size_t sz = sizeof(tcache_stashed_bytes); - assert_d_eq(mallctl( - "stats.arenas." STRINGIFY(MALLCTL_ARENAS_ALL) - ".tcache_stashed_bytes", &tcache_stashed_bytes, &sz, NULL, 0), 0, - "Unexpected mallctl failure"); + assert_d_eq(mallctl("stats.arenas." STRINGIFY( + MALLCTL_ARENAS_ALL) ".tcache_stashed_bytes", + &tcache_stashed_bytes, &sz, NULL, 0), + 0, "Unexpected mallctl failure"); return tcache_stashed_bytes; } @@ -91,17 +94,17 @@ test_use_after_free(size_t alloc_size, bool write_after_free) { * make use-after-free tolerable. 
*/ unsigned arena_ind = do_arena_create(-1, -1); - int flags = MALLOCX_ARENA(arena_ind) | MALLOCX_TCACHE_NONE; + int flags = MALLOCX_ARENA(arena_ind) | MALLOCX_TCACHE_NONE; size_t n_max = san_uaf_align * 2; void **items = mallocx(n_max * sizeof(void *), flags); assert_ptr_not_null(items, "Unexpected mallocx failure"); - bool found = false; + bool found = false; size_t iter = 0; - char magic = 's'; - assert_d_eq(mallctl("thread.tcache.flush", NULL, NULL, NULL, 0), - 0, "Unexpected tcache flush failure"); + char magic = 's'; + assert_d_eq(mallctl("thread.tcache.flush", NULL, NULL, NULL, 0), 0, + "Unexpected tcache flush failure"); while (!found) { ptr = mallocx(alloc_size, flags); assert_ptr_not_null(ptr, "Unexpected mallocx failure"); @@ -194,7 +197,7 @@ static bool check_allocated_intact(void **allocated, size_t n_alloc) { for (unsigned i = 0; i < n_alloc; i++) { void *ptr = *(void **)allocated[i]; - bool found = false; + bool found = false; for (unsigned j = 0; j < n_alloc; j++) { if (ptr == allocated[j]) { found = true; @@ -213,7 +216,7 @@ TEST_BEGIN(test_use_after_free_integration) { test_skip_if(!uaf_detection_enabled()); unsigned arena_ind = do_arena_create(-1, -1); - int flags = MALLOCX_ARENA(arena_ind); + int flags = MALLOCX_ARENA(arena_ind); size_t n_alloc = san_uaf_align * 2; void **allocated = mallocx(n_alloc * sizeof(void *), flags); @@ -255,8 +258,6 @@ TEST_END int main(void) { - return test( - test_read_after_free, - test_write_after_free, + return test(test_read_after_free, test_write_after_free, test_use_after_free_integration); } diff --git a/test/unit/witness.c b/test/unit/witness.c index 5a6c4482..ccefb5a2 100644 --- a/test/unit/witness.c +++ b/test/unit/witness.c @@ -1,9 +1,9 @@ #include "test/jemalloc_test.h" -static witness_lock_error_t *witness_lock_error_orig; -static witness_owner_error_t *witness_owner_error_orig; +static witness_lock_error_t *witness_lock_error_orig; +static witness_owner_error_t *witness_owner_error_orig; static 
witness_not_owner_error_t *witness_not_owner_error_orig; -static witness_depth_error_t *witness_depth_error_orig; +static witness_depth_error_t *witness_depth_error_orig; static bool saw_lock_error; static bool saw_owner_error; @@ -11,8 +11,8 @@ static bool saw_not_owner_error; static bool saw_depth_error; static void -witness_lock_error_intercept(const witness_list_t *witnesses, - const witness_t *witness) { +witness_lock_error_intercept( + const witness_list_t *witnesses, const witness_t *witness) { saw_lock_error = true; } @@ -43,8 +43,8 @@ witness_comp(const witness_t *a, void *oa, const witness_t *b, void *ob) { } static int -witness_comp_reverse(const witness_t *a, void *oa, const witness_t *b, - void *ob) { +witness_comp_reverse( + const witness_t *a, void *oa, const witness_t *b, void *ob) { expect_u_eq(a->rank, b->rank, "Witnesses should have equal rank"); assert(oa == (void *)a); @@ -54,8 +54,8 @@ witness_comp_reverse(const witness_t *a, void *oa, const witness_t *b, } TEST_BEGIN(test_witness) { - witness_t a, b; - witness_tsdn_t witness_tsdn = { WITNESS_TSD_INITIALIZER }; + witness_t a, b; + witness_tsdn_t witness_tsdn = {WITNESS_TSD_INITIALIZER}; test_skip_if(!config_debug); @@ -94,8 +94,8 @@ TEST_BEGIN(test_witness) { TEST_END TEST_BEGIN(test_witness_comp) { - witness_t a, b, c, d; - witness_tsdn_t witness_tsdn = { WITNESS_TSD_INITIALIZER }; + witness_t a, b, c, d; + witness_tsdn_t witness_tsdn = {WITNESS_TSD_INITIALIZER}; test_skip_if(!config_debug); @@ -146,8 +146,8 @@ TEST_BEGIN(test_witness_comp) { TEST_END TEST_BEGIN(test_witness_reversal) { - witness_t a, b; - witness_tsdn_t witness_tsdn = { WITNESS_TSD_INITIALIZER }; + witness_t a, b; + witness_tsdn_t witness_tsdn = {WITNESS_TSD_INITIALIZER}; test_skip_if(!config_debug); @@ -177,8 +177,8 @@ TEST_BEGIN(test_witness_reversal) { TEST_END TEST_BEGIN(test_witness_recursive) { - witness_t a; - witness_tsdn_t witness_tsdn = { WITNESS_TSD_INITIALIZER }; + witness_t a; + witness_tsdn_t witness_tsdn = 
{WITNESS_TSD_INITIALIZER}; test_skip_if(!config_debug); @@ -207,13 +207,12 @@ TEST_BEGIN(test_witness_recursive) { witness_owner_error = witness_owner_error_orig; witness_lock_error = witness_lock_error_orig; - } TEST_END TEST_BEGIN(test_witness_unlock_not_owned) { - witness_t a; - witness_tsdn_t witness_tsdn = { WITNESS_TSD_INITIALIZER }; + witness_t a; + witness_tsdn_t witness_tsdn = {WITNESS_TSD_INITIALIZER}; test_skip_if(!config_debug); @@ -236,8 +235,8 @@ TEST_BEGIN(test_witness_unlock_not_owned) { TEST_END TEST_BEGIN(test_witness_depth) { - witness_t a; - witness_tsdn_t witness_tsdn = { WITNESS_TSD_INITIALIZER }; + witness_t a; + witness_tsdn_t witness_tsdn = {WITNESS_TSD_INITIALIZER}; test_skip_if(!config_debug); @@ -270,11 +269,7 @@ TEST_END int main(void) { - return test( - test_witness, - test_witness_comp, - test_witness_reversal, - test_witness_recursive, - test_witness_unlock_not_owned, + return test(test_witness, test_witness_comp, test_witness_reversal, + test_witness_recursive, test_witness_unlock_not_owned, test_witness_depth); } diff --git a/test/unit/zero.c b/test/unit/zero.c index d3e81f1b..522d6908 100644 --- a/test/unit/zero.c +++ b/test/unit/zero.c @@ -3,35 +3,35 @@ static void test_zero(size_t sz_min, size_t sz_max) { uint8_t *s; - size_t sz_prev, sz, i; -#define MAGIC ((uint8_t)0x61) + size_t sz_prev, sz, i; +#define MAGIC ((uint8_t)0x61) sz_prev = 0; s = (uint8_t *)mallocx(sz_min, 0); expect_ptr_not_null((void *)s, "Unexpected mallocx() failure"); for (sz = sallocx(s, 0); sz <= sz_max; - sz_prev = sz, sz = sallocx(s, 0)) { + sz_prev = sz, sz = sallocx(s, 0)) { if (sz_prev > 0) { expect_u_eq(s[0], MAGIC, "Previously allocated byte %zu/%zu is corrupted", ZU(0), sz_prev); - expect_u_eq(s[sz_prev-1], MAGIC, + expect_u_eq(s[sz_prev - 1], MAGIC, "Previously allocated byte %zu/%zu is corrupted", - sz_prev-1, sz_prev); + sz_prev - 1, sz_prev); } for (i = sz_prev; i < sz; i++) { expect_u_eq(s[i], 0x0, - "Newly allocated byte %zu/%zu isn't 
zero-filled", - i, sz); + "Newly allocated byte %zu/%zu isn't zero-filled", i, + sz); s[i] = MAGIC; } - if (xallocx(s, sz+1, 0, 0) == sz) { - s = (uint8_t *)rallocx(s, sz+1, 0); - expect_ptr_not_null((void *)s, - "Unexpected rallocx() failure"); + if (xallocx(s, sz + 1, 0, 0) == sz) { + s = (uint8_t *)rallocx(s, sz + 1, 0); + expect_ptr_not_null( + (void *)s, "Unexpected rallocx() failure"); } } @@ -53,7 +53,5 @@ TEST_END int main(void) { - return test( - test_zero_small, - test_zero_large); + return test(test_zero_small, test_zero_large); } diff --git a/test/unit/zero_realloc_abort.c b/test/unit/zero_realloc_abort.c index f014cdc2..1d8bf9c3 100644 --- a/test/unit/zero_realloc_abort.c +++ b/test/unit/zero_realloc_abort.c @@ -4,7 +4,8 @@ static bool abort_called = false; -void set_abort_called(const char *message) { +void +set_abort_called(const char *message) { (void)message; abort_called = true; }; @@ -21,7 +22,5 @@ TEST_END int main(void) { - return test( - test_realloc_abort); + return test(test_realloc_abort); } - diff --git a/test/unit/zero_realloc_alloc.c b/test/unit/zero_realloc_alloc.c index 6954818c..5b4f985f 100644 --- a/test/unit/zero_realloc_alloc.c +++ b/test/unit/zero_realloc_alloc.c @@ -6,9 +6,10 @@ allocated(void) { return 0; } uint64_t allocated; - size_t sz = sizeof(allocated); - expect_d_eq(mallctl("thread.allocated", (void *)&allocated, &sz, NULL, - 0), 0, "Unexpected mallctl failure"); + size_t sz = sizeof(allocated); + expect_d_eq( + mallctl("thread.allocated", (void *)&allocated, &sz, NULL, 0), 0, + "Unexpected mallctl failure"); return allocated; } @@ -18,9 +19,10 @@ deallocated(void) { return 0; } uint64_t deallocated; - size_t sz = sizeof(deallocated); - expect_d_eq(mallctl("thread.deallocated", (void *)&deallocated, &sz, - NULL, 0), 0, "Unexpected mallctl failure"); + size_t sz = sizeof(deallocated); + expect_d_eq( + mallctl("thread.deallocated", (void *)&deallocated, &sz, NULL, 0), + 0, "Unexpected mallctl failure"); return deallocated; 
} @@ -43,6 +45,5 @@ TEST_BEGIN(test_realloc_alloc) { TEST_END int main(void) { - return test( - test_realloc_alloc); + return test(test_realloc_alloc); } diff --git a/test/unit/zero_realloc_free.c b/test/unit/zero_realloc_free.c index 277f219d..c2aa0afa 100644 --- a/test/unit/zero_realloc_free.c +++ b/test/unit/zero_realloc_free.c @@ -6,9 +6,10 @@ deallocated(void) { return 0; } uint64_t deallocated; - size_t sz = sizeof(deallocated); - expect_d_eq(mallctl("thread.deallocated", (void *)&deallocated, &sz, - NULL, 0), 0, "Unexpected mallctl failure"); + size_t sz = sizeof(deallocated); + expect_d_eq( + mallctl("thread.deallocated", (void *)&deallocated, &sz, NULL, 0), + 0, "Unexpected mallctl failure"); return deallocated; } @@ -28,6 +29,5 @@ TEST_END int main(void) { - return test( - test_realloc_free); + return test(test_realloc_free); } diff --git a/test/unit/zero_reallocs.c b/test/unit/zero_reallocs.c index a9077222..6c4a51d6 100644 --- a/test/unit/zero_reallocs.c +++ b/test/unit/zero_reallocs.c @@ -8,8 +8,9 @@ zero_reallocs(void) { size_t count = 12345; size_t sz = sizeof(count); - expect_d_eq(mallctl("stats.zero_reallocs", (void *)&count, &sz, - NULL, 0), 0, "Unexpected mallctl failure"); + expect_d_eq( + mallctl("stats.zero_reallocs", (void *)&count, &sz, NULL, 0), 0, + "Unexpected mallctl failure"); return count; } @@ -35,6 +36,5 @@ main(void) { * We expect explicit counts; reentrant tests run multiple times, so * counts leak across runs. */ - return test_no_reentrancy( - test_zero_reallocs); + return test_no_reentrancy(test_zero_reallocs); }