diff --git a/.travis.yml b/.travis.yml index 4cc116e5..7d93ead5 100644 --- a/.travis.yml +++ b/.travis.yml @@ -143,7 +143,12 @@ matrix: env: CC=gcc CXX=g++ COMPILER_FLAGS="" CONFIGURE_FLAGS="--with-malloc-conf=dss:primary,background_thread:true" EXTRA_CFLAGS="-Werror -Wno-array-bounds" - os: linux env: CC=gcc CXX=g++ COMPILER_FLAGS="" CONFIGURE_FLAGS="--with-malloc-conf=percpu_arena:percpu,background_thread:true" EXTRA_CFLAGS="-Werror -Wno-array-bounds" - + - os: linux + env: CC=gcc CXX=g++ COMPILER_FLAGS="" CONFIGURE_FLAGS="--enable-debug --disable-cache-oblivious --enable-stats --enable-log --enable-prof" EXTRA_CFLAGS="-Werror -Wno-array-bounds" + script: + - make check + - make -j test/unit/log + - test/unit/log before_script: - autoconf diff --git a/configure.ac b/configure.ac index a6a08db0..1c209117 100644 --- a/configure.ac +++ b/configure.ac @@ -242,6 +242,7 @@ if test "x$GCC" = "xyes" ; then fi fi JE_CFLAGS_ADD([-Wall]) + JE_CFLAGS_ADD([-Wextra]) JE_CFLAGS_ADD([-Wshorten-64-to-32]) JE_CFLAGS_ADD([-Wsign-compare]) JE_CFLAGS_ADD([-Wundef]) @@ -289,6 +290,7 @@ if test "x$enable_cxx" = "x1" ; then AX_CXX_COMPILE_STDCXX([14], [noext], [optional]) if test "x${HAVE_CXX14}" = "x1" ; then JE_CXXFLAGS_ADD([-Wall]) + JE_CXXFLAGS_ADD([-Wextra]) JE_CXXFLAGS_ADD([-g3]) SAVED_LIBS="${LIBS}" diff --git a/include/jemalloc/internal/arena_inlines_b.h b/include/jemalloc/internal/arena_inlines_b.h index 401be758..d388cae9 100644 --- a/include/jemalloc/internal/arena_inlines_b.h +++ b/include/jemalloc/internal/arena_inlines_b.h @@ -49,7 +49,7 @@ arena_prof_tctx_get(tsdn_t *tsdn, const void *ptr, alloc_ctx_t *alloc_ctx) { } JEMALLOC_ALWAYS_INLINE void -arena_prof_tctx_set(tsdn_t *tsdn, const void *ptr, UNUSED size_t usize, +arena_prof_tctx_set(tsdn_t *tsdn, const void *ptr, size_t usize, alloc_ctx_t *alloc_ctx, prof_tctx_t *tctx) { cassert(config_prof); assert(ptr != NULL); @@ -68,7 +68,7 @@ arena_prof_tctx_set(tsdn_t *tsdn, const void *ptr, UNUSED size_t usize, } static inline void -arena_prof_tctx_reset(tsdn_t *tsdn, const void *ptr, UNUSED prof_tctx_t *tctx) { +arena_prof_tctx_reset(tsdn_t *tsdn, const void *ptr, prof_tctx_t *tctx) { cassert(config_prof); assert(ptr != NULL); @@ -318,7 +318,7 @@ arena_sdalloc(tsdn_t *tsdn, void *ptr, size_t size, tcache_t *tcache, szind_t szind; bool slab; - UNUSED alloc_ctx_t local_ctx; + alloc_ctx_t local_ctx; if (config_prof && opt_prof) { if (alloc_ctx == NULL) { /* Uncommon case and should be a static check. */ diff --git a/include/jemalloc/internal/arena_stats.h b/include/jemalloc/internal/arena_stats.h index 5f3dca8b..39b7262a 100644 --- a/include/jemalloc/internal/arena_stats.h +++ b/include/jemalloc/internal/arena_stats.h @@ -6,6 +6,8 @@ #include "jemalloc/internal/mutex_prof.h" #include "jemalloc/internal/size_classes.h" +JEMALLOC_DIAGNOSTIC_DISABLE_SPURIOUS + /* * In those architectures that support 64-bit atomics, we use atomic updates for * our 64-bit values. 
Otherwise, we use a plain uint64_t and synchronize @@ -95,7 +97,7 @@ struct arena_stats_s { }; static inline bool -arena_stats_init(UNUSED tsdn_t *tsdn, arena_stats_t *arena_stats) { +arena_stats_init(tsdn_t *tsdn, arena_stats_t *arena_stats) { if (config_debug) { for (size_t i = 0; i < sizeof(arena_stats_t); i++) { assert(((char *)arena_stats)[i] == 0); @@ -147,11 +149,11 @@ arena_stats_add_u64(tsdn_t *tsdn, arena_stats_t *arena_stats, #endif } -UNUSED static inline void +static inline void arena_stats_sub_u64(tsdn_t *tsdn, arena_stats_t *arena_stats, arena_stats_u64_t *p, uint64_t x) { #ifdef JEMALLOC_ATOMIC_U64 - UNUSED uint64_t r = atomic_fetch_sub_u64(p, x, ATOMIC_RELAXED); + uint64_t r = atomic_fetch_sub_u64(p, x, ATOMIC_RELAXED); assert(r - x <= r); #else malloc_mutex_assert_owner(tsdn, &arena_stats->mtx); @@ -176,7 +178,8 @@ arena_stats_accum_u64(arena_stats_u64_t *dst, uint64_t src) { } static inline size_t -arena_stats_read_zu(tsdn_t *tsdn, arena_stats_t *arena_stats, atomic_zu_t *p) { +arena_stats_read_zu(tsdn_t *tsdn, arena_stats_t *arena_stats, + atomic_zu_t *p) { #ifdef JEMALLOC_ATOMIC_U64 return atomic_load_zu(p, ATOMIC_RELAXED); #else @@ -186,8 +189,8 @@ arena_stats_read_zu(tsdn_t *tsdn, arena_stats_t *arena_stats, atomic_zu_t *p) { } static inline void -arena_stats_add_zu(tsdn_t *tsdn, arena_stats_t *arena_stats, atomic_zu_t *p, - size_t x) { +arena_stats_add_zu(tsdn_t *tsdn, arena_stats_t *arena_stats, + atomic_zu_t *p, size_t x) { #ifdef JEMALLOC_ATOMIC_U64 atomic_fetch_add_zu(p, x, ATOMIC_RELAXED); #else @@ -198,10 +201,10 @@ arena_stats_add_zu(tsdn_t *tsdn, arena_stats_t *arena_stats, atomic_zu_t *p, } static inline void -arena_stats_sub_zu(tsdn_t *tsdn, arena_stats_t *arena_stats, atomic_zu_t *p, - size_t x) { +arena_stats_sub_zu(tsdn_t *tsdn, arena_stats_t *arena_stats, + atomic_zu_t *p, size_t x) { #ifdef JEMALLOC_ATOMIC_U64 - UNUSED size_t r = atomic_fetch_sub_zu(p, x, ATOMIC_RELAXED); + size_t r = atomic_fetch_sub_zu(p, x, ATOMIC_RELAXED); assert(r - x <= r); #else malloc_mutex_assert_owner(tsdn, &arena_stats->mtx); @@ -233,5 +236,4 @@ arena_stats_mapped_add(tsdn_t *tsdn, arena_stats_t *arena_stats, size_t size) { arena_stats_unlock(tsdn, arena_stats); } - #endif /* JEMALLOC_INTERNAL_ARENA_STATS_H */ diff --git a/include/jemalloc/internal/atomic_gcc_sync.h b/include/jemalloc/internal/atomic_gcc_sync.h index 30846e4d..06a0acf3 100644 --- a/include/jemalloc/internal/atomic_gcc_sync.h +++ b/include/jemalloc/internal/atomic_gcc_sync.h @@ -113,8 +113,8 @@ atomic_store_##short_type(atomic_##short_type##_t *a, \ } \ \ ATOMIC_INLINE type \ -atomic_exchange_##short_type(atomic_##short_type##_t *a, type val, \ - atomic_memory_order_t mo) { \ +atomic_exchange_##short_type(atomic_##short_type##_t *a, type val, \ + atomic_memory_order_t mo) { \ /* \ * Because of FreeBSD, we care about gcc 4.2, which doesn't have\ * an atomic exchange builtin. We fake it with a CAS loop. 
\ @@ -129,8 +129,9 @@ atomic_exchange_##short_type(atomic_##short_type##_t *a, type val, \ \ ATOMIC_INLINE bool \ atomic_compare_exchange_weak_##short_type(atomic_##short_type##_t *a, \ - type *expected, type desired, atomic_memory_order_t success_mo, \ - atomic_memory_order_t failure_mo) { \ + type *expected, type desired, \ + atomic_memory_order_t success_mo, \ + atomic_memory_order_t failure_mo) { \ type prev = __sync_val_compare_and_swap(&a->repr, *expected, \ desired); \ if (prev == *expected) { \ @@ -142,8 +143,9 @@ atomic_compare_exchange_weak_##short_type(atomic_##short_type##_t *a, \ } \ ATOMIC_INLINE bool \ atomic_compare_exchange_strong_##short_type(atomic_##short_type##_t *a, \ - type *expected, type desired, atomic_memory_order_t success_mo, \ - atomic_memory_order_t failure_mo) { \ + type *expected, type desired, \ + atomic_memory_order_t success_mo, \ + atomic_memory_order_t failure_mo) { \ type prev = __sync_val_compare_and_swap(&a->repr, *expected, \ desired); \ if (prev == *expected) { \ diff --git a/include/jemalloc/internal/extent_inlines.h b/include/jemalloc/internal/extent_inlines.h index 77181df8..9b8ddc27 100644 --- a/include/jemalloc/internal/extent_inlines.h +++ b/include/jemalloc/internal/extent_inlines.h @@ -190,7 +190,7 @@ extent_addr_set(extent_t *extent, void *addr) { } static inline void -extent_addr_randomize(UNUSED tsdn_t *tsdn, extent_t *extent, size_t alignment) { +extent_addr_randomize(tsdn_t *tsdn, extent_t *extent, size_t alignment) { assert(extent_base_get(extent) == extent_addr_get(extent)); if (alignment < PAGE) { diff --git a/include/jemalloc/internal/hash.h b/include/jemalloc/internal/hash.h index dcfc992d..0270034e 100644 --- a/include/jemalloc/internal/hash.h +++ b/include/jemalloc/internal/hash.h @@ -104,8 +104,8 @@ hash_x86_32(const void *key, int len, uint32_t seed) { uint32_t k1 = 0; switch (len & 3) { - case 3: k1 ^= tail[2] << 16; - case 2: k1 ^= tail[1] << 8; + case 3: k1 ^= tail[2] << 16; JEMALLOC_FALLTHROUGH + case 2: k1 ^= tail[1] << 8; JEMALLOC_FALLTHROUGH case 1: k1 ^= tail[0]; k1 *= c1; k1 = hash_rotl_32(k1, 15); k1 *= c2; h1 ^= k1; } @@ -119,7 +119,7 @@ hash_x86_32(const void *key, int len, uint32_t seed) { return h1; } -UNUSED static inline void +static inline void hash_x86_128(const void *key, const int len, uint32_t seed, uint64_t r_out[2]) { const uint8_t * data = (const uint8_t *) key; @@ -177,28 +177,29 @@ hash_x86_128(const void *key, const int len, uint32_t seed, uint32_t k4 = 0; switch (len & 15) { - case 15: k4 ^= tail[14] << 16; - case 14: k4 ^= tail[13] << 8; + case 15: k4 ^= tail[14] << 16; JEMALLOC_FALLTHROUGH + case 14: k4 ^= tail[13] << 8; JEMALLOC_FALLTHROUGH case 13: k4 ^= tail[12] << 0; k4 *= c4; k4 = hash_rotl_32(k4, 18); k4 *= c1; h4 ^= k4; - - case 12: k3 ^= tail[11] << 24; - case 11: k3 ^= tail[10] << 16; - case 10: k3 ^= tail[ 9] << 8; + JEMALLOC_FALLTHROUGH + case 12: k3 ^= tail[11] << 24; JEMALLOC_FALLTHROUGH + case 11: k3 ^= tail[10] << 16; JEMALLOC_FALLTHROUGH + case 10: k3 ^= tail[ 9] << 8; JEMALLOC_FALLTHROUGH case 9: k3 ^= tail[ 8] << 0; k3 *= c3; k3 = hash_rotl_32(k3, 17); k3 *= c4; h3 ^= k3; - - case 8: k2 ^= tail[ 7] << 24; - case 7: k2 ^= tail[ 6] << 16; - case 6: k2 ^= tail[ 5] << 8; + JEMALLOC_FALLTHROUGH + case 8: k2 ^= tail[ 7] << 24; JEMALLOC_FALLTHROUGH + case 7: k2 ^= tail[ 6] << 16; JEMALLOC_FALLTHROUGH + case 6: k2 ^= tail[ 5] << 8; JEMALLOC_FALLTHROUGH case 5: k2 ^= tail[ 4] << 0; k2 *= c2; k2 = hash_rotl_32(k2, 16); k2 *= c3; h2 ^= k2; - - case 4: k1 ^= tail[ 3] << 24; - case 3: k1 ^= 
tail[ 2] << 16; - case 2: k1 ^= tail[ 1] << 8; + JEMALLOC_FALLTHROUGH + case 4: k1 ^= tail[ 3] << 24; JEMALLOC_FALLTHROUGH + case 3: k1 ^= tail[ 2] << 16; JEMALLOC_FALLTHROUGH + case 2: k1 ^= tail[ 1] << 8; JEMALLOC_FALLTHROUGH case 1: k1 ^= tail[ 0] << 0; k1 *= c1; k1 = hash_rotl_32(k1, 15); k1 *= c2; h1 ^= k1; + JEMALLOC_FALLTHROUGH } } @@ -220,7 +221,7 @@ hash_x86_128(const void *key, const int len, uint32_t seed, r_out[1] = (((uint64_t) h4) << 32) | h3; } -UNUSED static inline void +static inline void hash_x64_128(const void *key, const int len, const uint32_t seed, uint64_t r_out[2]) { const uint8_t *data = (const uint8_t *) key; @@ -260,22 +261,22 @@ hash_x64_128(const void *key, const int len, const uint32_t seed, uint64_t k2 = 0; switch (len & 15) { - case 15: k2 ^= ((uint64_t)(tail[14])) << 48; /* falls through */ - case 14: k2 ^= ((uint64_t)(tail[13])) << 40; /* falls through */ - case 13: k2 ^= ((uint64_t)(tail[12])) << 32; /* falls through */ - case 12: k2 ^= ((uint64_t)(tail[11])) << 24; /* falls through */ - case 11: k2 ^= ((uint64_t)(tail[10])) << 16; /* falls through */ - case 10: k2 ^= ((uint64_t)(tail[ 9])) << 8; /* falls through */ + case 15: k2 ^= ((uint64_t)(tail[14])) << 48; JEMALLOC_FALLTHROUGH + case 14: k2 ^= ((uint64_t)(tail[13])) << 40; JEMALLOC_FALLTHROUGH + case 13: k2 ^= ((uint64_t)(tail[12])) << 32; JEMALLOC_FALLTHROUGH + case 12: k2 ^= ((uint64_t)(tail[11])) << 24; JEMALLOC_FALLTHROUGH + case 11: k2 ^= ((uint64_t)(tail[10])) << 16; JEMALLOC_FALLTHROUGH + case 10: k2 ^= ((uint64_t)(tail[ 9])) << 8; JEMALLOC_FALLTHROUGH case 9: k2 ^= ((uint64_t)(tail[ 8])) << 0; k2 *= c2; k2 = hash_rotl_64(k2, 33); k2 *= c1; h2 ^= k2; - /* falls through */ - case 8: k1 ^= ((uint64_t)(tail[ 7])) << 56; /* falls through */ - case 7: k1 ^= ((uint64_t)(tail[ 6])) << 48; /* falls through */ - case 6: k1 ^= ((uint64_t)(tail[ 5])) << 40; /* falls through */ - case 5: k1 ^= ((uint64_t)(tail[ 4])) << 32; /* falls through */ - case 4: k1 ^= ((uint64_t)(tail[ 3])) << 24; /* falls through */ - case 3: k1 ^= ((uint64_t)(tail[ 2])) << 16; /* falls through */ - case 2: k1 ^= ((uint64_t)(tail[ 1])) << 8; /* falls through */ + JEMALLOC_FALLTHROUGH + case 8: k1 ^= ((uint64_t)(tail[ 7])) << 56; JEMALLOC_FALLTHROUGH + case 7: k1 ^= ((uint64_t)(tail[ 6])) << 48; JEMALLOC_FALLTHROUGH + case 6: k1 ^= ((uint64_t)(tail[ 5])) << 40; JEMALLOC_FALLTHROUGH + case 5: k1 ^= ((uint64_t)(tail[ 4])) << 32; JEMALLOC_FALLTHROUGH + case 4: k1 ^= ((uint64_t)(tail[ 3])) << 24; JEMALLOC_FALLTHROUGH + case 3: k1 ^= ((uint64_t)(tail[ 2])) << 16; JEMALLOC_FALLTHROUGH + case 2: k1 ^= ((uint64_t)(tail[ 1])) << 8; JEMALLOC_FALLTHROUGH case 1: k1 ^= ((uint64_t)(tail[ 0])) << 0; k1 *= c1; k1 = hash_rotl_64(k1, 31); k1 *= c2; h1 ^= k1; } diff --git a/include/jemalloc/internal/jemalloc_internal_macros.h b/include/jemalloc/internal/jemalloc_internal_macros.h index ed75d376..a1a761b8 100644 --- a/include/jemalloc/internal/jemalloc_internal_macros.h +++ b/include/jemalloc/internal/jemalloc_internal_macros.h @@ -40,4 +40,62 @@ #define JEMALLOC_VA_ARGS_HEAD(head, ...) head #define JEMALLOC_VA_ARGS_TAIL(head, ...) 
__VA_ARGS__ +#if (defined(__GNUC__) || defined(__GNUG__)) && !defined(__clang__) \ + && defined(JEMALLOC_HAVE_ATTR) && (__GNUC__ >= 7) +#define JEMALLOC_FALLTHROUGH JEMALLOC_ATTR(fallthrough); +#else +#define JEMALLOC_FALLTHROUGH /* falls through */ +#endif + + +/* Diagnostic suppression macros */ +#if defined(_MSC_VER) && !defined(__clang__) +# define JEMALLOC_DIAGNOSTIC_PUSH __pragma(warning(push)) +# define JEMALLOC_DIAGNOSTIC_POP __pragma(warning(pop)) +# define JEMALLOC_DIAGNOSTIC_IGNORE(W) __pragma(warning(disable:W)) +# define JEMALLOC_DIAGNOSTIC_IGNORE_MISSING_STRUCT_FIELD_INITIALIZERS +# define JEMALLOC_DIAGNOSTIC_IGNORE_TYPE_LIMITS +# define JEMALLOC_DIAGNOSTIC_IGNORE_ALLOC_SIZE_LARGER_THAN +# define JEMALLOC_DIAGNOSTIC_DISABLE_SPURIOUS +#elif defined(__GNUC__) || defined(__clang__) +/* + * The JEMALLOC_PRAGMA__ macro is an implementation detail of the GCC and Clang + * diagnostic suppression macros and should not be used anywhere else. + */ +# define JEMALLOC_PRAGMA__(X) _Pragma(#X) +# define JEMALLOC_DIAGNOSTIC_PUSH JEMALLOC_PRAGMA__(GCC diagnostic push) +# define JEMALLOC_DIAGNOSTIC_POP JEMALLOC_PRAGMA__(GCC diagnostic pop) +# define JEMALLOC_DIAGNOSTIC_IGNORE(W) JEMALLOC_PRAGMA__(GCC diagnostic ignored W) +# define JEMALLOC_DIAGNOSTIC_IGNORE_MISSING_STRUCT_FIELD_INITIALIZERS \ + JEMALLOC_DIAGNOSTIC_IGNORE("-Wmissing-field-initializers") +# define JEMALLOC_DIAGNOSTIC_IGNORE_TYPE_LIMITS \ + JEMALLOC_DIAGNOSTIC_IGNORE("-Wtype-limits") +# define JEMALLOC_DIAGNOSTIC_IGNORE_UNUSED_PARAMETER \ + JEMALLOC_DIAGNOSTIC_IGNORE("-Wunused-parameter") +# if defined(__GNUC__) && !defined(__clang__) && (__GNUC__ >= 7) +# define JEMALLOC_DIAGNOSTIC_IGNORE_ALLOC_SIZE_LARGER_THAN \ + JEMALLOC_DIAGNOSTIC_IGNORE("-Walloc-size-larger-than=") +# else +# define JEMALLOC_DIAGNOSTIC_IGNORE_ALLOC_SIZE_LARGER_THAN +# endif +# define JEMALLOC_DIAGNOSTIC_DISABLE_SPURIOUS \ + JEMALLOC_DIAGNOSTIC_PUSH \ + JEMALLOC_DIAGNOSTIC_IGNORE_UNUSED_PARAMETER +#else +# define JEMALLOC_DIAGNOSTIC_PUSH +# define JEMALLOC_DIAGNOSTIC_POP +# define JEMALLOC_DIAGNOSTIC_IGNORE(W) +# define JEMALLOC_DIAGNOSTIC_IGNORE_MISSING_STRUCT_FIELD_INITIALIZERS +# define JEMALLOC_DIAGNOSTIC_IGNORE_TYPE_LIMITS +# define JEMALLOC_DIAGNOSTIC_IGNORE_ALLOC_SIZE_LARGER_THAN +# define JEMALLOC_DIAGNOSTIC_DISABLE_SPURIOUS +#endif + +/* + * Disables spurious diagnostics for all headers + * Since these headers are not included by users directly, + * it does not affect their diagnostic settings. 
+ */ +JEMALLOC_DIAGNOSTIC_DISABLE_SPURIOUS + #endif /* JEMALLOC_INTERNAL_MACROS_H */ diff --git a/include/jemalloc/internal/mutex.h b/include/jemalloc/internal/mutex.h index 6520c251..651ce5f9 100644 --- a/include/jemalloc/internal/mutex.h +++ b/include/jemalloc/internal/mutex.h @@ -101,9 +101,15 @@ struct malloc_mutex_s { #ifdef _WIN32 # define MALLOC_MUTEX_INITIALIZER #elif (defined(JEMALLOC_OS_UNFAIR_LOCK)) -# define MALLOC_MUTEX_INITIALIZER \ +# if defined(JEMALLOC_DEBUG) +# define MALLOC_MUTEX_INITIALIZER \ {{{LOCK_PROF_DATA_INITIALIZER, OS_UNFAIR_LOCK_INIT}}, \ + WITNESS_INITIALIZER("mutex", WITNESS_RANK_OMIT), 0} +# else +# define MALLOC_MUTEX_INITIALIZER \ + {{{LOCK_PROF_DATA_INITIALIZER, OS_UNFAIR_LOCK_INIT}}, \ WITNESS_INITIALIZER("mutex", WITNESS_RANK_OMIT)} +# endif #elif (defined(JEMALLOC_OSSPIN)) # define MALLOC_MUTEX_INITIALIZER \ {{{LOCK_PROF_DATA_INITIALIZER, 0}}, \ @@ -111,12 +117,18 @@ struct malloc_mutex_s { #elif (defined(JEMALLOC_MUTEX_INIT_CB)) # define MALLOC_MUTEX_INITIALIZER \ {{{LOCK_PROF_DATA_INITIALIZER, PTHREAD_MUTEX_INITIALIZER, NULL}}, \ - WITNESS_INITIALIZER("mutex", WITNESS_RANK_OMIT)} + WITNESS_INITIALIZER("mutex", WITNESS_RANK_OMIT)} #else # define MALLOC_MUTEX_TYPE PTHREAD_MUTEX_DEFAULT +# if defined(JEMALLOC_DEBUG) # define MALLOC_MUTEX_INITIALIZER \ {{{LOCK_PROF_DATA_INITIALIZER, PTHREAD_MUTEX_INITIALIZER}}, \ - WITNESS_INITIALIZER("mutex", WITNESS_RANK_OMIT)} + WITNESS_INITIALIZER("mutex", WITNESS_RANK_OMIT), 0} +# else +# define MALLOC_MUTEX_INITIALIZER \ + {{{LOCK_PROF_DATA_INITIALIZER, PTHREAD_MUTEX_INITIALIZER}}, \ + WITNESS_INITIALIZER("mutex", WITNESS_RANK_OMIT)} +# endif #endif #ifdef JEMALLOC_LAZY_LOCK diff --git a/include/jemalloc/internal/prof_inlines_a.h b/include/jemalloc/internal/prof_inlines_a.h index a6efb485..c39bc3d4 100644 --- a/include/jemalloc/internal/prof_inlines_a.h +++ b/include/jemalloc/internal/prof_inlines_a.h @@ -4,7 +4,8 @@ #include "jemalloc/internal/mutex.h" static inline bool -prof_accum_add(tsdn_t *tsdn, prof_accum_t *prof_accum, uint64_t accumbytes) { +prof_accum_add(tsdn_t *tsdn, prof_accum_t *prof_accum, + uint64_t accumbytes) { cassert(config_prof); bool overflow; @@ -42,7 +43,8 @@ prof_accum_add(tsdn_t *tsdn, prof_accum_t *prof_accum, uint64_t accumbytes) { } static inline void -prof_accum_cancel(tsdn_t *tsdn, prof_accum_t *prof_accum, size_t usize) { +prof_accum_cancel(tsdn_t *tsdn, prof_accum_t *prof_accum, + size_t usize) { cassert(config_prof); /* diff --git a/include/jemalloc/internal/rtree.h b/include/jemalloc/internal/rtree.h index b59d33a8..dd452f16 100644 --- a/include/jemalloc/internal/rtree.h +++ b/include/jemalloc/internal/rtree.h @@ -170,8 +170,8 @@ rtree_subkey(uintptr_t key, unsigned level) { */ # ifdef RTREE_LEAF_COMPACT JEMALLOC_ALWAYS_INLINE uintptr_t -rtree_leaf_elm_bits_read(tsdn_t *tsdn, rtree_t *rtree, rtree_leaf_elm_t *elm, - bool dependent) { +rtree_leaf_elm_bits_read(tsdn_t *tsdn, rtree_t *rtree, + rtree_leaf_elm_t *elm, bool dependent) { return (uintptr_t)atomic_load_p(&elm->le_bits, dependent ? 
ATOMIC_RELAXED : ATOMIC_ACQUIRE); } @@ -208,7 +208,7 @@ rtree_leaf_elm_bits_slab_get(uintptr_t bits) { # endif JEMALLOC_ALWAYS_INLINE extent_t * -rtree_leaf_elm_extent_read(UNUSED tsdn_t *tsdn, UNUSED rtree_t *rtree, +rtree_leaf_elm_extent_read(tsdn_t *tsdn, rtree_t *rtree, rtree_leaf_elm_t *elm, bool dependent) { #ifdef RTREE_LEAF_COMPACT uintptr_t bits = rtree_leaf_elm_bits_read(tsdn, rtree, elm, dependent); @@ -221,7 +221,7 @@ rtree_leaf_elm_extent_read(UNUSED tsdn_t *tsdn, UNUSED rtree_t *rtree, } JEMALLOC_ALWAYS_INLINE szind_t -rtree_leaf_elm_szind_read(UNUSED tsdn_t *tsdn, UNUSED rtree_t *rtree, +rtree_leaf_elm_szind_read(tsdn_t *tsdn, rtree_t *rtree, rtree_leaf_elm_t *elm, bool dependent) { #ifdef RTREE_LEAF_COMPACT uintptr_t bits = rtree_leaf_elm_bits_read(tsdn, rtree, elm, dependent); @@ -233,7 +233,7 @@ rtree_leaf_elm_szind_read(UNUSED tsdn_t *tsdn, UNUSED rtree_t *rtree, } JEMALLOC_ALWAYS_INLINE bool -rtree_leaf_elm_slab_read(UNUSED tsdn_t *tsdn, UNUSED rtree_t *rtree, +rtree_leaf_elm_slab_read(tsdn_t *tsdn, rtree_t *rtree, rtree_leaf_elm_t *elm, bool dependent) { #ifdef RTREE_LEAF_COMPACT uintptr_t bits = rtree_leaf_elm_bits_read(tsdn, rtree, elm, dependent); @@ -245,7 +245,7 @@ rtree_leaf_elm_slab_read(UNUSED tsdn_t *tsdn, UNUSED rtree_t *rtree, } static inline void -rtree_leaf_elm_extent_write(UNUSED tsdn_t *tsdn, UNUSED rtree_t *rtree, +rtree_leaf_elm_extent_write(tsdn_t *tsdn, rtree_t *rtree, rtree_leaf_elm_t *elm, extent_t *extent) { #ifdef RTREE_LEAF_COMPACT uintptr_t old_bits = rtree_leaf_elm_bits_read(tsdn, rtree, elm, true); @@ -259,7 +259,7 @@ rtree_leaf_elm_extent_write(UNUSED tsdn_t *tsdn, UNUSED rtree_t *rtree, } static inline void -rtree_leaf_elm_szind_write(UNUSED tsdn_t *tsdn, UNUSED rtree_t *rtree, +rtree_leaf_elm_szind_write(tsdn_t *tsdn, rtree_t *rtree, rtree_leaf_elm_t *elm, szind_t szind) { assert(szind <= NSIZES); @@ -277,7 +277,7 @@ rtree_leaf_elm_szind_write(UNUSED tsdn_t *tsdn, UNUSED rtree_t *rtree, } static inline void -rtree_leaf_elm_slab_write(UNUSED tsdn_t *tsdn, UNUSED rtree_t *rtree, +rtree_leaf_elm_slab_write(tsdn_t *tsdn, rtree_t *rtree, rtree_leaf_elm_t *elm, bool slab) { #ifdef RTREE_LEAF_COMPACT uintptr_t old_bits = rtree_leaf_elm_bits_read(tsdn, rtree, elm, @@ -292,8 +292,8 @@ rtree_leaf_elm_slab_write(UNUSED tsdn_t *tsdn, UNUSED rtree_t *rtree, } static inline void -rtree_leaf_elm_write(tsdn_t *tsdn, rtree_t *rtree, rtree_leaf_elm_t *elm, - extent_t *extent, szind_t szind, bool slab) { +rtree_leaf_elm_write(tsdn_t *tsdn, rtree_t *rtree, + rtree_leaf_elm_t *elm, extent_t *extent, szind_t szind, bool slab) { #ifdef RTREE_LEAF_COMPACT uintptr_t bits = ((uintptr_t)szind << LG_VADDR) | ((uintptr_t)extent & (((uintptr_t)0x1 << LG_VADDR) - 1)) | diff --git a/include/jemalloc/internal/rtree_tsd.h b/include/jemalloc/internal/rtree_tsd.h index 93a75173..562e2929 100644 --- a/include/jemalloc/internal/rtree_tsd.h +++ b/include/jemalloc/internal/rtree_tsd.h @@ -26,7 +26,7 @@ * Zero initializer required for tsd initialization only. Proper initialization * done via rtree_ctx_data_init(). 
*/ -#define RTREE_CTX_ZERO_INITIALIZER {{{0}}, {{0}}} +#define RTREE_CTX_ZERO_INITIALIZER {{{0, 0}}, {{0, 0}}} typedef struct rtree_leaf_elm_s rtree_leaf_elm_t; diff --git a/include/jemalloc/internal/tcache_inlines.h b/include/jemalloc/internal/tcache_inlines.h index 0f6ab8cb..c426c567 100644 --- a/include/jemalloc/internal/tcache_inlines.h +++ b/include/jemalloc/internal/tcache_inlines.h @@ -40,7 +40,7 @@ tcache_event(tsd_t *tsd, tcache_t *tcache) { JEMALLOC_ALWAYS_INLINE void * tcache_alloc_small(tsd_t *tsd, arena_t *arena, tcache_t *tcache, - UNUSED size_t size, szind_t binind, bool zero, bool slow_path) { + size_t size, szind_t binind, bool zero, bool slow_path) { void *ret; cache_bin_t *bin; bool tcache_success; diff --git a/include/jemalloc/internal/tsd_generic.h b/include/jemalloc/internal/tsd_generic.h index 1e52ef76..cf73c0c7 100644 --- a/include/jemalloc/internal/tsd_generic.h +++ b/include/jemalloc/internal/tsd_generic.h @@ -77,7 +77,10 @@ tsd_wrapper_get(bool init) { abort(); } else { wrapper->initialized = false; + JEMALLOC_DIAGNOSTIC_PUSH + JEMALLOC_DIAGNOSTIC_IGNORE_MISSING_STRUCT_FIELD_INITIALIZERS tsd_t initializer = TSD_INITIALIZER; + JEMALLOC_DIAGNOSTIC_POP wrapper->val = initializer; } tsd_wrapper_set(wrapper); @@ -107,7 +110,10 @@ tsd_boot1(void) { tsd_boot_wrapper.initialized = false; tsd_cleanup(&tsd_boot_wrapper.val); wrapper->initialized = false; + JEMALLOC_DIAGNOSTIC_PUSH + JEMALLOC_DIAGNOSTIC_IGNORE_MISSING_STRUCT_FIELD_INITIALIZERS tsd_t initializer = TSD_INITIALIZER; + JEMALLOC_DIAGNOSTIC_POP wrapper->val = initializer; tsd_wrapper_set(wrapper); } diff --git a/include/jemalloc/internal/tsd_tls.h b/include/jemalloc/internal/tsd_tls.h index 0de64b7b..757aaa0e 100644 --- a/include/jemalloc/internal/tsd_tls.h +++ b/include/jemalloc/internal/tsd_tls.h @@ -39,7 +39,7 @@ tsd_get_allocates(void) { /* Get/set. */ JEMALLOC_ALWAYS_INLINE tsd_t * -tsd_get(UNUSED bool init) { +tsd_get(bool init) { assert(tsd_booted); return &tsd_tls; } diff --git a/src/arena.c b/src/arena.c index 49d86d2c..eefea0dc 100644 --- a/src/arena.c +++ b/src/arena.c @@ -11,6 +11,8 @@ #include "jemalloc/internal/size_classes.h" #include "jemalloc/internal/util.h" +JEMALLOC_DIAGNOSTIC_DISABLE_SPURIOUS + /******************************************************************************/ /* Data. 
*/ @@ -65,7 +67,7 @@ static void arena_bin_lower_slab(tsdn_t *tsdn, arena_t *arena, extent_t *slab, /******************************************************************************/ void -arena_basic_stats_merge(UNUSED tsdn_t *tsdn, arena_t *arena, unsigned *nthreads, +arena_basic_stats_merge(tsdn_t *tsdn, arena_t *arena, unsigned *nthreads, const char **dss, ssize_t *dirty_decay_ms, ssize_t *muzzy_decay_ms, size_t *nactive, size_t *ndirty, size_t *nmuzzy) { *nthreads += arena_nthreads_get(arena, false); @@ -752,7 +754,7 @@ static size_t arena_decay_stashed(tsdn_t *tsdn, arena_t *arena, extent_hooks_t **r_extent_hooks, arena_decay_t *decay, extents_t *extents, bool all, extent_list_t *decay_extents, bool is_background_thread) { - UNUSED size_t nmadvise, nunmapped; + size_t nmadvise, nunmapped; size_t npurged; if (config_stats) { @@ -843,7 +845,7 @@ arena_decay_to_limit(tsdn_t *tsdn, arena_t *arena, arena_decay_t *decay, size_t npurge = arena_stash_decayed(tsdn, arena, &extent_hooks, extents, npages_limit, npages_decay_max, &decay_extents); if (npurge != 0) { - UNUSED size_t npurged = arena_decay_stashed(tsdn, arena, + size_t npurged = arena_decay_stashed(tsdn, arena, &extent_hooks, decay, extents, all, &decay_extents, is_background_thread); assert(npurged == npurge); @@ -872,7 +874,7 @@ arena_decay_impl(tsdn_t *tsdn, arena_t *arena, arena_decay_t *decay, bool epoch_advanced = arena_maybe_decay(tsdn, arena, decay, extents, is_background_thread); - UNUSED size_t npages_new; + size_t npages_new; if (epoch_advanced) { /* Backlog is updated on epoch advance. */ npages_new = decay->backlog[SMOOTHSTEP_NSTEPS-1]; @@ -1508,7 +1510,7 @@ arena_dalloc_bin_slab(tsdn_t *tsdn, arena_t *arena, extent_t *slab, } static void -arena_bin_lower_slab(UNUSED tsdn_t *tsdn, arena_t *arena, extent_t *slab, +arena_bin_lower_slab(tsdn_t *tsdn, arena_t *arena, extent_t *slab, bin_t *bin) { assert(extent_nfree_get(slab) > 0); diff --git a/src/background_thread.c b/src/background_thread.c index 4613537c..feed8564 100644 --- a/src/background_thread.c +++ b/src/background_thread.c @@ -4,6 +4,8 @@ #include "jemalloc/internal/assert.h" +JEMALLOC_DIAGNOSTIC_DISABLE_SPURIOUS + /******************************************************************************/ /* Data. 
*/ @@ -78,7 +80,7 @@ background_thread_info_init(tsdn_t *tsdn, background_thread_info_t *info) { } static inline bool -set_current_thread_affinity(UNUSED int cpu) { +set_current_thread_affinity(int cpu) { #if defined(JEMALLOC_HAVE_SCHED_SETAFFINITY) cpu_set_t cpuset; CPU_ZERO(&cpuset); diff --git a/src/ctl.c b/src/ctl.c index 5c94cdbc..3f7dea16 100644 --- a/src/ctl.c +++ b/src/ctl.c @@ -1392,8 +1392,8 @@ label_return: \ #define CTL_RO_CGEN(c, n, v, t) \ static int \ -n##_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp, \ - size_t *oldlenp, void *newp, size_t newlen) { \ +n##_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, \ + void *oldp, size_t *oldlenp, void *newp, size_t newlen) { \ int ret; \ t oldval; \ \ @@ -1435,8 +1435,8 @@ label_return: \ */ #define CTL_RO_NL_CGEN(c, n, v, t) \ static int \ -n##_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp, \ - size_t *oldlenp, void *newp, size_t newlen) { \ +n##_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, \ + void *oldp, size_t *oldlenp, void *newp, size_t newlen) { \ int ret; \ t oldval; \ \ @@ -1454,8 +1454,8 @@ label_return: \ #define CTL_RO_NL_GEN(n, v, t) \ static int \ -n##_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp, \ - size_t *oldlenp, void *newp, size_t newlen) { \ +n##_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, \ + void *oldp, size_t *oldlenp, void *newp, size_t newlen) { \ int ret; \ t oldval; \ \ @@ -1489,8 +1489,8 @@ label_return: \ #define CTL_RO_CONFIG_GEN(n, t) \ static int \ -n##_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp, \ - size_t *oldlenp, void *newp, size_t newlen) { \ +n##_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, \ + void *oldp, size_t *oldlenp, void *newp, size_t newlen) { \ int ret; \ t oldval; \ \ @@ -1508,8 +1508,8 @@ label_return: \ CTL_RO_NL_GEN(version, JEMALLOC_VERSION, const char *) static int -epoch_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp, - size_t *oldlenp, void *newp, size_t newlen) { +epoch_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, + void *oldp, size_t *oldlenp, void *newp, size_t newlen) { int ret; UNUSED uint64_t newval; @@ -1527,8 +1527,9 @@ label_return: } static int -background_thread_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, - void *oldp, size_t *oldlenp, void *newp, size_t newlen) { +background_thread_ctl(tsd_t *tsd, const size_t *mib, + size_t miblen, void *oldp, size_t *oldlenp, + void *newp, size_t newlen) { int ret; bool oldval; @@ -1578,8 +1579,9 @@ label_return: } static int -max_background_threads_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, - void *oldp, size_t *oldlenp, void *newp, size_t newlen) { +max_background_threads_ctl(tsd_t *tsd, const size_t *mib, + size_t miblen, void *oldp, size_t *oldlenp, void *newp, + size_t newlen) { int ret; size_t oldval; @@ -1691,8 +1693,8 @@ CTL_RO_NL_CGEN(config_prof, opt_prof_leak, opt_prof_leak, bool) /******************************************************************************/ static int -thread_arena_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp, - size_t *oldlenp, void *newp, size_t newlen) { +thread_arena_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, + void *oldp, size_t *oldlenp, void *newp, size_t newlen) { int ret; arena_t *oldarena; unsigned newind, oldind; @@ -1756,8 +1758,9 @@ CTL_TSD_RO_NL_CGEN(config_stats, thread_deallocatedp, tsd_thread_deallocatedp_get, uint64_t *) static int -thread_tcache_enabled_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, - void *oldp, size_t *oldlenp, void *newp, 
size_t newlen) { +thread_tcache_enabled_ctl(tsd_t *tsd, const size_t *mib, + size_t miblen, void *oldp, size_t *oldlenp, void *newp, + size_t newlen) { int ret; bool oldval; @@ -1777,8 +1780,9 @@ label_return: } static int -thread_tcache_flush_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, - void *oldp, size_t *oldlenp, void *newp, size_t newlen) { +thread_tcache_flush_ctl(tsd_t *tsd, const size_t *mib, + size_t miblen, void *oldp, size_t *oldlenp, void *newp, + size_t newlen) { int ret; if (!tcache_available(tsd)) { @@ -1797,8 +1801,9 @@ label_return: } static int -thread_prof_name_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp, - size_t *oldlenp, void *newp, size_t newlen) { +thread_prof_name_ctl(tsd_t *tsd, const size_t *mib, + size_t miblen, void *oldp, size_t *oldlenp, void *newp, + size_t newlen) { int ret; if (!config_prof) { @@ -1828,8 +1833,9 @@ label_return: } static int -thread_prof_active_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp, - size_t *oldlenp, void *newp, size_t newlen) { +thread_prof_active_ctl(tsd_t *tsd, const size_t *mib, + size_t miblen, void *oldp, size_t *oldlenp, void *newp, + size_t newlen) { int ret; bool oldval; @@ -1858,8 +1864,8 @@ label_return: /******************************************************************************/ static int -tcache_create_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp, - size_t *oldlenp, void *newp, size_t newlen) { +tcache_create_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, + void *oldp, size_t *oldlenp, void *newp, size_t newlen) { int ret; unsigned tcache_ind; @@ -1876,8 +1882,8 @@ label_return: } static int -tcache_flush_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp, - size_t *oldlenp, void *newp, size_t newlen) { +tcache_flush_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, + void *oldp, size_t *oldlenp, void *newp, size_t newlen) { int ret; unsigned tcache_ind; @@ -1896,8 +1902,8 @@ label_return: } static int -tcache_destroy_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp, - size_t *oldlenp, void *newp, size_t newlen) { +tcache_destroy_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, + void *oldp, size_t *oldlenp, void *newp, size_t newlen) { int ret; unsigned tcache_ind; @@ -2299,8 +2305,9 @@ label_return: } static int -arena_i_retain_grow_limit_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, - void *oldp, size_t *oldlenp, void *newp, size_t newlen) { +arena_i_retain_grow_limit_ctl(tsd_t *tsd, const size_t *mib, + size_t miblen, void *oldp, size_t *oldlenp, void *newp, + size_t newlen) { int ret; unsigned arena_ind; arena_t *arena; @@ -2335,7 +2342,8 @@ label_return: } static const ctl_named_node_t * -arena_i_index(tsdn_t *tsdn, const size_t *mib, size_t miblen, size_t i) { +arena_i_index(tsdn_t *tsdn, const size_t *mib, size_t miblen, + size_t i) { const ctl_named_node_t *ret; malloc_mutex_lock(tsdn, &ctl_mtx); @@ -2360,8 +2368,8 @@ label_return: /******************************************************************************/ static int -arenas_narenas_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp, - size_t *oldlenp, void *newp, size_t newlen) { +arenas_narenas_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, + void *oldp, size_t *oldlenp, void *newp, size_t newlen) { int ret; unsigned narenas; @@ -2381,8 +2389,9 @@ label_return: } static int -arenas_decay_ms_ctl_impl(tsd_t *tsd, const size_t *mib, size_t miblen, - void *oldp, size_t *oldlenp, void *newp, size_t newlen, bool dirty) { +arenas_decay_ms_ctl_impl(tsd_t *tsd, 
const size_t *mib, + size_t miblen, void *oldp, size_t *oldlenp, void *newp, + size_t newlen, bool dirty) { int ret; if (oldp != NULL && oldlenp != NULL) { @@ -2430,7 +2439,8 @@ CTL_RO_NL_GEN(arenas_bin_i_size, bin_infos[mib[2]].reg_size, size_t) CTL_RO_NL_GEN(arenas_bin_i_nregs, bin_infos[mib[2]].nregs, uint32_t) CTL_RO_NL_GEN(arenas_bin_i_slab_size, bin_infos[mib[2]].slab_size, size_t) static const ctl_named_node_t * -arenas_bin_i_index(tsdn_t *tsdn, const size_t *mib, size_t miblen, size_t i) { +arenas_bin_i_index(tsdn_t *tsdn, const size_t *mib, + size_t miblen, size_t i) { if (i > NBINS) { return NULL; } @@ -2441,8 +2451,8 @@ CTL_RO_NL_GEN(arenas_nlextents, NSIZES - NBINS, unsigned) CTL_RO_NL_GEN(arenas_lextent_i_size, sz_index2size(NBINS+(szind_t)mib[2]), size_t) static const ctl_named_node_t * -arenas_lextent_i_index(tsdn_t *tsdn, const size_t *mib, size_t miblen, - size_t i) { +arenas_lextent_i_index(tsdn_t *tsdn, const size_t *mib, + size_t miblen, size_t i) { if (i > NSIZES - NBINS) { return NULL; } @@ -2450,8 +2460,8 @@ arenas_lextent_i_index(tsdn_t *tsdn, const size_t *mib, size_t miblen, } static int -arenas_create_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp, - size_t *oldlenp, void *newp, size_t newlen) { +arenas_create_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, + void *oldp, size_t *oldlenp, void *newp, size_t newlen) { int ret; extent_hooks_t *extent_hooks; unsigned arena_ind; @@ -2473,8 +2483,9 @@ label_return: } static int -arenas_lookup_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp, - size_t *oldlenp, void *newp, size_t newlen) { +arenas_lookup_ctl(tsd_t *tsd, const size_t *mib, + size_t miblen, void *oldp, size_t *oldlenp, void *newp, + size_t newlen) { int ret; unsigned arena_ind; void *ptr; @@ -2505,8 +2516,9 @@ label_return: /******************************************************************************/ static int -prof_thread_active_init_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, - void *oldp, size_t *oldlenp, void *newp, size_t newlen) { +prof_thread_active_init_ctl(tsd_t *tsd, const size_t *mib, + size_t miblen, void *oldp, size_t *oldlenp, void *newp, + size_t newlen) { int ret; bool oldval; @@ -2532,8 +2544,8 @@ label_return: } static int -prof_active_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp, - size_t *oldlenp, void *newp, size_t newlen) { +prof_active_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, + void *oldp, size_t *oldlenp, void *newp, size_t newlen) { int ret; bool oldval; @@ -2558,8 +2570,8 @@ label_return: } static int -prof_dump_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp, - size_t *oldlenp, void *newp, size_t newlen) { +prof_dump_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, + void *oldp, size_t *oldlenp, void *newp, size_t newlen) { int ret; const char *filename = NULL; @@ -2581,8 +2593,8 @@ label_return: } static int -prof_gdump_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp, - size_t *oldlenp, void *newp, size_t newlen) { +prof_gdump_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, + void *oldp, size_t *oldlenp, void *newp, size_t newlen) { int ret; bool oldval; @@ -2607,8 +2619,8 @@ label_return: } static int -prof_reset_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp, - size_t *oldlenp, void *newp, size_t newlen) { +prof_reset_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, + void *oldp, size_t *oldlenp, void *newp, size_t newlen) { int ret; size_t lg_sample = lg_prof_sample; @@ -2764,8 +2776,9 @@ 
RO_MUTEX_CTL_GEN(arenas_i_bins_j_mutex, /* Resets all mutex stats, including global, arena and bin mutexes. */ static int -stats_mutexes_reset_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, - void *oldp, size_t *oldlenp, void *newp, size_t newlen) { +stats_mutexes_reset_ctl(tsd_t *tsd, const size_t *mib, + size_t miblen, void *oldp, size_t *oldlenp, + void *newp, size_t newlen) { if (!config_stats) { return ENOENT; } @@ -2834,8 +2847,8 @@ CTL_RO_CGEN(config_stats, stats_arenas_i_bins_j_curslabs, arenas_i(mib[2])->astats->bstats[mib[4]].curslabs, size_t) static const ctl_named_node_t * -stats_arenas_i_bins_j_index(tsdn_t *tsdn, const size_t *mib, size_t miblen, - size_t j) { +stats_arenas_i_bins_j_index(tsdn_t *tsdn, const size_t *mib, + size_t miblen, size_t j) { if (j > NBINS) { return NULL; } @@ -2855,8 +2868,8 @@ CTL_RO_CGEN(config_stats, stats_arenas_i_lextents_j_curlextents, arenas_i(mib[2])->astats->lstats[mib[4]].curlextents, size_t) static const ctl_named_node_t * -stats_arenas_i_lextents_j_index(tsdn_t *tsdn, const size_t *mib, size_t miblen, - size_t j) { +stats_arenas_i_lextents_j_index(tsdn_t *tsdn, const size_t *mib, + size_t miblen, size_t j) { if (j > NSIZES - NBINS) { return NULL; } @@ -2864,7 +2877,8 @@ stats_arenas_i_lextents_j_index(tsdn_t *tsdn, const size_t *mib, size_t miblen, } static const ctl_named_node_t * -stats_arenas_i_index(tsdn_t *tsdn, const size_t *mib, size_t miblen, size_t i) { +stats_arenas_i_index(tsdn_t *tsdn, const size_t *mib, + size_t miblen, size_t i) { const ctl_named_node_t *ret; size_t a; diff --git a/src/extent.c b/src/extent.c index 09d6d771..4b1a6dfd 100644 --- a/src/extent.c +++ b/src/extent.c @@ -119,9 +119,13 @@ static void extent_record(tsdn_t *tsdn, arena_t *arena, /******************************************************************************/ -ph_gen(UNUSED, extent_avail_, extent_tree_t, extent_t, ph_link, +#define ATTR_NONE /* does nothing */ + +ph_gen(ATTR_NONE, extent_avail_, extent_tree_t, extent_t, ph_link, extent_esnead_comp) +#undef ATTR_NONE + typedef enum { lock_result_success, lock_result_failure, diff --git a/src/jemalloc.c b/src/jemalloc.c index 28d1344c..82c08877 100644 --- a/src/jemalloc.c +++ b/src/jemalloc.c @@ -970,6 +970,14 @@ malloc_conf_init(void) { } \ continue; \ } + /* + * One of the CONF_MIN macros below expands, in one of the use points, + * to "unsigned integer < 0", which is always false, triggering the + * GCC -Wtype-limits warning, which we disable here and re-enable below. 
+ */ + JEMALLOC_DIAGNOSTIC_PUSH + JEMALLOC_DIAGNOSTIC_IGNORE_TYPE_LIMITS + #define CONF_MIN_no(um, min) false #define CONF_MIN_yes(um, min) ((um) < (min)) #define CONF_MAX_no(um, max) false @@ -1246,6 +1254,8 @@ malloc_conf_init(void) { #undef CONF_HANDLE_SIZE_T #undef CONF_HANDLE_SSIZE_T #undef CONF_HANDLE_CHAR_P + /* Re-enable diagnostic "-Wtype-limits" */ + JEMALLOC_DIAGNOSTIC_POP } if (opt_abort_conf && had_conf_error) { malloc_abort_invalid_conf(); @@ -2992,7 +3002,7 @@ label_not_resized: JEMALLOC_EXPORT size_t JEMALLOC_NOTHROW JEMALLOC_ATTR(pure) -je_sallocx(const void *ptr, UNUSED int flags) { +je_sallocx(const void *ptr, int flags) { size_t usize; tsdn_t *tsdn; diff --git a/src/mutex.c b/src/mutex.c index 30222b3e..55e37ad4 100644 --- a/src/mutex.c +++ b/src/mutex.c @@ -46,7 +46,7 @@ JEMALLOC_EXPORT int _pthread_mutex_init_calloc_cb(pthread_mutex_t *mutex, void malloc_mutex_lock_slow(malloc_mutex_t *mutex) { mutex_prof_data_t *data = &mutex->prof_data; - UNUSED nstime_t before = NSTIME_ZERO_INITIALIZER; + nstime_t before = NSTIME_ZERO_INITIALIZER; if (ncpus == 1) { goto label_spin_done; diff --git a/src/rtree.c b/src/rtree.c index 53702cf7..4ae41fe2 100644 --- a/src/rtree.c +++ b/src/rtree.c @@ -39,7 +39,7 @@ rtree_node_dalloc_impl(tsdn_t *tsdn, rtree_t *rtree, rtree_node_elm_t *node) { /* Nodes are never deleted during normal operation. */ not_reached(); } -UNUSED rtree_node_dalloc_t *JET_MUTABLE rtree_node_dalloc = +rtree_node_dalloc_t *JET_MUTABLE rtree_node_dalloc = rtree_node_dalloc_impl; static rtree_leaf_elm_t * @@ -54,7 +54,7 @@ rtree_leaf_dalloc_impl(tsdn_t *tsdn, rtree_t *rtree, rtree_leaf_elm_t *leaf) { /* Leaves are never deleted during normal operation. */ not_reached(); } -UNUSED rtree_leaf_dalloc_t *JET_MUTABLE rtree_leaf_dalloc = +rtree_leaf_dalloc_t *JET_MUTABLE rtree_leaf_dalloc = rtree_leaf_dalloc_impl; #ifdef JEMALLOC_JET diff --git a/src/tcache.c b/src/tcache.c index af757540..d624d924 100644 --- a/src/tcache.c +++ b/src/tcache.c @@ -206,7 +206,7 @@ tcache_bin_flush_large(tsd_t *tsd, cache_bin_t *tbin, szind_t binind, /* Lock the arena associated with the first object. 
*/ extent_t *extent = item_extent[0]; arena_t *locked_arena = extent_arena_get(extent); - UNUSED bool idump; + bool idump; if (config_prof) { idump = false; diff --git a/src/tsd.c b/src/tsd.c index 4eceee79..f2b601dd 100644 --- a/src/tsd.c +++ b/src/tsd.c @@ -12,6 +12,10 @@ static unsigned ncleanups; static malloc_tsd_cleanup_t cleanups[MALLOC_TSD_CLEANUPS_MAX]; +/* TSD_INITIALIZER triggers "-Wmissing-field-initializer" */ +JEMALLOC_DIAGNOSTIC_PUSH +JEMALLOC_DIAGNOSTIC_IGNORE_MISSING_STRUCT_FIELD_INITIALIZERS + #ifdef JEMALLOC_MALLOC_THREAD_CLEANUP __thread tsd_t JEMALLOC_TLS_MODEL tsd_tls = TSD_INITIALIZER; __thread bool JEMALLOC_TLS_MODEL tsd_initialized = false; @@ -41,6 +45,7 @@ tsd_init_head_t tsd_init_head = { ql_head_initializer(blocks), MALLOC_MUTEX_INITIALIZER }; + tsd_wrapper_t tsd_boot_wrapper = { false, TSD_INITIALIZER @@ -48,6 +53,7 @@ tsd_wrapper_t tsd_boot_wrapper = { bool tsd_booted = false; #endif +JEMALLOC_DIAGNOSTIC_POP /******************************************************************************/ diff --git a/test/integration/aligned_alloc.c b/test/integration/aligned_alloc.c index 536b67ea..cfe1df9d 100644 --- a/test/integration/aligned_alloc.c +++ b/test/integration/aligned_alloc.c @@ -34,6 +34,17 @@ TEST_BEGIN(test_alignment_errors) { } TEST_END + +/* + * GCC "-Walloc-size-larger-than" warning detects when one of the memory + * allocation functions is called with a size larger than the maximum size that + * they support. Here we want to explicitly test that the allocation functions + * do indeed fail properly when this is the case, which triggers the warning. + * Therefore we disable the warning for these tests. + */ +JEMALLOC_DIAGNOSTIC_PUSH +JEMALLOC_DIAGNOSTIC_IGNORE_ALLOC_SIZE_LARGER_THAN + TEST_BEGIN(test_oom_errors) { size_t alignment, size; void *p; @@ -78,6 +89,9 @@ TEST_BEGIN(test_oom_errors) { } TEST_END +/* Re-enable the "-Walloc-size-larger-than=" warning */ +JEMALLOC_DIAGNOSTIC_POP + TEST_BEGIN(test_alignment_and_size) { #define NITER 4 size_t alignment, size, total; diff --git a/test/integration/mallocx.c b/test/integration/mallocx.c index 9fe3ad5d..ce5069a7 100644 --- a/test/integration/mallocx.c +++ b/test/integration/mallocx.c @@ -51,6 +51,16 @@ purge(void) { "Unexpected mallctl error"); } +/* + * GCC "-Walloc-size-larger-than" warning detects when one of the memory + * allocation functions is called with a size larger than the maximum size that + * they support. Here we want to explicitly test that the allocation functions + * do indeed fail properly when this is the case, which triggers the warning. + * Therefore we disable the warning for these tests. + */ +JEMALLOC_DIAGNOSTIC_PUSH +JEMALLOC_DIAGNOSTIC_IGNORE_ALLOC_SIZE_LARGER_THAN + TEST_BEGIN(test_overflow) { size_t largemax; @@ -145,6 +155,9 @@ TEST_BEGIN(test_oom) { } TEST_END +/* Re-enable the "-Walloc-size-larger-than=" warning */ +JEMALLOC_DIAGNOSTIC_POP + TEST_BEGIN(test_basic) { #define MAXSZ (((size_t)1) << 23) size_t sz; diff --git a/test/integration/overflow.c b/test/integration/overflow.c index 6a9785b2..748ebb67 100644 --- a/test/integration/overflow.c +++ b/test/integration/overflow.c @@ -1,5 +1,15 @@ #include "test/jemalloc_test.h" +/* + * GCC "-Walloc-size-larger-than" warning detects when one of the memory + * allocation functions is called with a size larger than the maximum size that + * they support. Here we want to explicitly test that the allocation functions + * do indeed fail properly when this is the case, which triggers the warning. 
+ * Therefore we disable the warning for these tests. + */ +JEMALLOC_DIAGNOSTIC_PUSH +JEMALLOC_DIAGNOSTIC_IGNORE_ALLOC_SIZE_LARGER_THAN + TEST_BEGIN(test_overflow) { unsigned nlextents; size_t mib[4]; @@ -39,6 +49,9 @@ TEST_BEGIN(test_overflow) { } TEST_END +/* Re-enable the "-Walloc-size-larger-than=" warning */ +JEMALLOC_DIAGNOSTIC_POP + int main(void) { return test( diff --git a/test/integration/rallocx.c b/test/integration/rallocx.c index 7821ca5f..08ed08d3 100644 --- a/test/integration/rallocx.c +++ b/test/integration/rallocx.c @@ -208,6 +208,16 @@ TEST_BEGIN(test_lg_align_and_zero) { } TEST_END +/* + * GCC "-Walloc-size-larger-than" warning detects when one of the memory + * allocation functions is called with a size larger than the maximum size that + * they support. Here we want to explicitly test that the allocation functions + * do indeed fail properly when this is the case, which triggers the warning. + * Therefore we disable the warning for these tests. + */ +JEMALLOC_DIAGNOSTIC_PUSH +JEMALLOC_DIAGNOSTIC_IGNORE_ALLOC_SIZE_LARGER_THAN + TEST_BEGIN(test_overflow) { size_t largemax; void *p; @@ -234,6 +244,9 @@ TEST_BEGIN(test_overflow) { } TEST_END +/* Re-enable the "-Walloc-size-larger-than=" warning */ +JEMALLOC_DIAGNOSTIC_POP + int main(void) { return test( diff --git a/test/unit/emitter.c b/test/unit/emitter.c index 535c7cf1..6ffd1c3a 100644 --- a/test/unit/emitter.c +++ b/test/unit/emitter.c @@ -347,11 +347,11 @@ static void emit_table_row(emitter_t *emitter) { emitter_begin(emitter); emitter_row_t row; - emitter_col_t abc = {emitter_justify_left, 10, emitter_type_title}; + emitter_col_t abc = {emitter_justify_left, 10, emitter_type_title, {0}, {0, 0}}; abc.str_val = "ABC title"; - emitter_col_t def = {emitter_justify_right, 15, emitter_type_title}; + emitter_col_t def = {emitter_justify_right, 15, emitter_type_title, {0}, {0, 0}}; def.str_val = "DEF title"; - emitter_col_t ghi = {emitter_justify_right, 5, emitter_type_title}; + emitter_col_t ghi = {emitter_justify_right, 5, emitter_type_title, {0}, {0, 0}}; ghi.str_val = "GHI"; emitter_row_init(&row);