From b43b7750a6e08706aeb61d83ff1e1eb0c81c910b Mon Sep 17 00:00:00 2001
From: Jason Evans
Date: Fri, 4 Jun 2010 15:10:43 -0700
Subject: [PATCH 1/3] Fix the libunwind version of prof_backtrace().

Fix the libunwind version of prof_backtrace() to set the backtrace depth
for all possible code paths.  This fixes the zero-length backtrace
problem when using libunwind.
---
 jemalloc/src/prof.c | 9 ++++-----
 1 file changed, 4 insertions(+), 5 deletions(-)

diff --git a/jemalloc/src/prof.c b/jemalloc/src/prof.c
index 88e8f44a..6d6910ed 100644
--- a/jemalloc/src/prof.c
+++ b/jemalloc/src/prof.c
@@ -239,16 +239,15 @@ prof_backtrace(prof_bt_t *bt, unsigned nignore, unsigned max)
 	}
 
 	/*
-	 * Iterate over stack frames until there are no more. Heap-allocate
-	 * and iteratively grow a larger bt if necessary.
+	 * Iterate over stack frames until there are no more, or until no space
+	 * remains in bt.
 	 */
 	for (i = 0; i < max; i++) {
 		unw_get_reg(&cursor, UNW_REG_IP, (unw_word_t *)&bt->vec[i]);
+		bt->len++;
 		err = unw_step(&cursor);
-		if (err <= 0) {
-			bt->len = i;
+		if (err <= 0)
 			break;
-		}
 	}
 }
 #else

From 2541e1b083a81f29554dcba7b2eaa1cc9889219a Mon Sep 17 00:00:00 2001
From: Jason Evans
Date: Thu, 22 Jul 2010 11:35:59 -0700
Subject: [PATCH 2/3] Add a missing mutex unlock in malloc_init_hard().

If multiple threads race to initialize malloc, the loser(s) busy-wait
until initialization is complete.  Add a missing mutex unlock so that
the loser(s) properly release the initialization mutex.  Under some
race conditions, this flaw could have caused one or more threads to
become permanently blocked.

Reported by Terrell Magee.
---
 jemalloc/src/jemalloc.c | 1 +
 1 file changed, 1 insertion(+)

diff --git a/jemalloc/src/jemalloc.c b/jemalloc/src/jemalloc.c
index bf2ace39..b36590dd 100644
--- a/jemalloc/src/jemalloc.c
+++ b/jemalloc/src/jemalloc.c
@@ -324,6 +324,7 @@ malloc_init_hard(void)
 			CPU_SPINWAIT;
 			malloc_mutex_lock(&init_lock);
 		} while (malloc_initialized == false);
+		malloc_mutex_unlock(&init_lock);
 		return (false);
 	}
 

From dcd15098a8adfa6e44d7d1d041df968fb5fe9d82 Mon Sep 17 00:00:00 2001
From: Jason Evans
Date: Thu, 5 Aug 2010 12:13:42 -0700
Subject: [PATCH 3/3] Move assert() calls up in arena_run_reg_alloc().

Move assert() calls up in arena_run_reg_alloc(), so that a corrupt
pointer will likely be caught by an assertion *before* it is
dereferenced.
---
 jemalloc/src/arena.c | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/jemalloc/src/arena.c b/jemalloc/src/arena.c
index e4142267..ee859fcb 100644
--- a/jemalloc/src/arena.c
+++ b/jemalloc/src/arena.c
@@ -254,7 +254,6 @@ arena_run_reg_alloc(arena_run_t *run, arena_bin_t *bin)
 	run->nfree--;
 	ret = run->avail;
 	if (ret != NULL) {
-		run->avail = *(void **)ret;
 		/* Double free can cause assertion failure.*/
 		assert(ret != NULL);
 		/* Write-after free can cause assertion failure. */
@@ -264,6 +263,7 @@
 		assert(((uintptr_t)ret - ((uintptr_t)run +
 		    (uintptr_t)bin->reg0_offset)) % (uintptr_t)bin->reg_size ==
 		    0);
+		run->avail = *(void **)ret;
 		return (ret);
 	}
 	ret = run->next;
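
For reference, below is a minimal sketch, separate from the patches
themselves, of the initialization race that PATCH 2/3 addresses.  The
identifiers (init_hard, init_lock, initialized, initializing, bootstrap)
are hypothetical stand-ins for the jemalloc internals, with pthread
primitives in place of jemalloc's malloc_mutex_*() wrappers and
CPU_SPINWAIT; the point is only that a losing thread re-acquires the
mutex on its final trip through the do/while loop and therefore must
release it before returning.

/*
 * Sketch of a one-time initialization race; not jemalloc code.
 * Returns false on success, mirroring malloc_init_hard()'s convention.
 */
#include <pthread.h>
#include <sched.h>
#include <stdbool.h>

static pthread_mutex_t init_lock = PTHREAD_MUTEX_INITIALIZER;
static bool initialized = false;
static bool initializing = false;

static void
bootstrap(void)
{
	/* One-time setup would go here. */
}

static bool
init_hard(void)
{
	pthread_mutex_lock(&init_lock);
	if (initialized) {
		/* Another thread already finished; nothing to do. */
		pthread_mutex_unlock(&init_lock);
		return (false);
	}
	if (initializing) {
		/* Lost the race: spin until the winner finishes. */
		do {
			pthread_mutex_unlock(&init_lock);
			sched_yield();
			pthread_mutex_lock(&init_lock);
		} while (initialized == false);
		/* Without this unlock, the loser returns holding the mutex. */
		pthread_mutex_unlock(&init_lock);
		return (false);
	}
	/* Won the race: initialize, then publish the result. */
	initializing = true;
	bootstrap();
	initialized = true;
	pthread_mutex_unlock(&init_lock);
	return (false);
}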