mirror of
https://github.com/jemalloc/jemalloc.git
synced 2026-05-01 09:37:50 +03:00
Suppress tdata creation in reentrancy
This change suppresses tdata initialization and prof sample threshold updates in interrupting malloc calls. Interrupting calls have no need for tdata. Delaying tdata creation aligns better with our lazy tdata creation principle; it also helps us regain control from interrupting calls more quickly and reduces the risk of delegating tdata creation to an interrupting call.
This commit is contained in:
parent
beb7c16e94
commit
66e07f986d
3 changed files with 23 additions and 6 deletions
|
|
@ -22,6 +22,7 @@ prof_tdata_get(tsd_t *tsd, bool create) {
|
|||
|
||||
tdata = tsd_prof_tdata_get(tsd);
|
||||
if (create) {
|
||||
assert(tsd_reentrancy_level_get(tsd) == 0);
|
||||
if (unlikely(tdata == NULL)) {
|
||||
if (tsd_nominal(tsd)) {
|
||||
tdata = prof_tdata_init(tsd);
|
||||
|
|
@ -109,7 +110,11 @@ prof_sample_accum_update(tsd_t *tsd, size_t usize, bool update,
|
|||
return true;
|
||||
}
|
||||
|
||||
bool booted = tsd_prof_tdata_get(tsd);
|
||||
if (tsd_reentrancy_level_get(tsd) > 0) {
|
||||
return true;
|
||||
}
|
||||
|
||||
bool booted = prof_tdata_get(tsd, false);
|
||||
tdata = prof_tdata_get(tsd, true);
|
||||
if (unlikely((uintptr_t)tdata <= (uintptr_t)PROF_TDATA_STATE_MAX)) {
|
||||
tdata = NULL;
|
||||
|
|
@ -132,9 +137,6 @@ prof_sample_accum_update(tsd_t *tsd, size_t usize, bool update,
|
|||
return true;
|
||||
}
|
||||
|
||||
if (tsd_reentrancy_level_get(tsd) > 0) {
|
||||
return true;
|
||||
}
|
||||
/* Compute new sample threshold. */
|
||||
if (update) {
|
||||
prof_sample_threshold_update(tdata);
|
||||
|
|
|
|||
17
src/prof.c
17
src/prof.c
|
|
@ -127,10 +127,15 @@ prof_tctx_should_destroy(tsdn_t *tsdn, prof_tctx_t *tctx) {
|
|||
|
||||
void
|
||||
prof_alloc_rollback(tsd_t *tsd, prof_tctx_t *tctx, bool updated) {
|
||||
prof_tdata_t *tdata;
|
||||
|
||||
cassert(config_prof);
|
||||
|
||||
if (tsd_reentrancy_level_get(tsd) > 0) {
|
||||
assert((uintptr_t)tctx == (uintptr_t)1U);
|
||||
return;
|
||||
}
|
||||
|
||||
prof_tdata_t *tdata;
|
||||
|
||||
if (updated) {
|
||||
/*
|
||||
* Compute a new sample threshold. This isn't very important in
|
||||
|
|
@ -810,6 +815,8 @@ prof_active_set(tsdn_t *tsdn, bool active) {
|
|||
|
||||
const char *
|
||||
prof_thread_name_get(tsd_t *tsd) {
|
||||
assert(tsd_reentrancy_level_get(tsd) == 0);
|
||||
|
||||
prof_tdata_t *tdata;
|
||||
|
||||
tdata = prof_tdata_get(tsd, true);
|
||||
|
|
@ -821,6 +828,8 @@ prof_thread_name_get(tsd_t *tsd) {
|
|||
|
||||
int
|
||||
prof_thread_name_set(tsd_t *tsd, const char *thread_name) {
|
||||
assert(tsd_reentrancy_level_get(tsd) == 0);
|
||||
|
||||
prof_tdata_t *tdata;
|
||||
unsigned i;
|
||||
char *s;
|
||||
|
|
@ -859,6 +868,8 @@ prof_thread_name_set(tsd_t *tsd, const char *thread_name) {
|
|||
|
||||
bool
|
||||
prof_thread_active_get(tsd_t *tsd) {
|
||||
assert(tsd_reentrancy_level_get(tsd) == 0);
|
||||
|
||||
prof_tdata_t *tdata;
|
||||
|
||||
tdata = prof_tdata_get(tsd, true);
|
||||
|
|
@ -870,6 +881,8 @@ prof_thread_active_get(tsd_t *tsd) {
|
|||
|
||||
bool
|
||||
prof_thread_active_set(tsd_t *tsd, bool active) {
|
||||
assert(tsd_reentrancy_level_get(tsd) == 0);
|
||||
|
||||
prof_tdata_t *tdata;
|
||||
|
||||
tdata = prof_tdata_get(tsd, true);
|
||||
|
|
|
|||
|
|
@ -1199,6 +1199,8 @@ prof_bt_keycomp(const void *k1, const void *k2) {
|
|||
prof_tdata_t *
|
||||
prof_tdata_init_impl(tsd_t *tsd, uint64_t thr_uid, uint64_t thr_discrim,
|
||||
char *thread_name, bool active) {
|
||||
assert(tsd_reentrancy_level_get(tsd) == 0);
|
||||
|
||||
prof_tdata_t *tdata;
|
||||
|
||||
cassert(config_prof);
|
||||
|
|
|
|||
Loading…
Add table
Add a link
Reference in a new issue