Mirror of https://github.com/jemalloc/jemalloc.git, synced 2026-05-13 16:36:21 +03:00.
Extend per arena unused dirty page purging to manage unused dirty chunks in addition to unused dirty runs. Rather than immediately unmapping deallocated chunks (or purging them in the --disable-munmap case), store them in a separate set of trees, chunks_[sz]ad_dirty. Preferentially allocate dirty chunks. When excessive unused dirty pages accumulate, purge runs and chunks in integrated LRU order (and unmap chunks in the --enable-munmap case). Refactor extent_node_t to provide accessor functions.
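The accessor refactoring mentioned above shows up in the comparators below, which call extent_node_size_get() and extent_node_addr_get() rather than reaching into struct fields directly. As a rough, hypothetical sketch of what such accessors could look like (the field names en_addr and en_size are assumptions for illustration, not jemalloc's actual struct layout):

#include <stddef.h>

/*
 * Hypothetical layout for illustration only; the real extent_node_t is
 * defined in jemalloc's internal headers and carries additional fields.
 */
typedef struct extent_node_s extent_node_t;
struct extent_node_s {
	void	*en_addr;	/* Extent base address. */
	size_t	en_size;	/* Extent size in bytes. */
};

static inline void *
extent_node_addr_get(const extent_node_t *node)
{
	return (node->en_addr);
}

static inline size_t
extent_node_size_get(const extent_node_t *node)
{
	return (node->en_size);
}

Keeping field access behind accessors lets later layout changes (for example, packing flags into otherwise-unused bits) stay local to the getters and setters instead of touching every caller.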
38 lines · 1 KiB · C
#define JEMALLOC_EXTENT_C_
#include "jemalloc/internal/jemalloc_internal.h"

/******************************************************************************/

JEMALLOC_INLINE_C int
extent_szad_comp(extent_node_t *a, extent_node_t *b)
{
	int ret;
	size_t a_size = extent_node_size_get(a);
	size_t b_size = extent_node_size_get(b);

	ret = (a_size > b_size) - (a_size < b_size);
	if (ret == 0) {
		uintptr_t a_addr = (uintptr_t)extent_node_addr_get(a);
		uintptr_t b_addr = (uintptr_t)extent_node_addr_get(b);

		ret = (a_addr > b_addr) - (a_addr < b_addr);
	}

	return (ret);
}

/* Generate red-black tree functions. */
rb_gen(, extent_tree_szad_, extent_tree_t, extent_node_t, szad_link,
    extent_szad_comp)

JEMALLOC_INLINE_C int
extent_ad_comp(extent_node_t *a, extent_node_t *b)
{
	uintptr_t a_addr = (uintptr_t)extent_node_addr_get(a);
	uintptr_t b_addr = (uintptr_t)extent_node_addr_get(b);

	return ((a_addr > b_addr) - (a_addr < b_addr));
}

/* Generate red-black tree functions. */
rb_gen(, extent_tree_ad_, extent_tree_t, extent_node_t, ad_link, extent_ad_comp)
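The rb_gen() invocations above expand into a full set of tree operations (insert, remove, search, nsearch, iteration, and so on) named with the given prefixes. As a hedged usage sketch of why the size/address ordering is useful, the hypothetical helper below (building on the illustrative en_addr/en_size fields sketched earlier) uses the generated extent_tree_szad_nsearch() for best-fit lookup: nsearch returns the least node that compares greater than or equal to the key under extent_szad_comp(), i.e. the smallest extent that can satisfy the request, with ties broken by lowest address.

/*
 * Hypothetical helper for illustration: locate the smallest cached dirty
 * chunk of at least `size` bytes in a size/address-ordered tree.
 */
static extent_node_t *
dirty_chunk_best_fit(extent_tree_t *chunks_szad_dirty, size_t size)
{
	extent_node_t key;

	/* Reuses the illustrative field names from the sketch above. */
	key.en_addr = NULL;	/* Lowest address, so size ties match the first node. */
	key.en_size = size;

	/* Least node >= key under extent_szad_comp(): a best-fit match. */
	return (extent_tree_szad_nsearch(chunks_szad_dirty, &key));
}

If the returned node is non-NULL, a caller would typically remove it from the tree, carve off the requested size, and reinsert any remainder; that is a plausible recycle pattern consistent with the commit message's goal of preferentially reusing dirty chunks.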