Mark partially purged arena chunks as non-hugepage.

Add the pages_[no]huge() functions, which toggle huge page state via
madvise(..., MADV_[NO]HUGEPAGE) calls.
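For context, the madvise(2) flags these wrappers use behave roughly as in the
following minimal standalone sketch. This is not jemalloc code; it assumes a
Linux kernel built with transparent huge page support (MADV_HUGEPAGE and
MADV_NOHUGEPAGE are Linux-specific):

#include <sys/mman.h>
#include <stdio.h>

int
main(void)
{
	size_t size = 2 * 1024 * 1024;	/* One 2 MiB huge page worth. */
	/* mmap() returns page-aligned memory, as madvise() requires. */
	void *addr = mmap(NULL, size, PROT_READ|PROT_WRITE,
	    MAP_PRIVATE|MAP_ANONYMOUS, -1, 0);
	if (addr == MAP_FAILED) {
		perror("mmap");
		return (1);
	}
	/* Allow the kernel to back this range with huge pages... */
	if (madvise(addr, size, MADV_HUGEPAGE) != 0)
		perror("madvise(MADV_HUGEPAGE)");
	/* ...and later forbid huge pages for the same range. */
	if (madvise(addr, size, MADV_NOHUGEPAGE) != 0)
		perror("madvise(MADV_NOHUGEPAGE)");
	munmap(addr, size);
	return (0);
}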

The first time a page run is purged from within an arena chunk, call
pages_nohuge() to tell the kernel to make no further attempts to back
the chunk with huge pages.  Upon arena chunk deletion, restore the
associated virtual memory to its original state via pages_huge().
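In outline, the per-chunk state machine this commit implements looks as
follows (a condensed sketch of the diffs below, not verbatim source):

/* Chunk allocation: chunks start out eligible for THP backing. */
chunk->hugepage = true;

/* First purge of any run within the chunk: opt the whole chunk out. */
if (chunk->hugepage) {
	pages_nohuge(chunk, chunksize);	/* madvise(..., MADV_NOHUGEPAGE) */
	chunk->hugepage = false;
}

/* Chunk discard: restore the default so reused VM can be THP-backed. */
if (!chunk->hugepage)
	pages_huge(chunk, chunksize);	/* madvise(..., MADV_HUGEPAGE) */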

This resolves #243.
Jason Evans 2016-11-17 13:36:17 -08:00
parent fc11f3cb84
commit e98a620c59
9 changed files with 110 additions and 5 deletions

src/arena.c

@@ -664,6 +664,8 @@ arena_chunk_init_hard(tsdn_t *tsdn, arena_t *arena)
 	if (chunk == NULL)
 		return (NULL);
 
+	chunk->hugepage = true;
+
 	/*
 	 * Initialize the map to contain one maximal free untouched run.  Mark
 	 * the pages as zeroed if arena_chunk_alloc_internal() returned a zeroed
@@ -727,13 +729,14 @@ arena_chunk_alloc(tsdn_t *tsdn, arena_t *arena)
 static void
 arena_chunk_discard(tsdn_t *tsdn, arena_t *arena, arena_chunk_t *chunk)
 {
-	size_t sn;
+	size_t sn, hugepage;
 	bool committed;
 	chunk_hooks_t chunk_hooks = CHUNK_HOOKS_INITIALIZER;
 
 	chunk_deregister(chunk, &chunk->node);
 
 	sn = extent_node_sn_get(&chunk->node);
+	hugepage = chunk->hugepage;
 	committed = (arena_mapbits_decommitted_get(chunk, map_bias) == 0);
 	if (!committed) {
 		/*
@@ -746,6 +749,14 @@ arena_chunk_discard(tsdn_t *tsdn, arena_t *arena, arena_chunk_t *chunk)
 		chunk_hooks.decommit(chunk, chunksize, 0, map_bias << LG_PAGE,
 		    arena->ind);
 	}
+	if (!hugepage) {
+		/*
+		 * Convert chunk back to the default state, so that all
+		 * subsequent chunk allocations start out with chunks that can
+		 * be backed by transparent huge pages.
+		 */
+		pages_huge(chunk, chunksize);
+	}
 
 	chunk_dalloc_cache(tsdn, arena, &chunk_hooks, (void *)chunk, chunksize,
 	    sn, committed);
@@ -1682,6 +1693,17 @@ arena_purge_stashed(tsdn_t *tsdn, arena_t *arena, chunk_hooks_t *chunk_hooks,
 			run_size = arena_mapbits_large_size_get(chunk, pageind);
 			npages = run_size >> LG_PAGE;
 
+			/*
+			 * If this is the first run purged within chunk, mark
+			 * the chunk as non-huge.  This will prevent all use of
+			 * transparent huge pages for this chunk until the chunk
+			 * as a whole is deallocated.
+			 */
+			if (chunk->hugepage) {
+				pages_nohuge(chunk, chunksize);
+				chunk->hugepage = false;
+			}
+
 			assert(pageind + npages <= chunk_npages);
 			assert(!arena_mapbits_decommitted_get(chunk, pageind));
 			assert(!arena_mapbits_decommitted_get(chunk,

src/pages.c

@@ -170,7 +170,8 @@ pages_purge(void *addr, size_t size)
 #ifdef _WIN32
 	VirtualAlloc(addr, size, MEM_RESET, PAGE_READWRITE);
 	unzeroed = true;
-#elif defined(JEMALLOC_HAVE_MADVISE)
+#elif (defined(JEMALLOC_PURGE_MADVISE_FREE) || \
+    defined(JEMALLOC_PURGE_MADVISE_DONTNEED))
 #  if defined(JEMALLOC_PURGE_MADVISE_FREE)
 #    define JEMALLOC_MADV_PURGE MADV_FREE
 #    define JEMALLOC_MADV_ZEROS false
@@ -191,6 +192,34 @@ pages_purge(void *addr, size_t size)
 	return (unzeroed);
 }
 
+bool
+pages_huge(void *addr, size_t size)
+{
+
+	assert(PAGE_ADDR2BASE(addr) == addr);
+	assert(PAGE_CEILING(size) == size);
+
+#ifdef JEMALLOC_THP
+	return (madvise(addr, size, MADV_HUGEPAGE) != 0);
+#else
+	return (false);
+#endif
+}
+
+bool
+pages_nohuge(void *addr, size_t size)
+{
+
+	assert(PAGE_ADDR2BASE(addr) == addr);
+	assert(PAGE_CEILING(size) == size);
+
+#ifdef JEMALLOC_THP
+	return (madvise(addr, size, MADV_NOHUGEPAGE) != 0);
+#else
+	return (false);
+#endif
+}
+
 #ifdef JEMALLOC_SYSCTL_VM_OVERCOMMIT
 static bool
 os_overcommits_sysctl(void)