tcmalloc: ability to disable fallback allocator in memfs

Whenever the tcmalloc memfs allocator runs out of hugepage memory, it falls
back to the default system allocator, which allocates from the normal
anonymous memory pool (small pages). For use cases that exclusively want
hugepage-backed memory, such as using tcmalloc in combination with Intel SPDK,
this is problematic.

This change adds an environment variable, "TCMALLOC_MEMFS_DISABLE_FALLBACK",
which when set to true ensures that tcmalloc does not fall back to the system
allocator when the system runs out of hugepages. It is false by default, so
all other tcmalloc consumers will see no change in behavior.
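
As a quick illustration (not part of the patch), a consumer might exercise the
new knob as sketched below. This assumes the memfs allocator is enabled via
TCMALLOC_MEMFS_MALLOC_PATH, and that once the system allocator refuses to grow
the heap, tcmalloc ultimately fails the user-level allocation; the program and
paths here are hypothetical.

// Sketch only: probe whether allocations fail instead of falling back.
// Run as, e.g. (paths illustrative):
//   TCMALLOC_MEMFS_MALLOC_PATH=/mnt/hugepages/tc \
//   TCMALLOC_MEMFS_DISABLE_FALLBACK=true ./probe
#include <cstdio>
#include <cstdlib>

int main() {
  // With fallback disabled, once hugepage memory is exhausted a large
  // malloc should return NULL rather than be served from small pages.
  void* p = malloc(64 << 20);  // 64 MiB
  if (p == NULL) {
    fprintf(stderr, "allocation failed: hugepages exhausted, no fallback\n");
    return 1;
  }
  free(p);
  return 0;
}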

For those curious, you can see part of our upstream SPDK work here: 
https://review.gerrithub.io/c/spdk/spdk/+/475943
commit b7607ee7d4
parent 1bfcb5bc3a
Author: Jon Kohler
Date:   2020-01-19 22:09:08 -05:00
Committed-by: Aliaksey Kandratsenka


@@ -85,6 +85,10 @@ DEFINE_bool(memfs_malloc_ignore_mmap_fail,
 DEFINE_bool(memfs_malloc_map_private,
             EnvToBool("TCMALLOC_MEMFS_MAP_PRIVATE", false),
             "Use MAP_PRIVATE with mmap");
+DEFINE_bool(memfs_malloc_disable_fallback,
+            EnvToBool("TCMALLOC_MEMFS_DISABLE_FALLBACK", false),
+            "If we run out of hugepage memory don't fallback to default "
+            "allocator.");
 
 // Hugetlbfs based allocator for tcmalloc
 class HugetlbSysAllocator: public SysAllocator {
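
Note that the default value is computed by EnvToBool when the flag is defined,
so the environment variable must be set before the process starts; calling
setenv() from main() is too late. For intuition, a minimal sketch of an
EnvToBool-style parser follows (the real helper lives in gperftools'
commandline-flags internals and may differ in detail):

#include <cstdlib>
#include <cstring>

// Sketch: treat values starting with 't', 'T', 'y', 'Y', or '1' as true,
// so both TCMALLOC_MEMFS_DISABLE_FALLBACK=true and =1 would enable it.
static bool EnvToBoolSketch(const char* name, bool default_value) {
  const char* value = getenv(name);
  if (value == NULL || value[0] == '\0') return default_value;
  return strchr("tTyY1", value[0]) != NULL;
}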
@@ -120,13 +124,14 @@ static union {
 // us with an internal lock held (see tcmalloc/system-alloc.cc).
 void* HugetlbSysAllocator::Alloc(size_t size, size_t *actual_size,
                                  size_t alignment) {
-  if (failed_) {
+  if (!FLAGS_memfs_malloc_disable_fallback && failed_) {
     return fallback_->Alloc(size, actual_size, alignment);
   }
 
   // We don't respond to allocation requests smaller than big_page_size_ unless
   // the caller is ok to take more than they asked for. Used by MetaDataAlloc.
-  if (actual_size == NULL && size < big_page_size_) {
+  if (!FLAGS_memfs_malloc_disable_fallback &&
+      actual_size == NULL && size < big_page_size_) {
     return fallback_->Alloc(size, actual_size, alignment);
   }
@@ -135,13 +140,15 @@ void* HugetlbSysAllocator::Alloc(size_t size, size_t *actual_size,
   if (new_alignment < big_page_size_) new_alignment = big_page_size_;
   size_t aligned_size = ((size + new_alignment - 1) /
                          new_alignment) * new_alignment;
-  if (aligned_size < size) {
+  if (!FLAGS_memfs_malloc_disable_fallback && aligned_size < size) {
     return fallback_->Alloc(size, actual_size, alignment);
   }
 
   void* result = AllocInternal(aligned_size, actual_size, new_alignment);
   if (result != NULL) {
     return result;
+  } else if (FLAGS_memfs_malloc_disable_fallback) {
+    return NULL;
   }
   Log(kLog, __FILE__, __LINE__,
       "HugetlbSysAllocator: (failed, allocated)", failed_, hugetlb_base_);