amputate unused LowLevelAlloc flags
commit 2cd876cdfd (parent 6babeb1a12)
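In short: the int32_t flags parameter (and the kCallMallocHook / kAsyncSignalSafe values that could be or'ed into it) is dropped from LowLevelAlloc's arena API, and the malloc-hook and async-signal-safe code paths keyed off those flags go away. A minimal before/after sketch of the call-site change, lifted from the hunks below:

    // Before: every caller passed a flags argument, typically 0.
    arena_ = LowLevelAlloc::NewArena(0, LowLevelAlloc::DefaultArena());
    emergency_arena = LowLevelAlloc::NewArenaWithCustomAlloc(
        0, LowLevelAlloc::DefaultArena(), allocator);

    // After: the flags parameter is gone.
    arena_ = LowLevelAlloc::NewArena(LowLevelAlloc::DefaultArena());
    emergency_arena = LowLevelAlloc::NewArenaWithCustomAlloc(
        LowLevelAlloc::DefaultArena(), allocator);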
@@ -182,7 +182,6 @@ struct LowLevelAlloc::Arena {
   AllocList freelist;         // head of free list; sorted by addr (under mu)
   int32_t allocation_count{}; // count of allocated blocks (under mu)
-  int32_t flags{};            // flags passed to NewArena (ro after init)
   size_t pagesize{};          // ==getpagesize() (init under mu, then ro)
   size_t roundup{};           // lowest power of 2 >= max(16,sizeof (AllocList))
                               // (init under mu, then ro)
@@ -196,19 +195,13 @@ struct LowLevelAlloc::Arena {
 // pointer.
 static struct LowLevelAlloc::Arena default_arena;
 
-// Non-malloc-hooked arenas: used only to allocate metadata for arenas that
-// do not want malloc hook reporting, so that for them there's no malloc hook
-// reporting even during arena creation.
-static struct LowLevelAlloc::Arena unhooked_arena;
-static struct LowLevelAlloc::Arena unhooked_async_sig_safe_arena;
-
 namespace {
 
 class DefaultPagesAllocator : public LowLevelAlloc::PagesAllocator {
  public:
   virtual ~DefaultPagesAllocator() {};
-  virtual void *MapPages(int32_t flags, size_t size);
-  virtual void UnMapPages(int32_t flags, void *addr, size_t size);
+  void *MapPages(size_t size) override;
+  void UnMapPages(void *addr, size_t size) override;
 };
 
 }
@@ -222,38 +215,16 @@ namespace {
  public:
   explicit ArenaLock(LowLevelAlloc::Arena *arena)
       EXCLUSIVE_LOCK_FUNCTION(arena->mu)
-      : left_(false), mask_valid_(false), arena_(arena) {
-    if ((arena->flags & LowLevelAlloc::kAsyncSignalSafe) != 0) {
-      // We've decided not to support async-signal-safe arena use until
-      // there a demonstrated need.  Here's how one could do it though
-      // (would need to be made more portable).
-#if 0
-      sigset_t all;
-      sigfillset(&all);
-      this->mask_valid_ =
-          (pthread_sigmask(SIG_BLOCK, &all, &this->mask_) == 0);
-#else
-      RAW_CHECK(false, "We do not yet support async-signal-safe arena.");
-#endif
-    }
+      : left_(false), arena_(arena) {
     this->arena_->mu.Lock();
   }
   ~ArenaLock() { RAW_CHECK(this->left_, "haven't left Arena region"); }
   void Leave() UNLOCK_FUNCTION() {
     this->arena_->mu.Unlock();
-#if 0
-    if (this->mask_valid_) {
-      pthread_sigmask(SIG_SETMASK, &this->mask_, 0);
-    }
-#endif
     this->left_ = true;
   }
  private:
   bool left_;       // whether left region
-  bool mask_valid_;
-#if 0
-  sigset_t mask_;   // old mask of blocked signals
-#endif
   LowLevelAlloc::Arena *arena_;
   DISALLOW_COPY_AND_ASSIGN(ArenaLock);
 };
@@ -284,43 +255,23 @@ static void ArenaInit(LowLevelAlloc::Arena *arena) {
     arena->freelist.levels = 0;
     memset(arena->freelist.next, 0, sizeof (arena->freelist.next));
     arena->allocation_count = 0;
-    if (arena == &default_arena) {
-      // Default arena should be hooked, e.g. for heap-checker to trace
-      // pointer chains through objects in the default arena.
-      arena->flags = LowLevelAlloc::kCallMallocHook;
-    } else if (arena == &unhooked_async_sig_safe_arena) {
-      arena->flags = LowLevelAlloc::kAsyncSignalSafe;
-    } else {
-      arena->flags = 0;   // other arenas' flags may be overridden by client,
-                          // but unhooked_arena will have 0 in 'flags'.
-    }
     arena->allocator = LowLevelAlloc::GetDefaultPagesAllocator();
   }
 }
 
 // L < meta_data_arena->mu
-LowLevelAlloc::Arena *LowLevelAlloc::NewArena(int32_t flags,
-                                              Arena *meta_data_arena) {
-  return NewArenaWithCustomAlloc(flags, meta_data_arena, NULL);
+LowLevelAlloc::Arena *LowLevelAlloc::NewArena(Arena *meta_data_arena) {
+  return NewArenaWithCustomAlloc(meta_data_arena, NULL);
 }
 
 // L < meta_data_arena->mu
-LowLevelAlloc::Arena *LowLevelAlloc::NewArenaWithCustomAlloc(int32_t flags,
-                                                             Arena *meta_data_arena,
+LowLevelAlloc::Arena *LowLevelAlloc::NewArenaWithCustomAlloc(Arena *meta_data_arena,
                                                              PagesAllocator *allocator) {
   RAW_CHECK(meta_data_arena != 0, "must pass a valid arena");
-  if (meta_data_arena == &default_arena) {
-    if ((flags & LowLevelAlloc::kAsyncSignalSafe) != 0) {
-      meta_data_arena = &unhooked_async_sig_safe_arena;
-    } else if ((flags & LowLevelAlloc::kCallMallocHook) == 0) {
-      meta_data_arena = &unhooked_arena;
-    }
-  }
   // Arena(0) uses the constructor for non-static contexts
   Arena *result =
     new (AllocWithArena(sizeof (*result), meta_data_arena)) Arena();
   ArenaInit(result);
-  result->flags = flags;
   if (allocator) {
     result->allocator = allocator;
   }
@@ -329,7 +280,7 @@ LowLevelAlloc::Arena *LowLevelAlloc::NewArenaWithCustomAlloc(int32_t flags,
 
 // L < arena->mu, L < arena->arena->mu
 bool LowLevelAlloc::DeleteArena(Arena *arena) {
-  RAW_CHECK(arena != 0 && arena != &default_arena && arena != &unhooked_arena,
+  RAW_CHECK(arena != nullptr && arena != &default_arena,
             "may not delete default arena");
   ArenaLock section(arena);
   bool empty = (arena->allocation_count == 0);
@@ -348,11 +299,11 @@ bool LowLevelAlloc::DeleteArena(Arena *arena) {
                 "empty arena has non-page-aligned block size");
       RAW_CHECK(reinterpret_cast<intptr_t>(region) % arena->pagesize == 0,
                 "empty arena has non-page-aligned block");
-      int munmap_result = tcmalloc::DirectMUnMap((arena->flags & LowLevelAlloc::kAsyncSignalSafe) == 0,
-                                                 region, size);
+      int munmap_result = tcmalloc::DirectMUnMap(true, region, size);
       RAW_CHECK(munmap_result == 0,
                 "LowLevelAlloc::DeleteArena: munmap failed address");
     }
     // TODO: remember which meta data arena was used?
     Free(arena);
   }
   return empty;
@@ -431,9 +382,6 @@ void LowLevelAlloc::Free(void *v) {
     RAW_CHECK(f->header.magic == Magic(kMagicAllocated, &f->header),
               "bad magic number in Free()");
     LowLevelAlloc::Arena *arena = f->header.arena;
-    if ((arena->flags & kCallMallocHook) != 0) {
-      MallocHook::InvokeDeleteHook(v);
-    }
     ArenaLock section(arena);
     AddToFreelist(v, arena);
     RAW_CHECK(arena->allocation_count > 0, "nothing in arena to free");
@@ -470,7 +418,7 @@ static void *DoAllocWithArena(size_t request, LowLevelAlloc::Arena *arena) {
       // mmap generous 64K chunks to decrease
       // the chances/impact of fragmentation:
       size_t new_pages_size = RoundUp(req_rnd, arena->pagesize * 16);
-      void *new_pages = arena->allocator->MapPages(arena->flags, new_pages_size);
+      void *new_pages = arena->allocator->MapPages(new_pages_size);
       arena->mu.Lock();
       s = reinterpret_cast<AllocList *>(new_pages);
       s->header.size = new_pages_size;
@@ -501,24 +449,12 @@ static void *DoAllocWithArena(size_t request, LowLevelAlloc::Arena *arena) {
 }
 
 void *LowLevelAlloc::Alloc(size_t request) {
-  void *result = DoAllocWithArena(request, &default_arena);
-  if ((default_arena.flags & kCallMallocHook) != 0) {
-    // this call must be directly in the user-called allocator function
-    // for MallocHook::GetCallerStackTrace to work properly
-    MallocHook::InvokeNewHook(result, request);
-  }
-  return result;
+  return DoAllocWithArena(request, &default_arena);
 }
 
 void *LowLevelAlloc::AllocWithArena(size_t request, Arena *arena) {
-  RAW_CHECK(arena != 0, "must pass a valid arena");
-  void *result = DoAllocWithArena(request, arena);
-  if ((arena->flags & kCallMallocHook) != 0) {
-    // this call must be directly in the user-called allocator function
-    // for MallocHook::GetCallerStackTrace to work properly
-    MallocHook::InvokeNewHook(result, request);
-  }
-  return result;
+  RAW_CHECK(arena != nullptr, "must pass a valid arena");
+  return DoAllocWithArena(request, arena);
 }
 
 LowLevelAlloc::Arena *LowLevelAlloc::DefaultArena() {
@@ -535,20 +471,16 @@ LowLevelAlloc::PagesAllocator *LowLevelAlloc::GetDefaultPagesAllocator(void) {
   return default_pages_allocator.get();
 }
 
-void *DefaultPagesAllocator::MapPages(int32_t flags, size_t size) {
-  const bool invoke_hooks = ((flags & LowLevelAlloc::kAsyncSignalSafe) == 0);
-
-  auto result = tcmalloc::DirectAnonMMap(invoke_hooks, size);
+void *DefaultPagesAllocator::MapPages(size_t size) {
+  auto result = tcmalloc::DirectAnonMMap(true, size);
 
   RAW_CHECK(result.success, "mmap error");
 
   return result.addr;
 }
 
-void DefaultPagesAllocator::UnMapPages(int32_t flags, void *region, size_t size) {
-  const bool invoke_hooks = ((flags & LowLevelAlloc::kAsyncSignalSafe) == 0);
-
-  int munmap_result = tcmalloc::DirectMUnMap(invoke_hooks, region, size);
+void DefaultPagesAllocator::UnMapPages(void *region, size_t size) {
+  int munmap_result = tcmalloc::DirectMUnMap(true, region, size);
   RAW_CHECK(munmap_result == 0,
             "LowLevelAlloc::DeleteArena: munmap failed address");
 }
@@ -56,8 +56,8 @@ class LowLevelAlloc {
   class PagesAllocator {
   public:
     virtual ~PagesAllocator();
-    virtual void *MapPages(int32_t flags, size_t size) = 0;
-    virtual void UnMapPages(int32_t flags, void *addr, size_t size) = 0;
+    virtual void *MapPages(size_t size) = 0;
+    virtual void UnMapPages(void *addr, size_t size) = 0;
   };
 
   static PagesAllocator *GetDefaultPagesAllocator(void);
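The hunk above trims the public PagesAllocator interface (this and the next hunk appear to come from the LowLevelAlloc header) down to size-only MapPages/UnMapPages. A custom page allocator now overrides just those two methods; here is a minimal sketch, assuming a plain mmap/munmap backend and a hypothetical MmapPagesAllocator name (the in-tree allocators use the internal DirectAnonMMap/DirectMUnMap helpers instead), with an assumed include path:

    #include <sys/mman.h>              // mmap, munmap
    #include <cstdlib>                 // abort
    #include "base/low_level_alloc.h"  // assumed header path

    // Hypothetical illustration only; not part of this commit.
    class MmapPagesAllocator : public LowLevelAlloc::PagesAllocator {
     public:
      void *MapPages(size_t size) override {
        // Hand out anonymous, read-write pages; LowLevelAlloc carves them up.
        void *p = mmap(nullptr, size, PROT_READ | PROT_WRITE,
                       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
        if (p == MAP_FAILED) abort();  // callers assume MapPages never fails
        return p;
      }
      void UnMapPages(void *addr, size_t size) override {
        munmap(addr, size);
      }
    };

An allocator like this would be handed to NewArenaWithCustomAlloc, exactly as the emergency-malloc hunks further down do with their EmergencyArenaPagesAllocator.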
@@ -82,36 +82,16 @@ class LowLevelAlloc {
   // from which it was allocated.
   static void Free(void *s) ATTR_MALLOC_SECTION;
 
   // ATTR_MALLOC_SECTION for Alloc* and Free
   // are to put all callers of MallocHook::Invoke* in this module
   // into special section,
   // so that MallocHook::GetCallerStackTrace can function accurately.
 
   // Create a new arena.
   // The root metadata for the new arena is allocated in the
   // meta_data_arena; the DefaultArena() can be passed for meta_data_arena.
-  // These values may be ored into flags:
-  enum {
-    // Report calls to Alloc() and Free() via the MallocHook interface.
-    // Set in the DefaultArena.
-    kCallMallocHook = 0x0001,
-
-    // Make calls to Alloc(), Free() be async-signal-safe.  Not set in
-    // DefaultArena().
-    kAsyncSignalSafe = 0x0002,
-
-    // When used with DefaultArena(), the NewArena() and DeleteArena() calls
-    // obey the flags given explicitly in the NewArena() call, even if those
-    // flags differ from the settings in DefaultArena().  So the call
-    // NewArena(kAsyncSignalSafe, DefaultArena()) is itself async-signal-safe,
-    // as well as generatating an arena that provides async-signal-safe
-    // Alloc/Free.
-  };
-  static Arena *NewArena(int32_t flags, Arena *meta_data_arena);
+  static Arena *NewArena(Arena *meta_data_arena);
 
   // note: pages allocator will never be destroyed and allocated pages will never be freed
   // When allocator is NULL, it's same as NewArena
-  static Arena *NewArenaWithCustomAlloc(int32_t flags, Arena *meta_data_arena, PagesAllocator *allocator);
+  static Arena *NewArenaWithCustomAlloc(Arena *meta_data_arena, PagesAllocator *allocator);
 
   // Destroys an arena allocated by NewArena and returns true,
   // provided no allocated blocks remain in the arena.
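With both declarations updated, creating and destroying an arena no longer involves any flags. A small usage sketch against the new signatures (local variable names are illustrative only):

    // Metadata for the new arena lives in the default arena.
    LowLevelAlloc::Arena *arena =
        LowLevelAlloc::NewArena(LowLevelAlloc::DefaultArena());

    void *block = LowLevelAlloc::AllocWithArena(128, arena);
    // ... use the 128-byte block ...
    LowLevelAlloc::Free(block);   // returns the block to the arena it came from

    // Succeeds (returns true) only once no allocated blocks remain.
    bool deleted = LowLevelAlloc::DeleteArena(arena);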
@@ -58,7 +58,7 @@ static LowLevelAlloc::Arena *emergency_arena;
 
 class EmergencyArenaPagesAllocator : public LowLevelAlloc::PagesAllocator {
   ~EmergencyArenaPagesAllocator() {}
-  void *MapPages(int32_t flags, size_t size) {
+  void *MapPages(size_t size) override {
     char *new_end = emergency_arena_end + size;
     if (new_end > emergency_arena_start + kEmergencyArenaSize) {
       RAW_LOG(FATAL, "Unable to allocate %zu bytes in emergency zone.", size);
@@ -67,7 +67,7 @@ class EmergencyArenaPagesAllocator : public LowLevelAlloc::PagesAllocator {
     emergency_arena_end = new_end;
     return static_cast<void *>(rv);
   }
-  void UnMapPages(int32_t flags, void *addr, size_t size) {
+  void UnMapPages(void *addr, size_t size) override {
     RAW_LOG(FATAL, "UnMapPages is not implemented for emergency arena");
   }
 };
@@ -84,7 +84,7 @@ static void InitEmergencyMalloc(void) {
   static StaticStorage<EmergencyArenaPagesAllocator> pages_allocator_place;
   EmergencyArenaPagesAllocator* allocator = pages_allocator_place.Construct();
 
-  emergency_arena = LowLevelAlloc::NewArenaWithCustomAlloc(0, LowLevelAlloc::DefaultArena(), allocator);
+  emergency_arena = LowLevelAlloc::NewArenaWithCustomAlloc(LowLevelAlloc::DefaultArena(), allocator);
 
   emergency_arena_start_shifted = reinterpret_cast<uintptr_t>(emergency_arena_start) >> kEmergencyArenaShift;
@@ -367,7 +367,7 @@ class HeapLeakChecker::Allocator {
   static void Init() {
     RAW_DCHECK(heap_checker_lock.IsHeld(), "");
     RAW_DCHECK(arena_ == NULL, "");
-    arena_ = LowLevelAlloc::NewArena(0, LowLevelAlloc::DefaultArena());
+    arena_ = LowLevelAlloc::NewArena(LowLevelAlloc::DefaultArena());
   }
   static void Shutdown() {
     RAW_DCHECK(heap_checker_lock.IsHeld(), "");
@@ -405,8 +405,7 @@ extern "C" void HeapProfilerStart(const char* prefix) {
     tcmalloc::HookMMapEvents(&mmap_logging_hook_space, LogMappingEvent);
   }
 
-  heap_profiler_memory =
-    LowLevelAlloc::NewArena(0, LowLevelAlloc::DefaultArena());
+  heap_profiler_memory = LowLevelAlloc::NewArena(LowLevelAlloc::DefaultArena());
 
   heap_profile = new(ProfilerMalloc(sizeof(HeapProfileTable)))
     HeapProfileTable(ProfilerMalloc, ProfilerFree, FLAGS_mmap_profile);
@@ -192,7 +192,7 @@ void MemoryRegionMap::Init(int max_stack_depth, bool use_buckets) NO_THREAD_SAFE
   // Note that Init() can be (and is) sometimes called
   // already from within an mmap/sbrk hook.
   recursive_insert = true;
-  arena_ = LowLevelAlloc::NewArena(0, LowLevelAlloc::DefaultArena());
+  arena_ = LowLevelAlloc::NewArena(LowLevelAlloc::DefaultArena());
   recursive_insert = false;
   HandleSavedRegionsLocked(&InsertRegionLocked);  // flush the buffered ones
   // Can't instead use HandleSavedRegionsLocked(&DoInsertRegionLocked) before
@@ -76,10 +76,7 @@ static bool using_low_level_alloc = false;
 // before being freed.  At the end of the run,
 // all remaining allocated blocks are freed.
 // If use_new_arena is true, use a fresh arena, and then delete it.
-// If call_malloc_hook is true and user_arena is true,
-// allocations and deallocations are reported via the MallocHook
-// interface.
-static void ExerciseAllocator(bool use_new_arena, bool call_malloc_hook, int n) {
+static void ExerciseAllocator(bool use_new_arena, int n) {
   typedef std::map<int, BlockDesc> AllocMap;
   AllocMap allocated;
   AllocMap::iterator it;
@@ -87,8 +84,7 @@ static void ExerciseAllocator(bool use_new_arena, bool call_malloc_hook, int n)
   int rnd;
   LowLevelAlloc::Arena *arena = 0;
   if (use_new_arena) {
-    int32_t flags = call_malloc_hook? LowLevelAlloc::kCallMallocHook : 0;
-    arena = LowLevelAlloc::NewArena(flags, LowLevelAlloc::DefaultArena());
+    arena = LowLevelAlloc::NewArena(LowLevelAlloc::DefaultArena());
   }
   for (int i = 0; i != n; i++) {
     if (i != 0 && i % 10000 == 0) {
@@ -144,47 +140,9 @@ static void ExerciseAllocator(bool use_new_arena, bool call_malloc_hook, int n)
   }
 }
 
-// used for counting allocates and frees
-static int32_t allocates;
-static int32_t frees;
-
-// called on each alloc if kCallMallocHook specified
-static void AllocHook(const void *p, size_t size) {
-  if (using_low_level_alloc) {
-    allocates++;
-  }
-}
-
-// called on each free if kCallMallocHook specified
-static void FreeHook(const void *p) {
-  if (using_low_level_alloc) {
-    frees++;
-  }
-}
-
 TEST(LowLevelAllocTest, Basic) {
-  ASSERT_TRUE(MallocHook::AddNewHook(&AllocHook));
-  ASSERT_TRUE(MallocHook::AddDeleteHook(&FreeHook));
-  ASSERT_EQ(allocates, 0);
-  ASSERT_EQ(frees, 0);
-
-  ExerciseAllocator(false, false, 50000);
-  ASSERT_NE(allocates, 0);  // default arena calls hooks
-  ASSERT_NE(frees, 0);
-  for (int i = 0; i != 16; i++) {
-    bool call_hooks = ((i & 1) == 1);
-    allocates = 0;
-    frees = 0;
-    ExerciseAllocator(true, call_hooks, 15000);
-    if (call_hooks) {
-      ASSERT_GT(allocates, 5000);  // arena calls hooks
-      ASSERT_GT(frees, 5000);
-    } else {
-      ASSERT_EQ(allocates, 0);    // arena doesn't call hooks
-      ASSERT_EQ(frees, 0);
-    }
+  ExerciseAllocator(false, 50000);
+  for (int i = 0; i < 8; i++) {
+    ExerciseAllocator(true, 15000);
   }
-
-  ASSERT_TRUE(MallocHook::RemoveNewHook(&AllocHook));
-  ASSERT_TRUE(MallocHook::RemoveDeleteHook(&FreeHook));
 }