rework and simplify emergency malloc integration
We now wrap StacktraceScope in the tcmalloc-specific parts, instead of applying it implicitly inside every stacktrace.cc function. All tcmalloc code now grabs stack traces via the newly introduced tcmalloc::GrabBacktrace, which handles the emergency-malloc wrapping. The new approach eliminates the need for the fake stacktrace scope. The CPU profiler, being a distinct .so library, could not take advantage of emergency malloc anyway. This simplifies the build further and eliminates another potential point of runtime divergence when stacktrace is linked into both libprofiler and libtcmalloc.
parent 4be76ab707
commit 771a01ed28
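For illustration, a minimal sketch of the new call pattern (the helper below is hypothetical; tcmalloc::GrabBacktrace and maybe_emergency_malloc.h are taken from the diff that follows):

// Hypothetical tcmalloc-internal helper, shown only to illustrate the new
// pattern: instead of calling GetStackTrace() directly, tcmalloc code
// includes "maybe_emergency_malloc.h" and calls tcmalloc::GrabBacktrace(),
// which enters the emergency-malloc scope (when built with
// ENABLE_EMERGENCY_MALLOC) before delegating to the real unwinder.
#include "maybe_emergency_malloc.h"

static int CaptureAllocationStack(void** stack, int max_depth) {
  // Skip one frame so that the caller of this helper is the first recorded PC.
  return tcmalloc::GrabBacktrace(stack, max_depth, /* skip_count = */ 1);
}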
@@ -604,9 +604,6 @@ if(WITH_STACK_TRACE)
target_link_libraries(stacktrace INTERFACE ${unwind_libs} ${LIBSPINLOCK})
target_sources(stacktrace INTERFACE $<TARGET_OBJECTS:stacktrace_object>)

set(libfake_stacktrace_scope_la_SOURCES src/fake_stacktrace_scope.cc)
add_library(fake_stacktrace_scope ${libfake_stacktrace_scope_la_SOURCES})

if(BUILD_TESTING)
set(STACKTRACE_UNITTEST_INCLUDES src/config_for_unittests.h
src/base/commandlineflags.h

@@ -614,7 +611,7 @@ if(WITH_STACK_TRACE)
${LOGGING_INCLUDES})

add_executable(stacktrace_unittest src/tests/stacktrace_unittest.cc ${libstacktrace_la_SOURCES})
target_link_libraries(stacktrace_unittest logging fake_stacktrace_scope ${LIBSPINLOCK} ${unwind_libs})
target_link_libraries(stacktrace_unittest logging ${LIBSPINLOCK} ${unwind_libs})
target_compile_definitions(stacktrace_unittest PRIVATE STACKTRACE_IS_TESTED)
add_test(stacktrace_unittest stacktrace_unittest)

@@ -1017,11 +1014,10 @@ if(GPERFTOOLS_BUILD_HEAP_CHECKER OR GPERFTOOLS_BUILD_HEAP_PROFILER)

if(gperftools_emergency_malloc)
set(EMERGENCY_MALLOC_CC
src/emergency_malloc.cc
src/emergency_malloc_for_stacktrace.cc)
src/emergency_malloc.cc)
set(EMERGENCY_MALLOC_DEFINE ENABLE_EMERGENCY_MALLOC)
else()
set(EMERGENCY_MALLOC_CC src/fake_stacktrace_scope.cc)
set(EMERGENCY_MALLOC_CC )
endif()

### Making the library

@@ -1251,7 +1247,7 @@ if(GPERFTOOLS_BUILD_CPU_PROFILER)
src/profile-handler.cc
src/profiledata.cc
${CPU_PROFILER_INCLUDES})
set(libprofiler_la_LIBADD stacktrace fake_stacktrace_scope)
set(libprofiler_la_LIBADD stacktrace)
add_library(profiler SHARED ${libprofiler_la_SOURCES})
target_link_libraries(profiler PRIVATE ${libprofiler_la_LIBADD})
target_link_libraries(profiler PRIVATE Threads::Threads)
Makefile.am
@@ -199,15 +199,12 @@ libstacktrace_la_SOURCES = src/stacktrace.cc \
src/base/vdso_support.cc
libstacktrace_la_LIBADD = $(UNWIND_LIBS) $(LIBSPINLOCK)

noinst_LTLIBRARIES += libfake_stacktrace_scope.la
libfake_stacktrace_scope_la_SOURCES = src/fake_stacktrace_scope.cc

### Unittests
TESTS += stacktrace_unittest
stacktrace_unittest_SOURCES = src/tests/stacktrace_unittest.cc \
  $(libstacktrace_la_SOURCES)
stacktrace_unittest_CXXFLAGS = $(AM_CXXFLAGS) -DSTACKTRACE_IS_TESTED
stacktrace_unittest_LDADD = $(libstacktrace_la_LIBADD) liblogging.la libfake_stacktrace_scope.la $(STACKTRACE_UNITTEST_LIBS)
stacktrace_unittest_LDADD = $(libstacktrace_la_LIBADD) liblogging.la $(STACKTRACE_UNITTEST_LIBS)
# nice to have. Allows glibc's backtrace_symbols to work.
stacktrace_unittest_LDFLAGS = -export-dynamic

@@ -657,10 +654,10 @@ perftoolsinclude_HEADERS += src/gperftools/heap-profiler.h \
src/gperftools/heap-checker.h

if BUILD_EMERGENCY_MALLOC
EMERGENCY_MALLOC_CC = src/emergency_malloc.cc src/emergency_malloc_for_stacktrace.cc
EMERGENCY_MALLOC_CC = src/emergency_malloc.cc
EMERGENCY_MALLOC_DEFINE = -DENABLE_EMERGENCY_MALLOC
else !BUILD_EMERGENCY_MALLOC
EMERGENCY_MALLOC_CC = src/fake_stacktrace_scope.cc
EMERGENCY_MALLOC_CC =
EMERGENCY_MALLOC_DEFINE =
endif !BUILD_EMERGENCY_MALLOC

@@ -986,7 +983,7 @@ lib_LTLIBRARIES += libprofiler.la
libprofiler_la_SOURCES = src/profiler.cc \
  src/profile-handler.cc \
  src/profiledata.cc
libprofiler_la_LIBADD = libstacktrace.la libfake_stacktrace_scope.la $(PTHREAD_LIBS)
libprofiler_la_LIBADD = libstacktrace.la $(PTHREAD_LIBS)
libprofiler_la_CXXFLAGS = $(PTHREAD_CFLAGS) $(AM_CXXFLAGS)
# We have to include ProfileData for profiledata_unittest
CPU_PROFILER_SYMBOLS = '(ProfilerStart|ProfilerStartWithOptions|ProfilerStop|ProfilerFlush|ProfilerEnable|ProfilerDisable|ProfilingIsEnabledForAllThreads|ProfilerRegisterThread|ProfilerGetCurrentState|ProfilerState|ProfileData|ProfileHandler|ProfilerGetStackTrace)'
@@ -62,20 +62,20 @@

#include <gperftools/malloc_extension.h>
#include <gperftools/malloc_hook.h>
#include <gperftools/stacktrace.h>

// Will be pulled in as along with tcmalloc.cc
// #include <gperftools/tcmalloc.h>

#include "addressmap-inl.h"
#include "base/commandlineflags.h"
#include "base/threading.h"
#include "base/googleinit.h"
#include "base/logging.h"
#include "base/spinlock.h"
#include "base/threading.h"
#include "malloc_hook-inl.h"
#include "symbolize.h"
#include "maybe_emergency_malloc.h"
#include "safe_strerror.h"
#include "symbolize.h"

// NOTE: due to #define below, tcmalloc.cc will omit tc_XXX
// definitions. So that debug implementations can be defined

@@ -991,7 +991,7 @@ static int TraceFd() {
// Print the hex stack dump on a single line. PCs are separated by tabs.
static void TraceStack(void) {
  void *pcs[16];
  int n = GetStackTrace(pcs, sizeof(pcs)/sizeof(pcs[0]), 0);
  int n = tcmalloc::GrabBacktrace(pcs, sizeof(pcs)/sizeof(pcs[0]), 0);
  for (int i = 0; i != n; i++) {
    TracePrintf(TraceFd(), "\t%p", pcs[i]);
  }
@@ -41,129 +41,131 @@
#include "base/low_level_alloc.h"
#include "base/spinlock.h"
#include "internal_logging.h"

#include "thread_cache_ptr.h"

namespace tcmalloc {
__attribute__ ((visibility("internal"))) char *emergency_arena_start;
__attribute__ ((visibility("internal"))) uintptr_t emergency_arena_start_shifted;

static CACHELINE_ALIGNED SpinLock emergency_malloc_lock;
static char *emergency_arena_end;
static LowLevelAlloc::Arena *emergency_arena;
ATTRIBUTE_HIDDEN char *emergency_arena_start;
ATTRIBUTE_HIDDEN uintptr_t emergency_arena_start_shifted;

class EmergencyArenaPagesAllocator : public LowLevelAlloc::PagesAllocator {
  ~EmergencyArenaPagesAllocator() {}
  void *MapPages(int32_t flags, size_t size) {
    char *new_end = emergency_arena_end + size;
    if (new_end > emergency_arena_start + kEmergencyArenaSize) {
      RAW_LOG(FATAL, "Unable to allocate %zu bytes in emergency zone.", size);
    }
    char *rv = emergency_arena_end;
    emergency_arena_end = new_end;
    return static_cast<void *>(rv);
static CACHELINE_ALIGNED SpinLock emergency_malloc_lock;
static char *emergency_arena_end;
static LowLevelAlloc::Arena *emergency_arena;

class EmergencyArenaPagesAllocator : public LowLevelAlloc::PagesAllocator {
  ~EmergencyArenaPagesAllocator() {}
  void *MapPages(int32_t flags, size_t size) {
    char *new_end = emergency_arena_end + size;
    if (new_end > emergency_arena_start + kEmergencyArenaSize) {
      RAW_LOG(FATAL, "Unable to allocate %zu bytes in emergency zone.", size);
    }
  void UnMapPages(int32_t flags, void *addr, size_t size) {
    RAW_LOG(FATAL, "UnMapPages is not implemented for emergency arena");
  }
};

static union {
  char bytes[sizeof(EmergencyArenaPagesAllocator)];
  void *ptr;
} pages_allocator_place;

static void InitEmergencyMalloc(void) {
  constexpr int32_t flags = LowLevelAlloc::kAsyncSignalSafe;

  void *arena = LowLevelAlloc::GetDefaultPagesAllocator()->MapPages(flags, kEmergencyArenaSize * 2);

  uintptr_t arena_ptr = reinterpret_cast<uintptr_t>(arena);
  uintptr_t ptr = (arena_ptr + kEmergencyArenaSize - 1) & ~(kEmergencyArenaSize-1);

  emergency_arena_end = emergency_arena_start = reinterpret_cast<char *>(ptr);
  EmergencyArenaPagesAllocator *allocator = new (pages_allocator_place.bytes) EmergencyArenaPagesAllocator();
  emergency_arena = LowLevelAlloc::NewArenaWithCustomAlloc(0, LowLevelAlloc::DefaultArena(), allocator);

  emergency_arena_start_shifted = reinterpret_cast<uintptr_t>(emergency_arena_start) >> kEmergencyArenaShift;

  uintptr_t head_unmap_size = ptr - arena_ptr;
  CHECK_CONDITION(head_unmap_size < kEmergencyArenaSize);
  if (head_unmap_size != 0) {
    LowLevelAlloc::GetDefaultPagesAllocator()->UnMapPages(flags, arena, ptr - arena_ptr);
  }

  uintptr_t tail_unmap_size = kEmergencyArenaSize - head_unmap_size;
  void *tail_start = reinterpret_cast<void *>(arena_ptr + head_unmap_size + kEmergencyArenaSize);
  LowLevelAlloc::GetDefaultPagesAllocator()->UnMapPages(flags, tail_start, tail_unmap_size);
    char *rv = emergency_arena_end;
    emergency_arena_end = new_end;
    return static_cast<void *>(rv);
  }

PERFTOOLS_DLL_DECL void *EmergencyMalloc(size_t size) {
  SpinLockHolder l(&emergency_malloc_lock);

  if (emergency_arena_start == NULL) {
    InitEmergencyMalloc();
    CHECK_CONDITION(emergency_arena_start != NULL);
  }

  void *rv = LowLevelAlloc::AllocWithArena(size, emergency_arena);
  if (rv == NULL) {
    errno = ENOMEM;
  }
  return rv;
}

PERFTOOLS_DLL_DECL void EmergencyFree(void *p) {
  SpinLockHolder l(&emergency_malloc_lock);
  if (emergency_arena_start == NULL) {
    InitEmergencyMalloc();
    CHECK_CONDITION(emergency_arena_start != NULL);
    free(p);
    return;
  }
  CHECK_CONDITION(emergency_arena_start);
  LowLevelAlloc::Free(p);
}

PERFTOOLS_DLL_DECL void *EmergencyRealloc(void *_old_ptr, size_t new_size) {
  if (_old_ptr == NULL) {
    return EmergencyMalloc(new_size);
  }
  if (new_size == 0) {
    EmergencyFree(_old_ptr);
    return NULL;
  }
  SpinLockHolder l(&emergency_malloc_lock);
  CHECK_CONDITION(emergency_arena_start);

  char *old_ptr = static_cast<char *>(_old_ptr);
  CHECK_CONDITION(old_ptr <= emergency_arena_end);
  CHECK_CONDITION(emergency_arena_start <= old_ptr);

  // NOTE: we don't know previous size of old_ptr chunk. So instead
  // of trying to figure out right size of copied memory, we just
  // copy largest possible size. We don't care about being slow.
  size_t old_ptr_size = emergency_arena_end - old_ptr;
  size_t copy_size = (new_size < old_ptr_size) ? new_size : old_ptr_size;

  void *new_ptr = LowLevelAlloc::AllocWithArena(new_size, emergency_arena);
  if (new_ptr == NULL) {
    errno = ENOMEM;
    return NULL;
  }
  memcpy(new_ptr, old_ptr, copy_size);

  LowLevelAlloc::Free(old_ptr);
  return new_ptr;
}

PERFTOOLS_DLL_DECL void *EmergencyCalloc(size_t n, size_t elem_size) {
  // Overflow check
  const size_t size = n * elem_size;
  if (elem_size != 0 && size / elem_size != n) return NULL;
  void *rv = EmergencyMalloc(size);
  if (rv != NULL) {
    memset(rv, 0, size);
  }
  return rv;
  void UnMapPages(int32_t flags, void *addr, size_t size) {
    RAW_LOG(FATAL, "UnMapPages is not implemented for emergency arena");
  }
};

static void InitEmergencyMalloc(void) {
  constexpr int32_t flags = LowLevelAlloc::kAsyncSignalSafe;

  void *arena = LowLevelAlloc::GetDefaultPagesAllocator()->MapPages(flags, kEmergencyArenaSize * 2);

  uintptr_t arena_ptr = reinterpret_cast<uintptr_t>(arena);
  uintptr_t ptr = (arena_ptr + kEmergencyArenaSize - 1) & ~(kEmergencyArenaSize-1);

  emergency_arena_end = emergency_arena_start = reinterpret_cast<char *>(ptr);

  static struct alignas(alignof(EmergencyArenaPagesAllocator)) {
    uint8_t bytes[sizeof(EmergencyArenaPagesAllocator)];
  } pages_allocator_place;

  EmergencyArenaPagesAllocator *allocator = new (&pages_allocator_place) EmergencyArenaPagesAllocator();
  emergency_arena = LowLevelAlloc::NewArenaWithCustomAlloc(0, LowLevelAlloc::DefaultArena(), allocator);

  emergency_arena_start_shifted = reinterpret_cast<uintptr_t>(emergency_arena_start) >> kEmergencyArenaShift;

  uintptr_t head_unmap_size = ptr - arena_ptr;
  CHECK_CONDITION(head_unmap_size < kEmergencyArenaSize);
  if (head_unmap_size != 0) {
    LowLevelAlloc::GetDefaultPagesAllocator()->UnMapPages(flags, arena, ptr - arena_ptr);
  }

  uintptr_t tail_unmap_size = kEmergencyArenaSize - head_unmap_size;
  void *tail_start = reinterpret_cast<void *>(arena_ptr + head_unmap_size + kEmergencyArenaSize);
  LowLevelAlloc::GetDefaultPagesAllocator()->UnMapPages(flags, tail_start, tail_unmap_size);
}

ATTRIBUTE_HIDDEN void *EmergencyMalloc(size_t size) {
  SpinLockHolder l(&emergency_malloc_lock);

  if (emergency_arena_start == NULL) {
    InitEmergencyMalloc();
    CHECK_CONDITION(emergency_arena_start != NULL);
  }

  void *rv = LowLevelAlloc::AllocWithArena(size, emergency_arena);
  if (rv == NULL) {
    errno = ENOMEM;
  }
  return rv;
}

ATTRIBUTE_HIDDEN void EmergencyFree(void *p) {
  SpinLockHolder l(&emergency_malloc_lock);
  if (emergency_arena_start == NULL) {
    InitEmergencyMalloc();
    CHECK_CONDITION(emergency_arena_start != NULL);
    free(p);
    return;
  }
  CHECK_CONDITION(emergency_arena_start);
  LowLevelAlloc::Free(p);
}

ATTRIBUTE_HIDDEN void *EmergencyRealloc(void *_old_ptr, size_t new_size) {
  if (_old_ptr == NULL) {
    return EmergencyMalloc(new_size);
  }
  if (new_size == 0) {
    EmergencyFree(_old_ptr);
    return NULL;
  }
  SpinLockHolder l(&emergency_malloc_lock);
  CHECK_CONDITION(emergency_arena_start);

  char *old_ptr = static_cast<char *>(_old_ptr);
  CHECK_CONDITION(old_ptr <= emergency_arena_end);
  CHECK_CONDITION(emergency_arena_start <= old_ptr);

  // NOTE: we don't know previous size of old_ptr chunk. So instead
  // of trying to figure out right size of copied memory, we just
  // copy largest possible size. We don't care about being slow.
  size_t old_ptr_size = emergency_arena_end - old_ptr;
  size_t copy_size = (new_size < old_ptr_size) ? new_size : old_ptr_size;

  void *new_ptr = LowLevelAlloc::AllocWithArena(new_size, emergency_arena);
  if (new_ptr == NULL) {
    errno = ENOMEM;
    return NULL;
  }
  memcpy(new_ptr, old_ptr, copy_size);

  LowLevelAlloc::Free(old_ptr);
  return new_ptr;
}

ATTRIBUTE_HIDDEN void *EmergencyCalloc(size_t n, size_t elem_size) {
  // Overflow check
  const size_t size = n * elem_size;
  if (elem_size != 0 && size / elem_size != n) return NULL;
  void *rv = EmergencyMalloc(size);
  if (rv != NULL) {
    memset(rv, 0, size);
  }
  return rv;
}

} // namespace tcmalloc
@@ -36,24 +36,49 @@

#include "base/basictypes.h"
#include "common.h"
#include "thread_cache_ptr.h"

namespace tcmalloc {
static const uintptr_t kEmergencyArenaShift = 20+4; // 16 megs
static const uintptr_t kEmergencyArenaSize = 1 << kEmergencyArenaShift;

extern __attribute__ ((visibility("internal"))) char *emergency_arena_start;
extern __attribute__ ((visibility("internal"))) uintptr_t emergency_arena_start_shifted;;
static constexpr uintptr_t kEmergencyArenaShift = 20+4; // 16 megs
static constexpr uintptr_t kEmergencyArenaSize = uintptr_t{1} << kEmergencyArenaShift;

PERFTOOLS_DLL_DECL void *EmergencyMalloc(size_t size);
PERFTOOLS_DLL_DECL void EmergencyFree(void *p);
PERFTOOLS_DLL_DECL void *EmergencyCalloc(size_t n, size_t elem_size);
PERFTOOLS_DLL_DECL void *EmergencyRealloc(void *old_ptr, size_t new_size);
ATTRIBUTE_HIDDEN extern char *emergency_arena_start;
ATTRIBUTE_HIDDEN extern uintptr_t emergency_arena_start_shifted;;

static inline bool IsEmergencyPtr(const void *_ptr) {
  uintptr_t ptr = reinterpret_cast<uintptr_t>(_ptr);
  return PREDICT_FALSE((ptr >> kEmergencyArenaShift) == emergency_arena_start_shifted)
    && emergency_arena_start_shifted;
ATTRIBUTE_HIDDEN void *EmergencyMalloc(size_t size);
ATTRIBUTE_HIDDEN void EmergencyFree(void *p);
ATTRIBUTE_HIDDEN void *EmergencyCalloc(size_t n, size_t elem_size);
ATTRIBUTE_HIDDEN void *EmergencyRealloc(void *old_ptr, size_t new_size);

static inline bool IsEmergencyPtr(const void *_ptr) {
  uintptr_t ptr = reinterpret_cast<uintptr_t>(_ptr);
  return PREDICT_FALSE((ptr >> kEmergencyArenaShift) == emergency_arena_start_shifted)
    && emergency_arena_start_shifted;
}

class StacktraceScope {
public:
  StacktraceScope() : stacktrace_allowed_(EnterStacktraceScope()) { }
  bool IsStacktraceAllowed() {
    return stacktrace_allowed_;
  }
  ~StacktraceScope() {
    if (stacktrace_allowed_) {
      tcmalloc::ResetUseEmergencyMalloc();
    }
  }
private:
  static bool EnterStacktraceScope() {
    if (tcmalloc::IsUseEmergencyMalloc()) {
      return false;
    }
    tcmalloc::SetUseEmergencyMalloc();
    return true;
  }

  const bool stacktrace_allowed_;
};

} // namespace tcmalloc
@@ -1,49 +0,0 @@
// -*- Mode: C++; c-basic-offset: 2; indent-tabs-mode: nil -*-
// Copyright (c) 2014, gperftools Contributors
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following disclaimer
// in the documentation and/or other materials provided with the
// distribution.
// * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived from
// this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#include "emergency_malloc.h"
#include "thread_cache.h"
#include "thread_cache_ptr.h"

namespace tcmalloc {
  bool EnterStacktraceScope(void);
  void LeaveStacktraceScope(void);
}

bool tcmalloc::EnterStacktraceScope(void) {
  if (tcmalloc::IsUseEmergencyMalloc()) {
    return false;
  }
  tcmalloc::SetUseEmergencyMalloc();
  return true;
}

void tcmalloc::LeaveStacktraceScope(void) {
  tcmalloc::ResetUseEmergencyMalloc();
}
@@ -1,39 +0,0 @@
// -*- Mode: C++; c-basic-offset: 2; indent-tabs-mode: nil -*-
// Copyright (c) 2014, gperftools Contributors
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following disclaimer
// in the documentation and/or other materials provided with the
// distribution.
// * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived from
// this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

#include "base/basictypes.h"

namespace tcmalloc {
  ATTRIBUTE_WEAK bool EnterStacktraceScope(void) {
    return true;
  }
  ATTRIBUTE_WEAK void LeaveStacktraceScope(void) {
  }
}
@@ -51,15 +51,6 @@
#include "base/spinlock.h"
#include "maybe_emergency_malloc.h"

// This #ifdef should almost never be set. Set NO_TCMALLOC_SAMPLES if
// you're porting to a system where you really can't get a stacktrace.
#ifdef NO_TCMALLOC_SAMPLES
// We use #define so code compiles even if you #include stacktrace.h somehow.
# define GetStackTrace(stack, depth, skip) (0)
#else
# include <gperftools/stacktrace.h>
#endif

// __THROW is defined in glibc systems. It means, counter-intuitively,
// "This function will never throw an exception." It's an optional
// optimization tool, but we may need to use it to match glibc prototypes.

@@ -384,7 +375,7 @@ extern "C" int MallocHook_GetCallerStackTrace(void** result, int max_depth,
  // Note: this path is inaccurate when a hook is not called directly by an
  // allocation function but is daisy-chained through another hook,
  // search for MallocHook::(Get|Set|Invoke)* to find such cases.
  return GetStackTrace(result, max_depth, skip_count + int(DEBUG_MODE));
  return tcmalloc::GrabBacktrace(result, max_depth, skip_count + int(DEBUG_MODE));
  // due to -foptimize-sibling-calls in opt mode
  // there's no need for extra frame skip here then
#else

@@ -398,7 +389,7 @@ extern "C" int MallocHook_GetCallerStackTrace(void** result, int max_depth,
  // and 3 is to account for some hook daisy chaining.
  static const int kStackSize = kMaxSkip + 1;
  void* stack[kStackSize];
  int depth = GetStackTrace(stack, kStackSize, 1); // skip this function frame
  int depth = tcmalloc::GrabBacktrace(stack, kStackSize, 1); // skip this function frame
  if (depth == 0) // silenty propagate cases when GetStackTrace does not work
    return 0;
  for (int i = 0; i < depth; ++i) { // stack[0] is our immediate caller

@@ -420,7 +411,7 @@ extern "C" int MallocHook_GetCallerStackTrace(void** result, int max_depth,
  if (depth < max_depth && depth + i == kStackSize) {
    // get frames for the missing depth
    depth +=
      GetStackTrace(result + depth, max_depth - depth, 1 + kStackSize);
      tcmalloc::GrabBacktrace(result + depth, max_depth - depth, 1 + kStackSize);
  }
  return depth;
}
@@ -33,6 +33,9 @@

#include "config.h"

#include "base/basictypes.h"
#include "gperftools/stacktrace.h"

#ifdef ENABLE_EMERGENCY_MALLOC

#include "emergency_malloc.h"

@@ -40,16 +43,56 @@
#else

namespace tcmalloc {
static inline void *EmergencyMalloc(size_t size) {return NULL;}
static inline void EmergencyFree(void *p) {}
static inline void *EmergencyCalloc(size_t n, size_t elem_size) {return NULL;}
static inline void *EmergencyRealloc(void *old_ptr, size_t new_size) {return NULL;}

static inline bool IsEmergencyPtr(const void *_ptr) {
  return false;
static inline void *EmergencyMalloc(size_t size) {return nullptr;}
static inline void EmergencyFree(void *p) {}
static inline void *EmergencyCalloc(size_t n, size_t elem_size) {return nullptr;}
static inline void *EmergencyRealloc(void *old_ptr, size_t new_size) {return nullptr;}
static inline bool IsEmergencyPtr(const void *_ptr) {return false;}

struct StacktraceScope {
  static inline int frame_forcer;
  bool IsStacktraceAllowed() { return true; }
  ~StacktraceScope() {
    (void)*const_cast<int volatile *>(&frame_forcer);
  }
};

} // namespace tcmalloc

#endif // ENABLE_EMERGENCY_MALLOC

namespace tcmalloc {

#ifdef NO_TCMALLOC_SAMPLES

inline int GrabBacktrace(void** result, int max_depth, int skip_count) { return 0; }

#else

// GrabBacktrace is the API to use when capturing backtrace for
// various tcmalloc features. It has optional emergency malloc
// integration for occasional case where stacktrace capturing method
// calls back to malloc (so we divert those calls to emergency malloc
// facility).
ATTRIBUTE_HIDDEN ATTRIBUTE_NOINLINE inline
int GrabBacktrace(void** result, int max_depth, int skip_count) {
  StacktraceScope scope;
  if (!scope.IsStacktraceAllowed()) {
    return 0;
  }
  return GetStackTrace(result, max_depth, skip_count + 1);
}

#endif // ENABLE_EMERGENCY_MALLOC

#endif

} // namespace tcmalloc

// When something includes this file, don't let us use 'regular'
// stacktrace API directly.
#define GetStackTrace(...) missing
#define GetStackTraceWithContext(...) missing
#define GetStackFrames(...) missing
#define GetStackFramesWithContext(...) missing

#endif // MAYBE_EMERGENCY_MALLOC_H
@@ -39,10 +39,11 @@
#include <algorithm>
#include <limits>

#include "gperftools/malloc_extension.h" // for MallocRange, etc
#include "base/basictypes.h"
#include "base/commandlineflags.h"
#include "gperftools/malloc_extension.h" // for MallocRange, etc
#include "internal_logging.h" // for ASSERT, TCMalloc_Printer, etc
#include "maybe_emergency_malloc.h"
#include "page_heap_allocator.h" // for PageHeapAllocator
#include "static_vars.h" // for Static
#include "system-alloc.h" // for TCMalloc_SystemAlloc, etc

@@ -152,7 +153,7 @@ void PageHeap::HandleUnlock(LockingContext* context) {
  lock_.Unlock();

  if (t) {
    t->depth = GetStackTrace(t->stack, kMaxStackDepth-1, 0);
    t->depth = tcmalloc::GrabBacktrace(t->stack, kMaxStackDepth-1, 0);
    Static::push_growth_stack(t);
  }
}
@@ -54,17 +54,6 @@
#pragma warning(disable:4251)
#endif

// This #ifdef should almost never be set. Set NO_TCMALLOC_SAMPLES if
// you're porting to a system where you really can't get a stacktrace.
// Because we control the definition of GetStackTrace, all clients of
// GetStackTrace should #include us rather than stacktrace.h.
#ifdef NO_TCMALLOC_SAMPLES
// We use #define so code compiles even if you #include stacktrace.h somehow.
# define GetStackTrace(stack, depth, skip) (0)
#else
# include <gperftools/stacktrace.h>
#endif

namespace base {
struct MallocRange;
}
@@ -288,32 +288,19 @@ static GetStackImplementation *get_stack_impl;

static void init_default_stack_impl_inner(void);

namespace tcmalloc {
bool EnterStacktraceScope(void);
void LeaveStacktraceScope(void);
}

namespace {
using tcmalloc::EnterStacktraceScope;
using tcmalloc::LeaveStacktraceScope;

class StacktraceScope {
  bool stacktrace_allowed;
public:
  StacktraceScope() {
    stacktrace_allowed = true;
    stacktrace_allowed = EnterStacktraceScope();
struct CaptureScope {
  void** const result;

  CaptureScope(void** result) : result(result) {
    init_default_stack_impl_inner();
  }
  bool IsStacktraceAllowed() {
    return stacktrace_allowed;
  }
  // NOTE: noinline here ensures that we don't tail-call GetStackXXX
  // calls below. Which is crucial due to us having to pay attention
  // to skip_count argument.
  ATTRIBUTE_NOINLINE ~StacktraceScope() {
    if (stacktrace_allowed) {
      LeaveStacktraceScope();
    }

  ~CaptureScope() {
    // This "work" that we're doing ensures we're not tail-calling
    // stacktrace capturing implementation.
    (void)*(const_cast<void* volatile *>(result));
  }
};

@@ -322,11 +309,8 @@ public:
ATTRIBUTE_NOINLINE
PERFTOOLS_DLL_DECL int GetStackFrames(void** result, int* sizes, int max_depth,
                                      int skip_count) {
  StacktraceScope scope;
  if (!scope.IsStacktraceAllowed()) {
    return 0;
  }
  init_default_stack_impl_inner();
  CaptureScope scope(result);;

  return get_stack_impl->GetStackFramesPtr(result, sizes,
                                           max_depth, skip_count);
}

@@ -334,11 +318,8 @@ PERFTOOLS_DLL_DECL int GetStackFrames(void** result, int* sizes, int max_depth,
ATTRIBUTE_NOINLINE
PERFTOOLS_DLL_DECL int GetStackFramesWithContext(void** result, int* sizes, int max_depth,
                                                 int skip_count, const void *uc) {
  StacktraceScope scope;
  if (!scope.IsStacktraceAllowed()) {
    return 0;
  }
  init_default_stack_impl_inner();
  CaptureScope scope(result);

  return get_stack_impl->GetStackFramesWithContextPtr(result, sizes, max_depth,
                                                      skip_count, uc);
}

@@ -346,22 +327,16 @@ PERFTOOLS_DLL_DECL int GetStackFramesWithContext(void** result, int* sizes, int
ATTRIBUTE_NOINLINE
PERFTOOLS_DLL_DECL int GetStackTrace(void** result, int max_depth,
                                     int skip_count) {
  StacktraceScope scope;
  if (!scope.IsStacktraceAllowed()) {
    return 0;
  }
  init_default_stack_impl_inner();
  CaptureScope scope(result);

  return get_stack_impl->GetStackTracePtr(result, max_depth, skip_count);
}

ATTRIBUTE_NOINLINE
PERFTOOLS_DLL_DECL int GetStackTraceWithContext(void** result, int max_depth,
                                                int skip_count, const void *uc) {
  StacktraceScope scope;
  if (!scope.IsStacktraceAllowed()) {
    return 0;
  }
  init_default_stack_impl_inner();
  CaptureScope scope(result);

  return get_stack_impl->GetStackTraceWithContextPtr(result, max_depth,
                                                     skip_count, uc);
}
@@ -1193,7 +1193,7 @@ static void* DoSampledAllocation(size_t size) {
#ifndef NO_TCMALLOC_SAMPLES
  // Grab the stack trace outside the heap lock
  StackTrace tmp;
  tmp.depth = GetStackTrace(tmp.stack, tcmalloc::kMaxStackDepth, 1);
  tmp.depth = tcmalloc::GrabBacktrace(tmp.stack, tcmalloc::kMaxStackDepth, 1);
  tmp.size = size;

  // Allocate span

@@ -1295,7 +1295,7 @@ void* handle_oom(malloc_fn retry_fn,

static void ReportLargeAlloc(Length num_pages, void* result) {
  StackTrace stack;
  stack.depth = GetStackTrace(stack.stack, tcmalloc::kMaxStackDepth, 1);
  stack.depth = tcmalloc::GrabBacktrace(stack.stack, tcmalloc::kMaxStackDepth, 1);

  static const int N = 1000;
  char buffer[N];
@@ -210,7 +210,6 @@
<ClCompile Include="..\..\src\base\proc_maps_iterator.cc" />
<ClCompile Include="..\..\src\central_freelist.cc" />
<ClCompile Include="..\..\src\common.cc" />
<ClCompile Include="..\..\src\fake_stacktrace_scope.cc" />
<ClCompile Include="..\..\src\internal_logging.cc" />
<ClCompile Include="..\..\src\malloc_extension.cc" />
<ClCompile Include="..\..\src\malloc_hook.cc" />

@@ -101,9 +101,6 @@
<ClCompile Include="..\..\src\thread_cache_ptr.cc">
  <Filter>Source Files</Filter>
</ClCompile>
<ClCompile Include="..\..\src\fake_stacktrace_scope.cc">
  <Filter>Source Files</Filter>
</ClCompile>
<ClCompile Include="..\..\src\windows\override_functions.cc">
  <Filter>Source Files</Filter>
</ClCompile>

@@ -209,7 +209,6 @@
<ClCompile Include="..\..\src\base\generic_writer.cc" />
<ClCompile Include="..\..\src\base\sysinfo.cc" />
<ClCompile Include="..\..\src\base\proc_maps_iterator.cc" />
<ClCompile Include="..\..\src\fake_stacktrace_scope.cc" />
<ClCompile Include="..\..\src\mmap_hook.cc" />
<ClCompile Include="..\..\src\malloc_hook.cc" />
<ClCompile Include="..\..\src\stacktrace.cc" />

@@ -50,9 +50,6 @@
<ClCompile Include="..\..\src\stacktrace.cc">
  <Filter>Source Files</Filter>
</ClCompile>
<ClCompile Include="..\..\src\fake_stacktrace_scope.cc">
  <Filter>Source Files</Filter>
</ClCompile>
</ItemGroup>
<ItemGroup>
<ClInclude Include="..\..\src\base\basictypes.h">

@@ -206,7 +206,6 @@
<ClCompile Include="..\..\src\base\proc_maps_iterator.cc" />
<ClCompile Include="..\..\src\central_freelist.cc" />
<ClCompile Include="..\..\src\common.cc" />
<ClCompile Include="..\..\src\fake_stacktrace_scope.cc" />
<ClCompile Include="..\..\src\internal_logging.cc" />
<ClCompile Include="..\..\src\malloc_extension.cc" />
<ClCompile Include="..\..\src\malloc_hook.cc" />

@@ -38,9 +38,6 @@
<ClCompile Include="..\..\src\common.cc">
  <Filter>Source Files</Filter>
</ClCompile>
<ClCompile Include="..\..\src\fake_stacktrace_scope.cc">
  <Filter>Source Files</Filter>
</ClCompile>
<ClCompile Include="..\..\src\internal_logging.cc">
  <Filter>Source Files</Filter>
</ClCompile>