amputate HeapProfileTable::Snapshot

These are remnants of the heap checker.
Aliaksei Kandratsenka 2024-09-29 13:51:38 -04:00
parent 5035d10c85
commit d87c1a5c77
2 changed files with 0 additions and 206 deletions


@@ -337,141 +337,3 @@ void HeapProfileTable::CleanupOldProfiles(const char* prefix) {
  RAW_LOG(WARNING, "Unable to remove old heap profiles (can't run glob())");
#endif
}

HeapProfileTable::Snapshot* HeapProfileTable::TakeSnapshot() {
  Snapshot* s = new (alloc_(sizeof(Snapshot))) Snapshot(alloc_, dealloc_);
  address_map_->Iterate([s] (const void* ptr, AllocValue* v) {
    s->Add(ptr, *v);
  });
  return s;
}

void HeapProfileTable::ReleaseSnapshot(Snapshot* s) {
  s->~Snapshot();
  dealloc_(s);
}

HeapProfileTable::Snapshot* HeapProfileTable::NonLiveSnapshot(
    Snapshot* base) {
  RAW_VLOG(2, "NonLiveSnapshot input: %" PRId64 " %" PRId64 "\n",
           total_.allocs - total_.frees,
           total_.alloc_size - total_.free_size);

  Snapshot* s = new (alloc_(sizeof(Snapshot))) Snapshot(alloc_, dealloc_);
  address_map_->Iterate([&] (const void* ptr, AllocValue* v) {
    if (v->live()) {
      v->set_live(false);
    } else {
      if (base != nullptr && base->map_.Find(ptr) != nullptr) {
        // Present in arg->base, so do not save
      } else {
        s->Add(ptr, *v);
      }
    }
  });
  RAW_VLOG(2, "NonLiveSnapshot output: %" PRId64 " %" PRId64 "\n",
           s->total_.allocs - s->total_.frees,
           s->total_.alloc_size - s->total_.free_size);
  return s;
}
// Information kept per unique bucket seen
struct HeapProfileTable::Snapshot::Entry {
  int count;
  size_t bytes;
  Bucket* bucket;
  Entry() : count(0), bytes(0) { }

  // Order by decreasing bytes
  bool operator<(const Entry& x) const {
    return this->bytes > x.bytes;
  }
};
void HeapProfileTable::Snapshot::ReportLeaks(const char* checker_name,
                                             const char* filename,
                                             bool should_symbolize) {
  // This is only used by the heap leak checker, but is intimately
  // tied to the allocation map that belongs in this module and is
  // therefore placed here.
  RAW_LOG(ERROR, "Leak check %s detected leaks of %zu bytes "
          "in %zu objects",
          checker_name,
          size_t(total_.alloc_size),
          size_t(total_.allocs));

  // Group objects by Bucket
  std::map<Bucket*, Entry> buckets;
  map_.Iterate([&] (const void* ptr, AllocValue* v) {
    Entry* e = &buckets[v->bucket()]; // Creates empty Entry first time
    e->bucket = v->bucket();
    e->count++;
    e->bytes += v->bytes;
  });

  // Sort buckets by decreasing leaked size
  const int n = buckets.size();
  Entry* entries = new Entry[n];
  int dst = 0;
  for (std::map<Bucket*,Entry>::const_iterator iter = buckets.begin();
       iter != buckets.end();
       ++iter) {
    entries[dst++] = iter->second;
  }
  std::sort(entries, entries + n);

  // Report a bounded number of leaks to keep the leak report from
  // growing too long.
  const int to_report =
      (FLAGS_heap_check_max_leaks > 0 &&
       n > FLAGS_heap_check_max_leaks) ? FLAGS_heap_check_max_leaks : n;
  RAW_LOG(ERROR, "The %d largest leaks:", to_report);

  // Print
  SymbolTable symbolization_table;
  for (int i = 0; i < to_report; i++) {
    const Entry& e = entries[i];
    for (int j = 0; j < e.bucket->depth; j++) {
      symbolization_table.Add(e.bucket->stack[j]);
    }
  }
  if (should_symbolize)
    symbolization_table.Symbolize();

  {
    auto do_log = +[] (const char* buf, size_t amt) {
      RAW_LOG(ERROR, "%.*s", amt, buf);
    };
    constexpr int kBufSize = 2<<10;
    tcmalloc::WriteFnWriter<decltype(do_log), kBufSize> printer{do_log};

    for (int i = 0; i < to_report; i++) {
      const Entry& e = entries[i];
      printer.AppendF("Leak of %zu bytes in %d objects allocated from:\n",
                      e.bytes, e.count);
      for (int j = 0; j < e.bucket->depth; j++) {
        const void* pc = e.bucket->stack[j];
        printer.AppendF("\t@ %" PRIxPTR " %s\n",
                        reinterpret_cast<uintptr_t>(pc), symbolization_table.GetSymbol(pc));
      }
    }
  }

  if (to_report < n) {
    RAW_LOG(ERROR, "Skipping leaks numbered %d..%d",
            to_report, n-1);
  }
  delete[] entries;

  if (!HeapProfileTable::WriteProfile(filename, total_, &map_)) {
    RAW_LOG(ERROR, "Could not write pprof profile to %s", filename);
  }
}
void HeapProfileTable::Snapshot::ReportIndividualObjects() {
  map_.Iterate([] (const void* ptr, AllocValue* v) {
    // Perhaps also log the allocation stack trace (unsymbolized)
    // on this line in case somebody finds it useful.
    RAW_LOG(ERROR, "leaked %zu byte object %p", v->bytes, ptr);
  });
}
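The removed Snapshot::ReportLeaks() above boils down to a small pipeline: group the leaked objects by their allocation bucket, sum bytes and counts per bucket, sort by decreasing leaked size, and print at most FLAGS_heap_check_max_leaks entries. Below is a minimal standalone sketch of that pipeline using only the standard library; CallSite, LeakedObject, and ReportLeaksSketch are invented stand-ins for the profiler's Bucket, AllocationMap entries, and RAW_LOG/SymbolTable reporting, not gperftools code.

// Standalone sketch of the group / sort / truncate logic in the removed
// Snapshot::ReportLeaks(). All names here are illustrative only.
#include <algorithm>
#include <cstddef>
#include <cstdio>
#include <map>
#include <vector>

struct CallSite {           // stand-in for HeapProfileTable::Bucket
  const char* description;  // the real code keeps a raw stack trace instead
};

struct LeakedObject {       // stand-in for one (ptr, AllocValue) entry
  const CallSite* site;
  std::size_t bytes;
};

struct Entry {              // mirrors Snapshot::Entry
  int count = 0;
  std::size_t bytes = 0;
  const CallSite* site = nullptr;
  // Order by decreasing bytes, as in the removed code.
  bool operator<(const Entry& x) const { return bytes > x.bytes; }
};

void ReportLeaksSketch(const std::vector<LeakedObject>& leaks, int max_leaks) {
  // Group objects by call site.
  std::map<const CallSite*, Entry> buckets;
  for (const LeakedObject& obj : leaks) {
    Entry& e = buckets[obj.site];   // creates an empty Entry the first time
    e.site = obj.site;
    e.count++;
    e.bytes += obj.bytes;
  }

  // Sort by decreasing leaked size.
  std::vector<Entry> entries;
  for (const auto& kv : buckets) entries.push_back(kv.second);
  std::sort(entries.begin(), entries.end());

  // Report a bounded number of the largest leaks.
  const int n = static_cast<int>(entries.size());
  const int to_report = (max_leaks > 0 && n > max_leaks) ? max_leaks : n;
  std::printf("The %d largest leaks:\n", to_report);
  for (int i = 0; i < to_report; i++) {
    std::printf("Leak of %zu bytes in %d objects allocated from: %s\n",
                entries[i].bytes, entries[i].count, entries[i].site->description);
  }
  if (to_report < n) {
    std::printf("Skipping leaks numbered %d..%d\n", to_report, n - 1);
  }
}

The matching declarations disappear from the header in the hunk below.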


@@ -137,26 +137,7 @@ class HeapProfileTable {
  // Cleanup any old profile files matching prefix + ".*" + kFileExt.
  static void CleanupOldProfiles(const char* prefix);

  // Return a snapshot of the current contents of *this.
  // Caller must call ReleaseSnapshot() on result when no longer needed.
  // The result is only valid while this exists and until
  // the snapshot is discarded by calling ReleaseSnapshot().
  class Snapshot;
  Snapshot* TakeSnapshot();

  // Release a previously taken snapshot. snapshot must not
  // be used after this call.
  void ReleaseSnapshot(Snapshot* snapshot);

  // Return a snapshot of every non-live, non-ignored object in *this.
  // If "base" is non-nullptr, skip any objects present in "base". As
  // a side-effect, clears the "live" bit on every live object in
  // *this. Caller must call ReleaseSnapshot() on result when no
  // longer needed.
  Snapshot* NonLiveSnapshot(Snapshot* base);

 private:
  // data types ----------------------------

  // Hash table bucket to hold (de)allocation stats
@@ -246,53 +227,4 @@ class HeapProfileTable {
  DISALLOW_COPY_AND_ASSIGN(HeapProfileTable);
};

class HeapProfileTable::Snapshot {
 public:
  const Stats& total() const { return total_; }

  // Report anything in this snapshot as a leak.
  // May use new/delete for temporary storage.
  // If should_symbolize is true, will fork (which is not threadsafe)
  // to turn addresses into symbol names. Set to false for maximum safety.
  // Also writes a heap profile to "filename" that contains
  // all of the objects in this snapshot.
  void ReportLeaks(const char* checker_name, const char* filename,
                   bool should_symbolize);

  // Report the addresses of all leaked objects.
  // May use new/delete for temporary storage.
  void ReportIndividualObjects();

  bool Empty() const {
    return (total_.allocs == 0) && (total_.alloc_size == 0);
  }

 private:
  friend class HeapProfileTable;

  // Total count/size are stored in a Bucket so we can reuse UnparseBucket
  Bucket total_;

  // We share the Buckets managed by the parent table, but have our
  // own object->bucket map.
  AllocationMap map_;

  Snapshot(Allocator alloc, DeAllocator dealloc) : map_(alloc, dealloc) {
    memset(&total_, 0, sizeof(total_));
  }

  // Callback used to populate a Snapshot object with entries found
  // in another allocation map.
  inline void Add(const void* ptr, const AllocValue& v) {
    map_.Insert(ptr, v);
    total_.allocs++;
    total_.alloc_size += v.bytes;
  }

  // Helpers for sorting and generating leak reports
  struct Entry;

  DISALLOW_COPY_AND_ASSIGN(Snapshot);
};

#endif // BASE_HEAP_PROFILE_TABLE_H_
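The comments stripped from the header above spell out the contract the heap checker relied on: TakeSnapshot() copies every tracked allocation, and NonLiveSnapshot(base) clears each object's "live" bit as a side effect while collecting only the objects that are neither live nor present in base. Here is a minimal standalone sketch of those semantics; AllocInfo, AllocationMap, and SnapshotSketch are invented for illustration and stand in for the profiler's AddressMap, AllocValue, and custom allocator plumbing.

// Standalone sketch of the removed snapshot semantics. Not gperftools code.
#include <cstddef>
#include <unordered_map>

struct AllocInfo {
  std::size_t bytes = 0;
  bool live = false;
};

using AllocationMap = std::unordered_map<const void*, AllocInfo>;

struct SnapshotSketch {
  AllocationMap map;
  std::size_t allocs = 0;
  std::size_t alloc_size = 0;

  void Add(const void* ptr, const AllocInfo& v) {   // mirrors Snapshot::Add
    map.emplace(ptr, v);
    allocs++;
    alloc_size += v.bytes;
  }
};

// Copy the current contents of the table, like TakeSnapshot().
SnapshotSketch TakeSnapshotSketch(const AllocationMap& table) {
  SnapshotSketch s;
  for (const auto& kv : table) s.Add(kv.first, kv.second);
  return s;
}

// Collect every non-live object that is absent from *base, clearing the
// live bit on live objects along the way, like NonLiveSnapshot(base).
SnapshotSketch NonLiveSnapshotSketch(AllocationMap& table,
                                     const SnapshotSketch* base) {
  SnapshotSketch s;
  for (auto& kv : table) {
    if (kv.second.live) {
      kv.second.live = false;        // side effect: reset the live bit
    } else if (base == nullptr || base->map.find(kv.first) == base->map.end()) {
      s.Add(kv.first, kv.second);    // not live and not in the baseline
    }
  }
  return s;
}

A caller would take a baseline snapshot first, later build a non-live snapshot against that baseline, report whatever remains as leaks, and then release both, which is the TakeSnapshot() / NonLiveSnapshot() / ReleaseSnapshot() sequence the removed comments describe.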