mirror of https://github.com/gperftools/gperftools, synced 2025-01-18 12:51:02 +00:00
Fix data race setting size_left_ in ThreadCache::SetMaxSize
This commit fixes a data race in ThreadCache::SetMaxSize: ThreadCache::size_left_ is removed and ThreadCache::size_ is added. size_left_ was introduced as an optimization and is updated in several ThreadCache member functions, including ThreadCache::SetMaxSize, so thread A could update thread B's size_left_ via SetMaxSize without any protection or synchronization. There should be no data race around ThreadCache::size_, since it is not accessed by multiple threads. The tail-call optimization in tc_{malloc, new, free} is kept, and no other logic is affected.
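To make the race concrete, here is a minimal, self-contained C++ sketch of the before/after shape of SetMaxSize; the ThreadCacheSketch class and its members below are simplified stand-ins for illustration, not the gperftools sources.

// Minimal sketch, assuming simplified members; not the gperftools code.
#include <cstdint>

class ThreadCacheSketch {
 public:
  // Old form: thread A could call this on thread B's cache. It reads
  // Size() and writes size_left_, while thread B concurrently reads and
  // writes size_left_ in its own Allocate()/Deallocate() paths -- an
  // unsynchronized read-modify-write of one field from two threads.
  void SetMaxSizeOld(int32_t new_max_size) {
    int32_t size = max_size_ - size_left_;  // old Size()
    max_size_ = new_max_size;
    size_left_ = max_size_ - size;          // racy cross-thread write
  }

  // New form (as in this diff): only max_size_ is assigned here;
  // size_ is read and written only by the owning thread, so the
  // size_left_ read-modify-write disappears.
  void SetMaxSizeNew(int32_t new_max_size) {
    max_size_ = new_max_size;
  }

 private:
  int32_t size_left_ = 0;  // old representation: max_size_ - Size()
  int32_t size_ = 0;       // new representation: bytes held in the cache
  int32_t max_size_ = 0;
};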
parent 235471f965
commit 6e3a702fb9
@@ -76,7 +76,7 @@ bool ThreadCache::tsd_inited_ = false;
 pthread_key_t ThreadCache::heap_key_;
 
 void ThreadCache::Init(pthread_t tid) {
-  size_left_ = 0;
+  size_ = 0;
 
   max_size_ = 0;
   IncreaseCacheLimitLocked();
@@ -126,7 +126,7 @@ void* ThreadCache::FetchFromCentralCache(uint32 cl, int32_t byte_size) {
 
   ASSERT((start == NULL) == (fetch_count == 0));
   if (--fetch_count >= 0) {
-    size_left_ -= byte_size * fetch_count;
+    size_ += byte_size * fetch_count;
     list->PushRange(fetch_count, SLL_Next(start), end);
   }
 
@@ -152,7 +152,7 @@ void* ThreadCache::FetchFromCentralCache(uint32 cl, int32_t byte_size) {
 }
 
 void ThreadCache::ListTooLong(FreeList* list, uint32 cl) {
-  size_left_ -= list->object_size();
+  size_ += list->object_size();
 
   const int batch_size = Static::sizemap()->num_objects_to_move(cl);
   ReleaseToCentralCache(list, cl, batch_size);
@@ -176,7 +176,7 @@ void ThreadCache::ListTooLong(FreeList* list, uint32 cl) {
     }
   }
 
-  if (PREDICT_FALSE(size_left_ < 0)) {
+  if (PREDICT_FALSE(size_ > max_size_)) {
     Scavenge();
   }
 }
@@ -199,7 +199,7 @@ void ThreadCache::ReleaseToCentralCache(FreeList* src, uint32 cl, int N) {
   void *tail, *head;
   src->PopRange(N, &head, &tail);
   Static::central_cache()[cl].InsertRange(head, tail, N);
-  size_left_ += delta_bytes;
+  size_ -= delta_bytes;
 }
 
 // Release idle memory to the central cache
@@ -81,7 +81,7 @@ class ThreadCache {
   int freelist_length(uint32 cl) const { return list_[cl].length(); }
 
   // Total byte size in cache
-  size_t Size() const { return max_size_ - size_left_; }
+  size_t Size() const { return size_; }
 
   // Allocate an object of the given size and class. The size given
   // must be the same as the size of the class in the size map.
@@ -323,12 +323,8 @@
 
   FreeList list_[kClassSizesMax];  // Array indexed by size-class
 
-  // Thread cache size is max_size_ - size_left_. We use such indirect
-  // representation to speed up some key operations.
-  //
-  // I.e. size_left_ < 0 --> Scavenge()
-  int32 size_left_;
-  int32 max_size_;
+  int32 size_;      // Combined size of data
+  int32 max_size_;  // size_ > max_size_ --> Scavenge()
 
   // We sample allocations, biased by the size of the allocation
   Sampler sampler_;  // A sampler
@@ -381,30 +377,28 @@ inline ATTRIBUTE_ALWAYS_INLINE void* ThreadCache::Allocate(size_t size, uint32 c
   if (!list->TryPop(&rv)) {
     return FetchFromCentralCache(cl, size);
   }
-  size_left_ += size;
+  size_ -= size;
   return rv;
 }
 
 inline ATTRIBUTE_ALWAYS_INLINE void ThreadCache::Deallocate(void* ptr, uint32 cl) {
   ASSERT(list_[cl].max_length() > 0);
   FreeList* list = &list_[cl];
 
   // This catches back-to-back frees of allocs in the same size
   // class. A more comprehensive (and expensive) test would be to walk
   // the entire freelist. But this might be enough to find some bugs.
   ASSERT(ptr != list->Next());
 
-  int32_t size_left = size_left_;
   uint32_t length = list->Push(ptr);
 
   if (PREDICT_FALSE(length > list->max_length())) {
     ListTooLong(list, cl);
     return;
   }
 
-  int32_t object_size = list->object_size();
-  size_left_ = size_left -= object_size;
-  if (PREDICT_FALSE(size_left < 0)) {
+  size_ += list->object_size();
+  if (PREDICT_FALSE(size_ > max_size_)){
     Scavenge();
   }
 }
@@ -484,9 +478,7 @@ inline bool ThreadCache::IsUseEmergencyMalloc() {
 }
 
 inline void ThreadCache::SetMaxSize(int32 new_max_size) {
-  int32 size = Size();
   max_size_ = new_max_size;
-  size_left_ = max_size_ - size;
 }
 
 #ifndef NO_TCMALLOC_SAMPLES