mass-replace NULL -> nullptr

parent 91b96b92a2
commit be755a8d3c
@@ -92,7 +92,7 @@ int main(int argc, char *argv[]) {
   std::vector<pthread_t> threads(thread_count);

   for (int i = 0; i < thread_count; i++) {
-    int rv = pthread_create(&threads[i], NULL,
+    int rv = pthread_create(&threads[i], nullptr,
                             run_tramp,
                             reinterpret_cast<void *>(given_depth));
     if (rv) {
@@ -102,7 +102,7 @@ int main(int argc, char *argv[]) {
   }
   run_tramp(reinterpret_cast<void *>(given_depth));
   for (int i = 0; i < thread_count; i++) {
-    pthread_join(threads[i], NULL);
+    pthread_join(threads[i], nullptr);
   }
   return 0;
 }
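Why this substitution is more than cosmetic: NULL is an integer constant in C++, so it participates in integer overload resolution, while nullptr has its own type (std::nullptr_t) that converts only to pointer types. A minimal illustration, independent of this codebase:

    #include <iostream>

    static void take(int)   { std::cout << "int overload\n"; }
    static void take(char*) { std::cout << "pointer overload\n"; }

    int main() {
      // take(NULL);    // picks take(int) or is ambiguous, depending on how NULL is defined
      take(nullptr);    // always the pointer overload: nullptr cannot convert to int
      return 0;
    }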
@@ -148,7 +148,7 @@ static double measure_once(struct internal_bench *b, long iterations)
   int rv;
   double time;

-  rv = gettimeofday(&tv_before, NULL);
+  rv = gettimeofday(&tv_before, nullptr);
   if (rv) {
     perror("gettimeofday");
     abort();
@@ -156,7 +156,7 @@ static double measure_once(struct internal_bench *b, long iterations)

   run_body(b, iterations);

-  rv = gettimeofday(&tv_after, NULL);
+  rv = gettimeofday(&tv_after, nullptr);
   if (rv) {
     perror("gettimeofday");
     abort();
@@ -78,9 +78,9 @@
 extern "C" {
 #endif
   /*
-   * Returns a human-readable version string.  If major, minor,
-   * and/or patch are not NULL, they are set to the major version,
-   * minor version, and patch-code (a string, usually "").
+   * Returns a human-readable version string.  If major, minor, and/or
+   * patch are not nullptr, they are set to the major version, minor
+   * version, and patch-code (a string, usually "").
    */
   PERFTOOLS_DLL_DECL const char* tc_version(int* major, int* minor,
                                             const char** patch) PERFTOOLS_NOTHROW;
@@ -111,7 +111,7 @@ class AddressMap {
   AddressMap(Allocator alloc, DeAllocator dealloc);
   ~AddressMap();

-  // If the map contains an entry for "key", return it. Else return NULL.
+  // If the map contains an entry for "key", return it. Else return nullptr.
   inline const Value* Find(Key key) const;
   inline Value* FindMutable(Key key);

@@ -130,7 +130,7 @@ class AddressMap {
   // (at its start or inside of it, but not at the end),
   // return the address of the associated value
   // and store its key in "*res_key".
-  // Else return NULL.
+  // Else return nullptr.
   // max_size specifies largest range size possibly in existence now.
   typedef size_t (*ValueSizeFunc)(const Value& v);
   const Value* FindInside(ValueSizeFunc size_func, size_t max_size,
@@ -203,14 +203,14 @@ class AddressMap {

   // Find cluster object for specified address.  If not found
   // and "create" is true, create the object.  If not found
-  // and "create" is false, return NULL.
+  // and "create" is false, return nullptr.
   //
   // This method is bitwise-const if create is false.
   Cluster* FindCluster(Number address, bool create) {
     // Look in hashtable
     const Number cluster_id = address >> (kBlockBits + kClusterBits);
     const int h = HashInt(cluster_id);
-    for (Cluster* c = hashtable_[h]; c != NULL; c = c->next) {
+    for (Cluster* c = hashtable_[h]; c != nullptr; c = c->next) {
       if (c->id == cluster_id) {
         return c;
       }
@@ -224,7 +224,7 @@ class AddressMap {
       hashtable_[h] = c;
       return c;
     }
-    return NULL;
+    return nullptr;
   }

   // Return the block ID for an address within its cluster
@@ -264,17 +264,17 @@ class AddressMap {

 template <class Value>
 AddressMap<Value>::AddressMap(Allocator alloc, DeAllocator dealloc)
-  : free_(NULL),
+  : free_(nullptr),
     alloc_(alloc),
     dealloc_(dealloc),
-    allocated_(NULL) {
+    allocated_(nullptr) {
   hashtable_ = New<Cluster*>(kHashSize);
 }

 template <class Value>
 AddressMap<Value>::~AddressMap() {
   // De-allocate all of the objects we allocated
-  for (Object* obj = allocated_; obj != NULL; /**/) {
+  for (Object* obj = allocated_; obj != nullptr; /**/) {
     Object* next = obj->next;
     (*dealloc_)(obj);
     obj = next;
@@ -290,14 +290,14 @@ template <class Value>
 inline Value* AddressMap<Value>::FindMutable(Key key) {
   const Number num = reinterpret_cast<Number>(key);
   const Cluster* const c = FindCluster(num, false/*do not create*/);
-  if (c != NULL) {
-    for (Entry* e = c->blocks[BlockID(num)]; e != NULL; e = e->next) {
+  if (c != nullptr) {
+    for (Entry* e = c->blocks[BlockID(num)]; e != nullptr; e = e->next) {
       if (e->key == key) {
         return &e->value;
       }
     }
   }
-  return NULL;
+  return nullptr;
 }

 template <class Value>
@@ -307,7 +307,7 @@ void AddressMap<Value>::Insert(Key key, Value value) {

   // Look in linked-list for this block
   const int block = BlockID(num);
-  for (Entry* e = c->blocks[block]; e != NULL; e = e->next) {
+  for (Entry* e = c->blocks[block]; e != nullptr; e = e->next) {
     if (e->key == key) {
       e->value = value;
       return;
@@ -315,7 +315,7 @@ void AddressMap<Value>::Insert(Key key, Value value) {
   }

   // Create entry
-  if (free_ == NULL) {
+  if (free_ == nullptr) {
     // Allocate a new batch of entries and add to free-list
     Entry* array = New<Entry>(ALLOC_COUNT);
     for (int i = 0; i < ALLOC_COUNT-1; i++) {
@@ -336,8 +336,8 @@ template <class Value>
 bool AddressMap<Value>::FindAndRemove(Key key, Value* removed_value) {
   const Number num = reinterpret_cast<Number>(key);
   Cluster* const c = FindCluster(num, false/*do not create*/);
-  if (c != NULL) {
-    for (Entry** p = &c->blocks[BlockID(num)]; *p != NULL; p = &(*p)->next) {
+  if (c != nullptr) {
+    for (Entry** p = &c->blocks[BlockID(num)]; *p != nullptr; p = &(*p)->next) {
       Entry* e = *p;
       if (e->key == key) {
         *removed_value = e->value;
@@ -360,11 +360,11 @@ const Value* AddressMap<Value>::FindInside(ValueSizeFunc size_func,
   Number num = key_num;  // we'll move this to move back through the clusters
   while (1) {
     const Cluster* c = FindCluster(num, false/*do not create*/);
-    if (c != NULL) {
+    if (c != nullptr) {
       while (1) {
         const int block = BlockID(num);
         bool had_smaller_key = false;
-        for (const Entry* e = c->blocks[block]; e != NULL; e = e->next) {
+        for (const Entry* e = c->blocks[block]; e != nullptr; e = e->next) {
           const Number e_num = reinterpret_cast<Number>(e->key);
           if (e_num <= key_num) {
             if (e_num == key_num ||  // to handle 0-sized ranges
@@ -375,20 +375,21 @@ const Value* AddressMap<Value>::FindInside(ValueSizeFunc size_func,
             had_smaller_key = true;
           }
         }
-        if (had_smaller_key) return NULL;  // got a range before 'key'
-                                           // and it did not contain 'key'
+        if (had_smaller_key) return nullptr;  // got a range before
+                                              // 'key' and it did not
+                                              // contain 'key'
         if (block == 0) break;
         // try address-wise previous block
         num |= kBlockSize - 1;  // start at the last addr of prev block
         num -= kBlockSize;
-        if (key_num - num > max_size) return NULL;
+        if (key_num - num > max_size) return nullptr;
       }
     }
-    if (num < kClusterSize) return NULL;  // first cluster
+    if (num < kClusterSize) return nullptr;  // first cluster
     // go to address-wise previous cluster to try
     num |= kClusterSize - 1;  // start at the last block of previous cluster
     num -= kClusterSize;
-    if (key_num - num > max_size) return NULL;
+    if (key_num - num > max_size) return nullptr;
     // Having max_size to limit the search is crucial: else
     // we have to traverse a lot of empty clusters (or blocks).
     // We can avoid needing max_size if we put clusters into
@@ -402,9 +403,9 @@ void AddressMap<Value>::Iterate(tcmalloc::FunctionRef<void(Key, Value*)> body) const {
   // We could optimize this by traversing only non-empty clusters and/or blocks
   // but it does not speed up heap-checker noticeably.
   for (int h = 0; h < kHashSize; ++h) {
-    for (const Cluster* c = hashtable_[h]; c != NULL; c = c->next) {
+    for (const Cluster* c = hashtable_[h]; c != nullptr; c = c->next) {
       for (int b = 0; b < kClusterBlocks; ++b) {
-        for (Entry* e = c->blocks[b]; e != NULL; e = e->next) {
+        for (Entry* e = c->blocks[b]; e != nullptr; e = e->next) {
           body(e->key, &e->value);
         }
       }
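A minimal usage sketch of the AddressMap API edited above, assuming (per the hunks) that Key is void*, that Find returns nullptr for absent keys, and that the constructor takes raw alloc/dealloc callbacks; the callback names here are hypothetical:

    #include <cstdlib>

    static void* RawAlloc(size_t n)  { return std::malloc(n); }
    static void  RawDealloc(void* p) { std::free(p); }

    void Demo() {
      AddressMap<int> map(RawAlloc, RawDealloc);
      int dummy;
      map.Insert(&dummy, 42);
      const int* v = map.Find(&dummy);  // nullptr when the key is absent
      if (v != nullptr) {
        // *v == 42
      }
    }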
@@ -135,21 +135,21 @@ namespace tcmalloc {
   if (!value) {
     return def;
   }
-  return strtol(value, NULL, 10);
+  return strtol(value, nullptr, 10);
 }

 inline long long StringToLongLong(const char *value, long long def) {
   if (!value) {
     return def;
   }
-  return strtoll(value, NULL, 10);
+  return strtoll(value, nullptr, 10);
 }

 inline double StringToDouble(const char *value, double def) {
   if (!value) {
     return def;
   }
-  return strtod(value, NULL);
+  return strtod(value, nullptr);
 }
 }
 }
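The helpers above pass nullptr as strtol's endptr argument, meaning the caller does not care where parsing stopped. A standalone equivalent of the pattern:

    #include <cstdlib>

    // Parse a decimal long from an environment value, with a default.
    long StringToLong(const char* value, long def) {
      return value ? std::strtol(value, nullptr, 10) : def;
    }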
@@ -166,7 +166,7 @@ const ElfW(Verdef) *ElfMemImage::GetVerdef(int index) const {
         reinterpret_cast<const ElfW(Verdef) *>(version_definition_as_char +
                                                version_definition->vd_next);
   }
-  return version_definition->vd_ndx == index ? version_definition : NULL;
+  return version_definition->vd_ndx == index ? version_definition : nullptr;
 }

 const ElfW(Verdaux) *ElfMemImage::GetVerdefAux(
@@ -180,12 +180,12 @@ const char *ElfMemImage::GetVerstr(ElfW(Word) offset) const {
 }

 void ElfMemImage::Init(const void *base) {
-  ehdr_      = NULL;
-  dynsym_    = NULL;
-  dynstr_    = NULL;
-  versym_    = NULL;
-  verdef_    = NULL;
-  hash_      = NULL;
+  ehdr_      = nullptr;
+  dynsym_    = nullptr;
+  dynstr_    = nullptr;
+  versym_    = nullptr;
+  verdef_    = nullptr;
+  hash_      = nullptr;
   strsize_   = 0;
   verdefnum_ = 0;
   link_base_ = ~0L;  // Sentinel: PT_LOAD .p_vaddr can't possibly be this.
@@ -229,7 +229,7 @@ void ElfMemImage::Init(const void *base) {
   }

   ehdr_ = reinterpret_cast<const ElfW(Ehdr) *>(base);
-  const ElfW(Phdr) *dynamic_program_header = NULL;
+  const ElfW(Phdr) *dynamic_program_header = nullptr;
   for (int i = 0; i < ehdr_->e_phnum; ++i) {
     const ElfW(Phdr) *const program_header = GetPhdr(i);
     switch (program_header->p_type) {
@@ -406,7 +406,7 @@ void ElfMemImage::SymbolIterator::Update(int increment) {
   CHECK(symbol && version_symbol);
   const char *const symbol_name = image->GetDynstr(symbol->st_name);
   const ElfW(Versym) version_index = version_symbol[0] & VERSYM_VERSION;
-  const ElfW(Verdef) *version_definition = NULL;
+  const ElfW(Verdef) *version_definition = nullptr;
   const char *version_name = "";
   if (symbol->st_shndx == SHN_UNDEF) {
     // Undefined symbols reference DT_VERNEED, not DT_VERDEF, and
@@ -86,7 +86,7 @@ class ElfMemImage {

   explicit ElfMemImage(const void *base);
   void Init(const void *base);
-  bool IsPresent() const { return ehdr_ != NULL; }
+  bool IsPresent() const { return ehdr_ != nullptr; }
   const ElfW(Phdr)* GetPhdr(int index) const;
   const ElfW(Sym)* GetDynsym(int index) const;
   const ElfW(Versym)* GetVersym(int index) const;
@@ -103,14 +103,14 @@ class ElfMemImage {
   // Look up versioned dynamic symbol in the image.
   // Returns false if image is not present, or doesn't contain given
   // symbol/version/type combination.
-  // If info_out != NULL, additional details are filled in.
+  // If info_out != nullptr, additional details are filled in.
   bool LookupSymbol(const char *name, const char *version,
                     int symbol_type, SymbolInfo *info_out) const;

   // Find info about symbol (if any) which overlaps given address.
   // Returns true if symbol was found; false if image isn't present
   // or doesn't have a symbol overlapping given address.
-  // If info_out != NULL, additional details are filled in.
+  // If info_out != nullptr, additional details are filled in.
   bool LookupSymbolByAddress(const void *address, SymbolInfo *info_out) const;

  private:
@@ -60,14 +60,14 @@ class GoogleInitializer {
   namespace {                                                          \
     static void google_init_module_##name () { body; }                 \
     GoogleInitializer google_initializer_module_##name(#name,          \
-            google_init_module_##name, NULL);                          \
+            google_init_module_##name, nullptr);                       \
   }

 #define REGISTER_MODULE_DESTRUCTOR(name, body)                         \
   namespace {                                                          \
     static void google_destruct_module_##name () { body; }             \
     GoogleInitializer google_destructor_module_##name(#name,           \
-            NULL, google_destruct_module_##name);                      \
+            nullptr, google_destruct_module_##name);                   \
   }

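For context, the initializer macro above is used like this (the module name here is hypothetical); the braced body becomes the constructor callback and the destructor slot stays nullptr:

    REGISTER_MODULE_INITIALIZER(my_module, {
      // one-time setup, run during static initialization
    });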
@@ -221,7 +221,7 @@ static volatile int *sig_pids, sig_num_threads;
 static void SignalHandler(int signum, siginfo_t *si, void *data) {
   RAW_LOG(ERROR, "Got fatal signal %d inside ListerThread", signum);

-  if (sig_pids != NULL) {
+  if (sig_pids != nullptr) {
     if (signum == SIGABRT) {
       prctl(PR_SET_PDEATHSIG, 0);
       while (sig_num_threads-- > 0) {
@@ -235,7 +235,7 @@ static void SignalHandler(int signum, siginfo_t *si, void *data) {
       TCMalloc_ResumeAllProcessThreads(sig_num_threads, (int *)sig_pids);
     }
   }
-  sig_pids = NULL;
+  sig_pids = nullptr;

   syscall(SYS_exit, signum == SIGABRT ? 1 : 2);
 }
@@ -502,7 +502,7 @@ static int ListerThread(struct ListerParams *args) {
 detach_threads:
   /* Resume all threads prior to retrying the operation */
   TCMalloc_ResumeAllProcessThreads(num_threads, pids);
-  sig_pids = NULL;
+  sig_pids = nullptr;
   num_threads = 0;
   sig_num_threads = num_threads;
   max_threads += 100;
@@ -50,8 +50,8 @@ RawFD RawOpenForWriting(const char* filename) {
   // that ever becomes a problem then we ought to compute the absolute
   // path on its behalf (perhaps the ntdll/kernel function isn't aware
   // of the working directory?)
-  RawFD fd = CreateFileA(filename, GENERIC_WRITE, 0, NULL,
-                         CREATE_ALWAYS, 0, NULL);
+  RawFD fd = CreateFileA(filename, GENERIC_WRITE, 0, nullptr,
+                         CREATE_ALWAYS, 0, nullptr);
   if (fd != kIllegalRawFD && GetLastError() == ERROR_ALREADY_EXISTS)
     SetEndOfFile(fd);    // truncate the existing file
   return fd;
@@ -60,7 +60,7 @@ RawFD RawOpenForWriting(const char* filename) {
 void RawWrite(RawFD handle, const char* buf, size_t len) {
   while (len > 0) {
     DWORD wrote;
-    BOOL ok = WriteFile(handle, buf, len, &wrote, NULL);
+    BOOL ok = WriteFile(handle, buf, len, &wrote, nullptr);
     // We do not use an asynchronous file handle, so ok==false means an error
     if (!ok) break;
     buf += wrote;
@@ -118,9 +118,9 @@ enum { DEBUG_MODE = 1 };
 // Helper macro for binary operators; prints the two values on error
 // Don't use this macro directly in your code, use CHECK_EQ et al below

-// WARNING: These don't compile correctly if one of the arguments is a pointer
-// and the other is NULL. To work around this, simply static_cast NULL to the
-// type of the desired pointer.
+// WARNING: These don't compile correctly if one of the arguments is a
+// pointer and the other is nullptr. To work around this, simply
+// static_cast nullptr to the type of the desired pointer.

 // TODO(jandrews): Also print the values in case of failure. Requires some
 // sort of type-sensitive ToString() function.
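The workaround named in the warning looks like this at a call site (a sketch, assuming this header's CHECK_EQ macro is in scope):

    void Check(int* p) {
      // CHECK_EQ(p, nullptr);                   // may not compile, per the warning above
      CHECK_EQ(p, static_cast<int*>(nullptr));   // both sides now have type int*
    }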
@@ -102,14 +102,14 @@ namespace {
 // Finds |c| in |text|, and assign '\0' at the found position.
 // The original character at the modified position should be |c|.
 // A pointer to the modified position is stored in |endptr|.
-// |endptr| should not be NULL.
+// |endptr| should not be nullptr.
 bool ExtractUntilChar(char *text, int c, char **endptr) {
-  CHECK_NE(text, NULL);
-  CHECK_NE(endptr, NULL);
+  CHECK_NE(text, nullptr);
+  CHECK_NE(endptr, nullptr);
   char *found;
   found = strchr(text, c);
-  if (found == NULL) {
-    *endptr = NULL;
+  if (found == nullptr) {
+    *endptr = nullptr;
     return false;
   }

@@ -152,8 +152,8 @@ inline uint64_t StringToInteger<uint64_t>(char *text, char **endptr, int base) {
 template<typename T>
 T StringToIntegerUntilChar(
     char *text, int base, int c, char **endptr_result) {
-  CHECK_NE(endptr_result, NULL);
-  *endptr_result = NULL;
+  CHECK_NE(endptr_result, nullptr);
+  *endptr_result = nullptr;

   char *endptr_extract;
   if (!ExtractUntilChar(text, c, &endptr_extract))
@@ -177,7 +177,7 @@ char *CopyStringUntilChar(
     char *text, unsigned out_len, int c, char *out) {
   char *endptr;
   if (!ExtractUntilChar(text, c, &endptr))
-    return NULL;
+    return nullptr;

   strncpy(out, text, out_len);
   out[out_len-1] = '\0';
@@ -191,7 +191,7 @@ template<typename T>
 bool StringToIntegerUntilCharWithCheck(
     T *outptr, char *text, int base, int c, char **endptr) {
   *outptr = StringToIntegerUntilChar<T>(*endptr, base, c, endptr);
-  if (*endptr == NULL || **endptr == '\0') return false;
+  if (*endptr == nullptr || **endptr == '\0') return false;
   ++(*endptr);
   return true;
 }
@@ -207,7 +207,7 @@ bool ParseProcMapsLine(char *text, uint64_t *start, uint64_t *end,
    * start, end, flags, offset, major, minor, inode, filename_offset)
    */
   char *endptr = text;
-  if (endptr == NULL || *endptr == '\0') return false;
+  if (endptr == nullptr || *endptr == '\0') return false;

   if (!StringToIntegerUntilCharWithCheck(start, endptr, 16, '-', &endptr))
     return false;
@@ -216,7 +216,7 @@ bool ParseProcMapsLine(char *text, uint64_t *start, uint64_t *end,
     return false;

   endptr = CopyStringUntilChar(endptr, 5, ' ', flags);
-  if (endptr == NULL || *endptr == '\0') return false;
+  if (endptr == nullptr || *endptr == '\0') return false;
   ++endptr;

   if (!StringToIntegerUntilCharWithCheck(offset, endptr, 16, ' ', &endptr))
@@ -55,9 +55,9 @@ static struct InitModule {
     int x = 0;
     // futexes are ints, so we can use them only when
     // that's the same size as the lockword_ in SpinLock.
-    have_futex = (syscall(__NR_futex, &x, FUTEX_WAKE, 1, NULL, NULL, 0) >= 0);
+    have_futex = (syscall(__NR_futex, &x, FUTEX_WAKE, 1, nullptr, nullptr, 0) >= 0);
     if (have_futex && syscall(__NR_futex, &x, FUTEX_WAKE | futex_private_flag,
-                              1, NULL, NULL, 0) < 0) {
+                              1, nullptr, nullptr, 0) < 0) {
       futex_private_flag = 0;
     }
   }
@@ -83,9 +83,9 @@ void SpinLockDelay(std::atomic<int> *w, int32_t value, int loop) {
       tm.tv_nsec *= 16;  // increase the delay; we expect explicit wakeups
       syscall(__NR_futex, reinterpret_cast<int*>(w),
              FUTEX_WAIT | futex_private_flag, value,
-              reinterpret_cast<struct kernel_timespec*>(&tm), NULL, 0);
+              reinterpret_cast<struct kernel_timespec*>(&tm), nullptr, 0);
     } else {
-      nanosleep(&tm, NULL);
+      nanosleep(&tm, nullptr);
     }
     errno = save_errno;
   }
@@ -94,7 +94,7 @@ void SpinLockDelay(std::atomic<int> *w, int32_t value, int loop) {
 void SpinLockWake(std::atomic<int> *w, bool all) {
   if (have_futex) {
     syscall(__NR_futex, reinterpret_cast<int*>(w),
-            FUTEX_WAKE | futex_private_flag, all ? INT_MAX : 1, NULL, NULL, 0);
+            FUTEX_WAKE | futex_private_flag, all ? INT_MAX : 1, nullptr, nullptr, 0);
   }
 }
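For readers unfamiliar with the raw futex calls being edited above: a minimal, Linux-only sketch of the same wait/wake pattern, with error handling elided:

    #include <atomic>
    #include <climits>
    #include <linux/futex.h>
    #include <sys/syscall.h>
    #include <unistd.h>

    // Sleep while *addr still holds 'expected'; spurious wakeups are possible.
    static void FutexWait(std::atomic<int>* addr, int expected) {
      syscall(__NR_futex, reinterpret_cast<int*>(addr),
              FUTEX_WAIT, expected, nullptr, nullptr, 0);
    }

    // Wake every waiter blocked on 'addr'.
    static void FutexWakeAll(std::atomic<int>* addr) {
      syscall(__NR_futex, reinterpret_cast<int*>(addr),
              FUTEX_WAKE, INT_MAX, nullptr, nullptr, 0);
    }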
@@ -51,7 +51,7 @@ void SpinLockDelay(std::atomic<int> *w, int32_t value, int loop) {
     struct timespec tm;
     tm.tv_sec = 0;
     tm.tv_nsec = base::internal::SuggestedDelayNS(loop);
-    nanosleep(&tm, NULL);
+    nanosleep(&tm, nullptr);
   }
   errno = save_errno;
 }
@@ -124,12 +124,12 @@ static size_t slow_strlen(const char *s) {
 const char* GetenvBeforeMain(const char* name) {
   const int namelen = slow_strlen(name);
 #if defined(HAVE___ENVIRON)   // if we have it, it's declared in unistd.h
-  if (__environ) {            // can exist but be NULL, if statically linked
+  if (__environ) {            // can exist but be nullptr, if statically linked
     for (char** p = __environ; *p; p++) {
       if (!slow_memcmp(*p, name, namelen) && (*p)[namelen] == '=')
         return *p + namelen+1;
     }
-    return NULL;
+    return nullptr;
   }
 #endif
   // static is ok because this function should only be called before
@@ -152,13 +152,13 @@ const char* GetenvBeforeMain(const char* name) {
     // proc file has the format NAME=value\0NAME=value\0NAME=value\0...
     const char* endp = (char*)slow_memchr(p, '\0',
                                           sizeof(envbuf) - (p - envbuf));
-    if (endp == NULL)            // this entry isn't NUL terminated
-      return NULL;
+    if (endp == nullptr)         // this entry isn't NUL terminated
+      return nullptr;
     else if (!slow_memcmp(p, name, namelen) && p[namelen] == '=')    // it's a match
       return p + namelen+1;      // point after =
     p = endp + 1;
   }
-  return NULL;                   // env var never found
+  return nullptr;                // env var never found
 }
 #else  // PLATFORM_WINDOWS

@@ -76,14 +76,14 @@ const void *VDSOSupport::Init() {
   // But going directly to kernel via /proc/self/auxv below bypasses
   // Valgrind zapping. So we check for Valgrind separately.
   if (RunningOnValgrind()) {
-    vdso_base_ = NULL;
-    return NULL;
+    vdso_base_ = nullptr;
+    return nullptr;
   }
   int fd = open("/proc/self/auxv", O_RDONLY);
   if (fd == -1) {
     // Kernel too old to have a VDSO.
-    vdso_base_ = NULL;
-    return NULL;
+    vdso_base_ = nullptr;
+    return nullptr;
   }
   ElfW(auxv_t) aux;
   while (read(fd, &aux, sizeof(aux)) == sizeof(aux)) {
@@ -97,7 +97,7 @@ const void *VDSOSupport::Init() {
     close(fd);
     if (vdso_base_ == ElfMemImage::kInvalidBase) {
       // Didn't find AT_SYSINFO_EHDR in auxv[].
-      vdso_base_ = NULL;
+      vdso_base_ = nullptr;
     }
   }
   return vdso_base_;
@@ -48,7 +48,7 @@
 //  VDSOSupport vdso;
 //  VDSOSupport::SymbolInfo info;
 //  typedef (*FN)(unsigned *, void *, void *);
-//  FN fn = NULL;
+//  FN fn = nullptr;
 //  if (vdso.LookupSymbol("__vdso_getcpu", "LINUX_2.6", STT_FUNC, &info)) {
 //     fn = reinterpret_cast<FN>(info.address);
 //  }
@@ -66,7 +66,7 @@
 // See elf_mem_image.h. We only define HAVE_ELF_MEM_IMAGE for Linux/PPC.
 #define HAVE_VDSO_SUPPORT 1

-#include <stdlib.h>     // for NULL
+#include <stdlib.h>

 namespace base {

@@ -89,14 +89,14 @@ class VDSOSupport {
   // Look up versioned dynamic symbol in the kernel VDSO.
   // Returns false if VDSO is not present, or doesn't contain given
   // symbol/version/type combination.
-  // If info_out != NULL, additional details are filled in.
+  // If info_out != nullptr, additional details are filled in.
   bool LookupSymbol(const char *name, const char *version,
                     int symbol_type, SymbolInfo *info_out) const;

   // Find info about symbol (if any) which overlaps given address.
   // Returns true if symbol was found; false if VDSO isn't present
   // or doesn't have a symbol overlapping given address.
-  // If info_out != NULL, additional details are filled in.
+  // If info_out != nullptr, additional details are filled in.
   bool LookupSymbolByAddress(const void *address, SymbolInfo *info_out) const;

   // Used only for testing. Replace real VDSO base with a mock.
@@ -111,7 +111,7 @@ class VDSOSupport {

  private:
   // image_ represents VDSO ELF image in memory.
-  // image_.ehdr_ == NULL implies there is no VDSO.
+  // image_.ehdr_ == nullptr implies there is no VDSO.
   ElfMemImage image_;

   // Cached value of auxv AT_SYSINFO_EHDR, computed once.
@@ -96,11 +96,11 @@ void CentralFreeList::ReleaseListToSpans(void* start) {
 void CentralFreeList::ReleaseToSpans(void* object) {
   const PageID p = reinterpret_cast<uintptr_t>(object) >> kPageShift;
   Span* span = Static::pageheap()->GetDescriptor(p);
-  ASSERT(span != NULL);
+  ASSERT(span != nullptr);
   ASSERT(span->refcount > 0);

   // If span is empty, move it to non-empty list
-  if (span->objects == NULL) {
+  if (span->objects == nullptr) {
     tcmalloc::DLL_Remove(span);
     tcmalloc::DLL_Prepend(&nonempty_, span);
   }
@@ -109,7 +109,7 @@ void CentralFreeList::ReleaseToSpans(void* object) {
   if (false) {
     // Check that object does not occur in list
     int got = 0;
-    for (void* p = span->objects; p != NULL; p = *((void**) p)) {
+    for (void* p = span->objects; p != nullptr; p = *((void**) p)) {
       ASSERT(p != object);
       got++;
     }
@@ -247,15 +247,15 @@ int CentralFreeList::RemoveRange(void **start, void **end, int N) {
   }

   int result = 0;
-  *start = NULL;
-  *end = NULL;
+  *start = nullptr;
+  *end = nullptr;
   // TODO: Prefetch multiple TCEntries?
   result = FetchFromOneSpansSafe(N, start, end);
   if (result != 0) {
     while (result < N) {
       int n;
-      void* head = NULL;
-      void* tail = NULL;
+      void* head = nullptr;
+      void* tail = nullptr;
       n = FetchFromOneSpans(N - result, &head, &tail);
       if (!n) break;
       result += n;
@@ -280,7 +280,7 @@ int CentralFreeList::FetchFromOneSpans(int N, void **start, void **end) {
   if (tcmalloc::DLL_IsEmpty(&nonempty_)) return 0;
   Span* span = nonempty_.next;

-  ASSERT(span->objects != NULL);
+  ASSERT(span->objects != nullptr);

   int result = 0;
   void *prev, *curr;
@@ -288,9 +288,9 @@ int CentralFreeList::FetchFromOneSpans(int N, void **start, void **end) {
   do {
     prev = curr;
     curr = *(reinterpret_cast<void**>(curr));
-  } while (++result < N && curr != NULL);
+  } while (++result < N && curr != nullptr);

-  if (curr == NULL) {
+  if (curr == nullptr) {
     // Move to empty list
     tcmalloc::DLL_Remove(span);
     tcmalloc::DLL_Prepend(&empty_, span);
@@ -299,7 +299,7 @@ int CentralFreeList::FetchFromOneSpans(int N, void **start, void **end) {
   *start = span->objects;
   *end = prev;
   span->objects = curr;
-  SLL_SetNext(*end, NULL);
+  SLL_SetNext(*end, nullptr);
   span->refcount += result;
   counter_ -= result;
   return result;
@@ -366,7 +366,7 @@ void CentralFreeList::Populate() {
   }
   ASSERT(ptr <= limit);
   ASSERT(ptr > limit - size);  // same as ptr + size > limit but avoiding overflow
-  *tail = NULL;
+  *tail = nullptr;
   span->refcount = 0;  // No sub-object in use yet

   // Add span to list of non-empty spans
@@ -109,13 +109,13 @@ class CACHELINE_ALIGNED CentralFreeList {

   // REQUIRES: lock_ is held
   // Remove object from cache and return.
-  // Return NULL if no free entries in cache.
+  // Return nullptr if no free entries in cache.
   int FetchFromOneSpans(int N, void **start, void **end) EXCLUSIVE_LOCKS_REQUIRED(lock_);

   // REQUIRES: lock_ is held
   // Remove object from cache and return.  Fetches
   // from pageheap if cache is empty.  Only returns
-  // NULL on allocation failure.
+  // nullptr on allocation failure.
   int FetchFromOneSpansSafe(int N, void **start, void **end) EXCLUSIVE_LOCKS_REQUIRED(lock_);

   // REQUIRES: lock_ is held
@@ -62,7 +62,7 @@ static inline void InitTCMallocTransferNumObjects()
   if (FLAGS_tcmalloc_transfer_num_objects == 0) {
     const char *envval = TCMallocGetenvSafe("TCMALLOC_TRANSFER_NUM_OBJ");
     FLAGS_tcmalloc_transfer_num_objects = !envval ? kDefaultTransferNumObjecs :
-      strtol(envval, NULL, 10);
+      strtol(envval, nullptr, 10);
   }
 }

@@ -281,8 +281,8 @@ static SpinLock metadata_alloc_lock;
 void* MetaDataAlloc(size_t bytes) {
   if (bytes >= kMetadataAllocChunkSize) {
     void *rv = TCMalloc_SystemAlloc(bytes,
-                                    NULL, kMetadataAllignment);
-    if (rv != NULL) {
+                                    nullptr, kMetadataAllignment);
+    if (rv != nullptr) {
       metadata_system_bytes_ += bytes;
     }
     return rv;
@@ -301,8 +301,8 @@ void* MetaDataAlloc(size_t bytes) {
     size_t real_size;
     void *ptr = TCMalloc_SystemAlloc(kMetadataAllocChunkSize,
                                      &real_size, kMetadataAllignment);
-    if (ptr == NULL) {
-      return NULL;
+    if (ptr == nullptr) {
+      return nullptr;
     }

     metadata_chunk_alloc_ = static_cast<char *>(ptr);
@@ -289,8 +289,8 @@ public:
 };

 // Allocates "bytes" worth of memory and returns it.  Increments
-// metadata_system_bytes appropriately.  May return NULL if allocation
-// fails.  Requires pageheap_lock is held.
+// metadata_system_bytes appropriately.  May return nullptr if
+// allocation fails.  Requires pageheap_lock is held.
 void* MetaDataAlloc(size_t bytes);

 // Returns the total number of bytes allocated from the system.
@@ -198,7 +198,7 @@ class FreeQueue {
 };

 struct MallocBlockQueueEntry {
-  MallocBlockQueueEntry() : block(NULL), size(0),
+  MallocBlockQueueEntry() : block(nullptr), size(0),
                             num_deleter_pcs(0) {}
   MallocBlockQueueEntry(MallocBlock* b, size_t s) : block(b), size(s) {
     if (FLAGS_max_free_queue_size != 0 && b != nullptr) {
@@ -425,8 +425,8 @@ class MallocBlock {
   void CheckLocked(int type) const {
     int map_type = 0;
     const int* found_type =
-      alloc_map_ != NULL ? alloc_map_->Find(data_addr()) : NULL;
-    if (found_type == NULL) {
+      alloc_map_ != nullptr ? alloc_map_->Find(data_addr()) : nullptr;
+    if (found_type == nullptr) {
       RAW_LOG(FATAL, "memory allocation bug: object at %p "
               "has never been allocated", data_addr());
     } else {
@@ -517,9 +517,9 @@ class MallocBlock {
     static size_t max_size_t = ~0;
     if (size > max_size_t - sizeof(MallocBlock)) {
       RAW_LOG(ERROR, "Massive size passed to malloc: %zu", size);
-      return NULL;
+      return nullptr;
     }
-    MallocBlock* b = NULL;
+    MallocBlock* b = nullptr;
     const bool use_malloc_page_fence = FLAGS_malloc_page_fence;
     const bool malloc_page_fence_readable = FLAGS_malloc_page_fence_readable;
 #ifdef HAVE_MMAP
@@ -529,10 +529,10 @@ class MallocBlock {
       size_t sz = real_mmapped_size(size);
       int pagesize = getpagesize();
       int num_pages = (sz + pagesize - 1) / pagesize + 1;
-      char* p = (char*) mmap(NULL, num_pages * pagesize, PROT_READ|PROT_WRITE,
+      char* p = (char*) mmap(nullptr, num_pages * pagesize, PROT_READ|PROT_WRITE,
                              MAP_PRIVATE|MAP_ANONYMOUS, -1, 0);
       if (p == MAP_FAILED) {
-        // If the allocation fails, abort rather than returning NULL to
+        // If the allocation fails, abort rather than returning nullptr to
         // malloc. This is because in most cases, the program will run out
         // of memory in this mode due to tremendous amount of wastage. There
         // is no point in propagating the error elsewhere.
@@ -556,8 +556,8 @@ class MallocBlock {
     // It would be nice to output a diagnostic on allocation failure
     // here, but logging (other than FATAL) requires allocating
     // memory, which could trigger a nasty recursion. Instead, preserve
-    // malloc semantics and return NULL on failure.
-    if (b != NULL) {
+    // malloc semantics and return nullptr on failure.
+    if (b != nullptr) {
       b->magic1_ = use_malloc_page_fence ? kMagicMMap : kMagicMalloc;
       b->Initialize(size, type);
     }
@@ -605,11 +605,11 @@ class MallocBlock {
     int num_entries = 0;
     MallocBlockQueueEntry new_entry(b, size);
     free_queue_lock_.Lock();
-    if (free_queue_ == NULL)
+    if (free_queue_ == nullptr)
       free_queue_ = new FreeQueue<MallocBlockQueueEntry>;
     RAW_CHECK(!free_queue_->Full(), "Free queue mustn't be full!");

-    if (b != NULL) {
+    if (b != nullptr) {
       free_queue_size_ += size + sizeof(MallocBlockQueueEntry);
       free_queue_->Push(new_entry);
     }
@@ -845,7 +845,7 @@ class MallocBlock {

 void DanglingWriteChecker() {
   // Clear out the remaining free queue to check for dangling writes.
-  MallocBlock::ProcessFreeQueue(NULL, 0, 0);
+  MallocBlock::ProcessFreeQueue(nullptr, 0, 0);
 }

 // ========================================================================= //
@@ -860,14 +860,14 @@ const char* const MallocBlock::kAllocName[] = {
   "malloc",
   "new",
   "new []",
-  NULL,
+  nullptr,
 };

 const char* const MallocBlock::kDeallocName[] = {
   "free",
   "delete",
   "delete []",
-  NULL,
+  nullptr,
 };

 int MallocBlock::stats_blocks_;
@@ -987,7 +987,7 @@ static int TraceFd() {
   }
   // Add a header to the log.
   TracePrintf(trace_fd, "Trace started: %lu\n",
-              static_cast<unsigned long>(time(NULL)));
+              static_cast<unsigned long>(time(nullptr)));
   TracePrintf(trace_fd,
               "func\tsize\tptr\tthread_id\tstack pcs for tools/symbolize\n");
 }
@@ -1051,7 +1051,7 @@ static inline void* DebugAllocate(size_t size, int type) {
   if (size == 0) size = 1;
 #endif
   MallocBlock* ptr = MallocBlock::Allocate(size, type);
-  if (ptr == NULL) return NULL;
+  if (ptr == nullptr) return nullptr;
   MALLOC_TRACE("malloc", size, ptr->data_addr());
   return ptr->data_addr();
 }
@@ -1141,7 +1141,7 @@ class DebugMallocImplementation : public TCMallocImplementation {

   virtual MallocExtension::Ownership GetOwnership(const void* p) {
     if (!p) {
-      // nobody owns NULL
+      // nobody owns nullptr
       return MallocExtension::kNotOwned;
     }

@@ -1220,7 +1220,7 @@ static void *retry_debug_allocate(void *arg) {
 // from the logic of calling the new-handler.
 inline void* debug_cpp_alloc(size_t size, int new_type, bool nothrow) {
   void* p = DebugAllocate(size, new_type);
-  if (p != NULL) {
+  if (p != nullptr) {
     return p;
   }
   struct debug_alloc_retry_data data;
@@ -1232,7 +1232,7 @@ inline void* debug_cpp_alloc(size_t size, int new_type, bool nothrow) {

 inline void* do_debug_malloc_or_debug_cpp_alloc(size_t size) {
   void* p = DebugAllocate(size, MallocBlock::kMallocType);
-  if (p != NULL) {
+  if (p != nullptr) {
     return p;
   }
   struct debug_alloc_retry_data data;
@@ -1278,7 +1278,7 @@ extern "C" PERFTOOLS_DLL_DECL void tc_free_sized(void *ptr, size_t size) PERFTOOLS_NOTHROW {
 extern "C" PERFTOOLS_DLL_DECL void* tc_calloc(size_t count, size_t size) PERFTOOLS_NOTHROW {
   // Overflow check
   const size_t total_size = count * size;
-  if (size != 0 && total_size / size != count) return NULL;
+  if (size != 0 && total_size / size != count) return nullptr;

   void* block = do_debug_malloc_or_debug_cpp_alloc(total_size);
   if (block) memset(block, 0, total_size);
@@ -1329,7 +1329,7 @@ extern "C" PERFTOOLS_DLL_DECL void* tc_realloc(void* ptr, size_t size) PERFTOOLS_NOTHROW {
 extern "C" PERFTOOLS_DLL_DECL void* tc_new(size_t size) {
   void* ptr = debug_cpp_alloc(size, MallocBlock::kNewType, false);
   MallocHook::InvokeNewHook(ptr, size);
-  if (ptr == NULL) {
+  if (ptr == nullptr) {
     RAW_LOG(FATAL, "Unable to allocate %zu bytes: new failed.", size);
   }
   return ptr;
@@ -1364,7 +1364,7 @@ extern "C" PERFTOOLS_DLL_DECL void tc_delete_nothrow(void* p, const std::nothrow_t&) PERFTOOLS_NOTHROW {
 extern "C" PERFTOOLS_DLL_DECL void* tc_newarray(size_t size) {
   void* ptr = debug_cpp_alloc(size, MallocBlock::kArrayNewType, false);
   MallocHook::InvokeNewHook(ptr, size);
-  if (ptr == NULL) {
+  if (ptr == nullptr) {
     RAW_LOG(FATAL, "Unable to allocate %zu bytes: new[] failed.", size);
   }
   return ptr;
@@ -1407,7 +1407,7 @@ static void *do_debug_memalign(size_t alignment, size_t size, int type) {
   // Allocate "alignment-1" extra bytes to ensure alignment is possible, and
   // a further data_offset bytes for an additional fake header.
   size_t extra_bytes = data_offset + alignment - 1;
-  if (size + extra_bytes < size) return NULL;  // Overflow
+  if (size + extra_bytes < size) return nullptr;  // Overflow
   p = DebugAllocate(size + extra_bytes, type);
   if (p != 0) {
     intptr_t orig_p = reinterpret_cast<intptr_t>(p);
@@ -1448,7 +1448,7 @@ void* do_debug_memalign_or_debug_cpp_memalign(size_t align,
                                               bool from_operator,
                                               bool nothrow) {
   void* p = do_debug_memalign(align, size, type);
-  if (p != NULL) {
+  if (p != nullptr) {
     return p;
   }

@@ -1477,7 +1477,7 @@ extern "C" PERFTOOLS_DLL_DECL int tc_posix_memalign(void** result_ptr, size_t align, size_t size) PERFTOOLS_NOTHROW {

   void* result = do_debug_memalign_or_debug_cpp_memalign(align, size, MallocBlock::kMallocType, false, true);
   MallocHook::InvokeNewHook(result, size);
-  if (result == NULL) {
+  if (result == nullptr) {
     return ENOMEM;
   } else {
     *result_ptr = result;
@@ -102,13 +102,13 @@ static void InitEmergencyMalloc(void) {
 ATTRIBUTE_HIDDEN void *EmergencyMalloc(size_t size) {
   SpinLockHolder l(&emergency_malloc_lock);

-  if (emergency_arena_start == NULL) {
+  if (emergency_arena_start == nullptr) {
     InitEmergencyMalloc();
-    CHECK_CONDITION(emergency_arena_start != NULL);
+    CHECK_CONDITION(emergency_arena_start != nullptr);
   }

   void *rv = LowLevelAlloc::AllocWithArena(size, emergency_arena);
-  if (rv == NULL) {
+  if (rv == nullptr) {
     errno = ENOMEM;
   }
   return rv;
@@ -116,9 +116,9 @@ ATTRIBUTE_HIDDEN void *EmergencyMalloc(size_t size) {

 ATTRIBUTE_HIDDEN void EmergencyFree(void *p) {
   SpinLockHolder l(&emergency_malloc_lock);
-  if (emergency_arena_start == NULL) {
+  if (emergency_arena_start == nullptr) {
     InitEmergencyMalloc();
-    CHECK_CONDITION(emergency_arena_start != NULL);
+    CHECK_CONDITION(emergency_arena_start != nullptr);
     free(p);
     return;
   }
@@ -127,12 +127,12 @@ ATTRIBUTE_HIDDEN void EmergencyFree(void *p) {
 }

 ATTRIBUTE_HIDDEN void *EmergencyRealloc(void *_old_ptr, size_t new_size) {
-  if (_old_ptr == NULL) {
+  if (_old_ptr == nullptr) {
     return EmergencyMalloc(new_size);
   }
   if (new_size == 0) {
     EmergencyFree(_old_ptr);
-    return NULL;
+    return nullptr;
   }
   SpinLockHolder l(&emergency_malloc_lock);
   CHECK_CONDITION(emergency_arena_start);
@@ -148,9 +148,9 @@ ATTRIBUTE_HIDDEN void *EmergencyRealloc(void *_old_ptr, size_t new_size) {
   size_t copy_size = (new_size < old_ptr_size) ? new_size : old_ptr_size;

   void *new_ptr = LowLevelAlloc::AllocWithArena(new_size, emergency_arena);
-  if (new_ptr == NULL) {
+  if (new_ptr == nullptr) {
     errno = ENOMEM;
-    return NULL;
+    return nullptr;
   }
   memcpy(new_ptr, old_ptr, copy_size);

@@ -91,8 +91,8 @@ class PERFTOOLS_DLL_DECL HeapLeakChecker {
   static bool IsActive();

   // Return pointer to the whole-program checker if it has been created
-  // and NULL otherwise.
-  // Once GlobalChecker() returns non-NULL that object will not disappear and
+  // and nullptr otherwise.
+  // Once GlobalChecker() returns non-nullptr that object will not disappear and
   // will be returned by all later GlobalChecker calls.
   // This is mainly to access BytesLeaked() and ObjectsLeaked() (see below)
   // for the whole-program checker after one calls NoGlobalLeaks()
@@ -357,9 +357,9 @@ class PERFTOOLS_DLL_DECL HeapLeakChecker {

   class SpinLock* lock_;  // to make HeapLeakChecker objects thread-safe
   const char* name_;  // our remembered name (we own it)
-                      // NULL means this leak checker is a noop
+                      // nullptr means this leak checker is a noop

-  // Snapshot taken when the checker was created.  May be NULL
+  // Snapshot taken when the checker was created.  May be nullptr
   // for the global heap checker object.  We use void* instead of
   // HeapProfileTable::Snapshot* to avoid including heap-profile-table.h.
   void* start_snapshot_;
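A typical scope-based use of this class, in the style the header documents:

    #include <gperftools/heap-checker.h>
    #include <cassert>

    void TestNoLeaks() {
      HeapLeakChecker checker("my_test");
      {
        // ... code under test: everything allocated here must be freed ...
      }
      assert(checker.NoLeaks());  // fails if the scope above leaked
    }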
@@ -79,7 +79,7 @@ class PERFTOOLS_DLL_DECL SysAllocator {
   virtual ~SysAllocator();

   // Allocates "size"-byte of memory from system aligned with "alignment".
-  // Returns NULL if failed. Otherwise, the returned pointer p up to and
+  // Returns nullptr if failed. Otherwise, the returned pointer p up to and
   // including (p + actual_size -1) have been allocated.
   virtual void* Alloc(size_t size, size_t *actual_size, size_t alignment) = 0;
 };
@@ -229,14 +229,14 @@ class PERFTOOLS_DLL_DECL MallocExtension {
   // Get the named "property"'s value. Returns true if the property
   // is known. Returns false if the property is not a valid property
   // name for the current malloc implementation.
-  // REQUIRES: property != NULL; value != NULL
+  // REQUIRES: property != nullptr; value != nullptr
   virtual bool GetNumericProperty(const char* property, size_t* value);

   // Set the named "property"'s value. Returns true if the property
   // is known and writable. Returns false if the property is not a
   // valid property name for the current malloc implementation, or
   // is not writable.
-  // REQUIRES: property != NULL
+  // REQUIRES: property != nullptr
   virtual bool SetNumericProperty(const char* property, size_t value);

   // Mark the current thread as "idle". This routine may optionally
@@ -259,7 +259,7 @@ class PERFTOOLS_DLL_DECL MallocExtension {
   virtual void MarkThreadBusy();

   // Gets the system allocator used by the malloc extension instance. Returns
-  // NULL for malloc implementations that do not support pluggable system
+  // nullptr for malloc implementations that do not support pluggable system
   // allocators.
   virtual SysAllocator* GetSystemAllocator();

@@ -314,7 +314,7 @@ class PERFTOOLS_DLL_DECL MallocExtension {
   // p must have been allocated by this malloc implementation,
   // must not be an interior pointer -- that is, must be exactly
   // the pointer returned to by malloc() et al., not some offset
-  // from that -- and should not have been freed yet.  p may be NULL.
+  // from that -- and should not have been freed yet.  p may be nullptr.
   // (Currently only implemented in tcmalloc; other implementations
   // will return 0.)
   // This is equivalent to malloc_size() in OS X, malloc_usable_size()
@@ -323,7 +323,7 @@ class PERFTOOLS_DLL_DECL MallocExtension {

   // Returns kOwned if this malloc implementation allocated the memory
   // pointed to by p, or kNotOwned if some other malloc implementation
-  // allocated it or p is NULL.  May also return kUnknownOwnership if
+  // allocated it or p is nullptr.  May also return kUnknownOwnership if
   // the malloc implementation does not keep track of ownership.
   // REQUIRES: p must be a value returned from a previous call to
   // malloc(), calloc(), realloc(), memalign(), posix_memalign(),
@@ -339,7 +339,7 @@ class PERFTOOLS_DLL_DECL MallocExtension {
   };
   virtual Ownership GetOwnership(const void* p);

-  // The current malloc implementation.  Always non-NULL.
+  // The current malloc implementation.  Always non-nullptr.
   static MallocExtension* instance();

   // DEPRECATED. Internal.
@@ -399,7 +399,7 @@ class PERFTOOLS_DLL_DECL MallocExtension {
   //
   // It is the responsibility of the caller to "delete[]" the returned array.
   //
-  // May return NULL to indicate no results.
+  // May return nullptr to indicate no results.
   //
   // This is an internal extension. Callers should use the more
   // convenient "GetHeapSample(string*)" method defined above.
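A usage sketch for the property interface above; "generic.current_allocated_bytes" is one of the documented property names, and both arguments must be non-null per the contract:

    #include <gperftools/malloc_extension.h>
    #include <cstdio>

    void PrintAllocatedBytes() {
      size_t allocated = 0;
      if (MallocExtension::instance()->GetNumericProperty(
              "generic.current_allocated_bytes", &allocated)) {
        std::printf("allocated: %zu bytes\n", allocated);
      }
    }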
@@ -98,7 +98,7 @@ extern "C" {
 class PERFTOOLS_DLL_DECL MallocHook {
  public:
   // The NewHook is invoked whenever an object is allocated.
-  // It may be passed NULL if the allocator returned NULL.
+  // It may be passed nullptr if the allocator returned nullptr.
   typedef MallocHook_NewHook NewHook;
   inline static bool AddNewHook(NewHook hook) {
     return MallocHook_AddNewHook(hook);
@@ -109,7 +109,7 @@ class PERFTOOLS_DLL_DECL MallocHook {
   inline static void InvokeNewHook(const void* p, size_t s);

   // The DeleteHook is invoked whenever an object is deallocated.
-  // It may be passed NULL if the caller is trying to delete NULL.
+  // It may be passed nullptr if the caller is trying to delete nullptr.
   typedef MallocHook_DeleteHook DeleteHook;
   inline static bool AddDeleteHook(DeleteHook hook) {
     return MallocHook_AddDeleteHook(hook);
@@ -89,7 +89,7 @@ extern "C" {
 struct ProfilerOptions {
   /* Filter function and argument.
    *
-   * If filter_in_thread is not NULL, when a profiling tick is delivered
+   * If filter_in_thread is not nullptr, when a profiling tick is delivered
    * the profiler will call:
    *
    *   (*filter_in_thread)(filter_in_thread_arg)
@@ -112,7 +112,7 @@ struct ProfilerOptions {
 /* Start profiling and write profile info into fname, discarding any
  * existing profiling data in that file.
  *
- * This is equivalent to calling ProfilerStartWithOptions(fname, NULL).
+ * This is equivalent to calling ProfilerStartWithOptions(fname, nullptr).
  */
 PERFTOOLS_DLL_DECL int ProfilerStart(const char* fname);

@@ -122,7 +122,7 @@ PERFTOOLS_DLL_DECL int ProfilerStart(const char* fname);
  * The profiler is configured using the options given by 'options'.
  * Options which are not specified are given default values.
  *
- * 'options' may be NULL, in which case all are given default values.
+ * 'options' may be nullptr, in which case all are given default values.
  *
  * Returns nonzero if profiling was started successfully, or zero else.
  */
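The simplest profiling session using the API above; ProfilerStart is the nullptr-options shorthand just described:

    #include <gperftools/profiler.h>

    int main() {
      ProfilerStart("myprog.prof");  // same as ProfilerStartWithOptions("myprog.prof", nullptr)
      // ... workload to profile ...
      ProfilerStop();
      return 0;
    }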
@@ -77,14 +77,14 @@
 // be identified.
 //
 // This routine may return fewer stack frame entries than are
-// available. Also note that "result" and "sizes" must both be non-NULL.
+// available. Also note that "result" and "sizes" must both be non-nullptr.
 extern PERFTOOLS_DLL_DECL int GetStackFrames(void** result, int* sizes, int max_depth,
                                              int skip_count);

 // Same as above, but to be used from a signal handler. The "uc" parameter
 // should be the pointer to ucontext_t which was passed as the 3rd parameter
 // to sa_sigaction signal handler. It may help the unwinder to get a
-// better stack trace under certain conditions. The "uc" may safely be NULL.
+// better stack trace under certain conditions. The "uc" may safely be nullptr.
 extern PERFTOOLS_DLL_DECL int GetStackFramesWithContext(void** result, int* sizes, int max_depth,
                                                         int skip_count, const void *uc);

@@ -103,14 +103,14 @@ extern PERFTOOLS_DLL_DECL int GetStackFramesWithContext(void** result, int* sizes, int max_depth,
 //      result[1]       main
 //      ....            ...
 //
-// "result" must not be NULL.
+// "result" must not be nullptr.
 extern PERFTOOLS_DLL_DECL int GetStackTrace(void** result, int max_depth,
                                             int skip_count);

 // Same as above, but to be used from a signal handler. The "uc" parameter
 // should be the pointer to ucontext_t which was passed as the 3rd parameter
 // to sa_sigaction signal handler. It may help the unwinder to get a
-// better stack trace under certain conditions. The "uc" may safely be NULL.
+// better stack trace under certain conditions. The "uc" may safely be nullptr.
 extern PERFTOOLS_DLL_DECL int GetStackTraceWithContext(void** result, int max_depth,
                                                        int skip_count, const void *uc);

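A small caller of GetStackTrace as declared above; the result buffer must be non-null, and skip_count drops the innermost frames:

    #include <gperftools/stacktrace.h>
    #include <cstdio>

    void DumpCallers() {
      void* frames[32];
      int depth = GetStackTrace(frames, 32, /* skip_count = */ 1);
      for (int i = 0; i < depth; i++) {
        std::printf("  %p\n", frames[i]);
      }
    }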
@@ -81,7 +81,7 @@ extern "C" {
 #endif
   /*
    * Returns a human-readable version string.  If major, minor,
-   * and/or patch are not NULL, they are set to the major version,
+   * and/or patch are not nullptr, they are set to the major version,
    * minor version, and patch-code (a string, usually "").
    */
   PERFTOOLS_DLL_DECL const char* tc_version(int* major, int* minor,
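Calling tc_version per the contract above; any of the out-parameters may also be nullptr when only the combined string is wanted:

    #include <gperftools/tcmalloc.h>
    #include <cstdio>

    int main() {
      int major, minor;
      const char* patch;
      const char* v = tc_version(&major, &minor, &patch);
      std::printf("tcmalloc %s (%d.%d%s)\n", v, major, minor, patch);
      return 0;
    }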
@@ -194,7 +194,7 @@ static bool IsDebuggerAttached(void) {    // only works under linux, probably
   const char *const kTracerPid = "TracerPid:\t";
   buf[len - 1] = '\0';
   const char *p = strstr(buf, kTracerPid);
-  if (p != NULL) {
+  if (p != nullptr) {
     rc = (strncmp(p + strlen(kTracerPid), "0\n", 2) != 0);
   }
 }
@@ -319,12 +319,12 @@ static SpinLock heap_checker_lock;

 // Heap profile prefix for leak checking profiles.
 // Gets assigned once when leak checking is turned on, then never modified.
-static const string* profile_name_prefix = NULL;
+static const string* profile_name_prefix;

 // Whole-program heap leak checker.
 // Gets assigned once when leak checking is turned on,
 // then main_heap_checker is never deleted.
-static HeapLeakChecker* main_heap_checker = NULL;
+static HeapLeakChecker* main_heap_checker;

 // Whether we will use main_heap_checker to do a check at program exit
 // automatically. In any case user can ask for more checks on main_heap_checker
@@ -336,7 +336,7 @@ static bool do_main_heap_check = false;
 // together with setting heap_checker_on (below) to true
 // and registering our new/delete malloc hooks;
 // similarly all are unset in HeapLeakChecker::TurnItselfOffLocked.
-static HeapProfileTable* heap_profile = NULL;
+static HeapProfileTable* heap_profile;

 // If we are doing (or going to do) any kind of heap-checking.
 static bool heap_checker_on = false;
@@ -366,7 +366,7 @@ class HeapLeakChecker::Allocator {
  public:
   static void Init() {
     RAW_DCHECK(heap_checker_lock.IsHeld(), "");
-    RAW_DCHECK(arena_ == NULL, "");
+    RAW_DCHECK(arena_ == nullptr, "");
     arena_ = LowLevelAlloc::NewArena(nullptr);
   }
   static void Shutdown() {
@@ -393,22 +393,22 @@ class HeapLeakChecker::Allocator {
   static void Free(void* p, size_t /* n */) {
     Free(p);
   }
-  // destruct, free, and make *p to be NULL
+  // destruct, free, and make *p to be nullptr
   template<typename T> static void DeleteAndNull(T** p) {
     (*p)->~T();
     Free(*p);
-    *p = NULL;
+    *p = nullptr;
   }
   template<typename T> static void DeleteAndNullIfNot(T** p) {
-    if (*p != NULL) DeleteAndNull(p);
+    if (*p != nullptr) DeleteAndNull(p);
   }
  private:
   static LowLevelAlloc::Arena* arena_;
   static int alloc_count_;
 };

-LowLevelAlloc::Arena* HeapLeakChecker::Allocator::arena_ = NULL;
-int HeapLeakChecker::Allocator::alloc_count_ = 0;
+LowLevelAlloc::Arena* HeapLeakChecker::Allocator::arena_;
+int HeapLeakChecker::Allocator::alloc_count_;

 //----------------------------------------------------------------------
 // HeapLeakChecker live object tracking components
@@ -441,7 +441,7 @@ typedef map<uintptr_t, size_t, less<uintptr_t>,
             STL_Allocator<pair<const uintptr_t, size_t>,
                           HeapLeakChecker::Allocator>
            > IgnoredObjectsMap;
-static IgnoredObjectsMap* ignored_objects = NULL;
+static IgnoredObjectsMap* ignored_objects;

 // All objects (memory ranges) that we consider to be the sources of pointers
 // to live (not leaked) objects.
@@ -455,7 +455,7 @@ static IgnoredObjectsMap* ignored_objects = NULL;
 typedef vector<AllocObject,
                STL_Allocator<AllocObject, HeapLeakChecker::Allocator>
               > LiveObjectsStack;
-static LiveObjectsStack* live_objects = NULL;
+static LiveObjectsStack* live_objects;

 // A special string type that uses my allocator
 typedef basic_string<char, char_traits<char>,
@@ -468,7 +468,7 @@ typedef map<HCL_string, LiveObjectsStack, less<HCL_string>,
             STL_Allocator<pair<const HCL_string, LiveObjectsStack>,
                           HeapLeakChecker::Allocator>
            > LibraryLiveObjectsStacks;
-static LibraryLiveObjectsStacks* library_live_objects = NULL;
+static LibraryLiveObjectsStacks* library_live_objects;

 // Value stored in the map of disabled address ranges;
 // its key is the end of the address range.
@@ -484,7 +484,7 @@ typedef map<uintptr_t, HeapLeakChecker::RangeValue, less<uintptr_t>,
            > DisabledRangeMap;
 // The disabled program counter address ranges for profile dumping
 // that are registered with HeapLeakChecker::DisableChecksFromToLocked.
-static DisabledRangeMap* disabled_ranges = NULL;
+static DisabledRangeMap* disabled_ranges;

 // Set of stack tops.
 // These are used to consider live only appropriate chunks of the memory areas
@@ -493,7 +493,7 @@ static DisabledRangeMap* disabled_ranges = NULL;
 typedef set<uintptr_t, less<uintptr_t>,
             STL_Allocator<uintptr_t, HeapLeakChecker::Allocator>
            > StackTopSet;
-static StackTopSet* stack_tops = NULL;
+static StackTopSet* stack_tops;

 // A map of ranges of code addresses for the system libraries
 // that can mmap/mremap/sbrk-allocate memory regions for stacks
@@ -503,7 +503,7 @@ typedef map<uintptr_t, uintptr_t, less<uintptr_t>,
             STL_Allocator<pair<const uintptr_t, uintptr_t>,
                           HeapLeakChecker::Allocator>
            > GlobalRegionCallerRangeMap;
-static GlobalRegionCallerRangeMap* global_region_caller_ranges = NULL;
+static GlobalRegionCallerRangeMap* global_region_caller_ranges;

 // TODO(maxim): make our big data structs into own modules

@@ -569,19 +569,19 @@ inline static uintptr_t AsInt(const void* ptr) {
 static const char* hc_strstr(const char* s1, const char* s2) {
   const size_t len = strlen(s2);
   RAW_CHECK(len > 0, "Unexpected empty string passed to strstr()");
-  for (const char* p = strchr(s1, *s2); p != NULL; p = strchr(p+1, *s2)) {
+  for (const char* p = strchr(s1, *s2); p != nullptr; p = strchr(p+1, *s2)) {
     if (strncmp(p, s2, len) == 0) {
       return p;
     }
   }
-  return NULL;
+  return nullptr;
 }

 //----------------------------------------------------------------------

 // Our hooks for MallocHook
 static void NewHook(const void* ptr, size_t size) {
-  if (ptr != NULL) {
+  if (ptr != nullptr) {
     const int counter = get_thread_disable_counter();
     const bool ignore = (counter > 0);
     RAW_VLOG(16, "Recording Alloc: %p of %zu; %d", ptr, size,
@@ -609,7 +609,7 @@ static void NewHook(const void* ptr, size_t size) {
 }

 static void DeleteHook(const void* ptr) {
-  if (ptr != NULL) {
+  if (ptr != nullptr) {
     RAW_VLOG(16, "Recording Free %p", ptr);
     { SpinLockHolder l(&heap_checker_lock);
       if (heap_checker_on) heap_profile->RecordFree(ptr);
@@ -819,8 +819,8 @@ static void RecordGlobalDataLocked(uintptr_t start_address,
                                    const char* filename) {
   RAW_DCHECK(heap_checker_lock.IsHeld(), "");
   // Ignore non-writeable regions.
-  if (strchr(permissions, 'w') == NULL) return;
-  if (filename == NULL || *filename == '\0') {
+  if (strchr(permissions, 'w') == nullptr) return;
+  if (filename == nullptr || *filename == '\0') {
     filename = kUnnamedProcSelfMapEntry;
   }
   RAW_VLOG(11, "Looking into %s: 0x%" PRIxPTR "..0x%" PRIxPTR,
@@ -836,7 +836,7 @@ static void RecordGlobalDataLocked(uintptr_t start_address,
 static bool IsLibraryNamed(const char* library, const char* library_base) {
   const char* p = hc_strstr(library, library_base);
   size_t sz = strlen(library_base);
-  return p != NULL && (p[sz] == '.' || p[sz] == '-');
+  return p != nullptr && (p[sz] == '.' || p[sz] == '-');
 }

 // static
@@ -893,7 +893,7 @@ void HeapLeakChecker::DisableLibraryAllocsLocked(const char* library,
       IsLibraryNamed(library, "/ld")) {
     RAW_VLOG(10, "Global memory regions made by %s will be live data",
              library);
-    if (global_region_caller_ranges == NULL) {
+    if (global_region_caller_ranges == nullptr) {
       global_region_caller_ranges =
         new(Allocator::Allocate(sizeof(GlobalRegionCallerRangeMap)))
           GlobalRegionCallerRangeMap;
@@ -1257,7 +1257,7 @@ static va_list dummy_ap;
 // static
 void HeapLeakChecker::IgnoreAllLiveObjectsLocked(const void* self_stack_top) {
   RAW_DCHECK(heap_checker_lock.IsHeld(), "");
-  RAW_CHECK(live_objects == NULL, "");
+  RAW_CHECK(live_objects == nullptr, "");
   live_objects = new(Allocator::Allocate(sizeof(LiveObjectsStack)))
                    LiveObjectsStack;
   stack_tops = new(Allocator::Allocate(sizeof(StackTopSet))) StackTopSet;
@@ -1297,7 +1297,7 @@ void HeapLeakChecker::IgnoreAllLiveObjectsLocked(const void* self_stack_top) {
   // these two conditions:
   bool want_and_can_run_in_main_thread =
     ProfilingIsEnabledForAllThreads() &&
-    TCMalloc_ListAllProcessThreads(NULL, IsOneThread) == 1;
+    TCMalloc_ListAllProcessThreads(nullptr, IsOneThread) == 1;
   // When the normal path of TCMalloc_ListAllProcessThreads below is taken,
   // we fully suspend the threads right here before any liveness checking
   // and keep them suspended for the whole time of liveness checking
@@ -1306,8 +1306,8 @@ void HeapLeakChecker::IgnoreAllLiveObjectsLocked(const void* self_stack_top) {
   // if not suspended they could still mess with the pointer
|
||||
// graph while we walk it).
|
||||
int r = want_and_can_run_in_main_thread
|
||||
? IgnoreLiveThreadsLocked(NULL, 1, &self_thread_pid, dummy_ap)
|
||||
: TCMalloc_ListAllProcessThreads(NULL, IgnoreLiveThreadsLocked);
|
||||
? IgnoreLiveThreadsLocked(nullptr, 1, &self_thread_pid, dummy_ap)
|
||||
: TCMalloc_ListAllProcessThreads(nullptr, IgnoreLiveThreadsLocked);
|
||||
need_to_ignore_non_thread_objects = r < 0;
|
||||
if (r < 0) {
|
||||
RAW_LOG(WARNING, "Thread finding failed with %d errno=%d", r, errno);
|
||||
|
@ -1520,7 +1520,7 @@ void HeapLeakChecker::DoIgnoreObject(const void* ptr) {
|
|||
} else {
|
||||
RAW_VLOG(10, "Going to ignore live object at %p of %zu bytes",
|
||||
ptr, object_size);
|
||||
if (ignored_objects == NULL) {
|
||||
if (ignored_objects == nullptr) {
|
||||
ignored_objects = new(Allocator::Allocate(sizeof(IgnoredObjectsMap)))
|
||||
IgnoredObjectsMap;
|
||||
}
|
||||
|
@ -1570,8 +1570,8 @@ char* HeapLeakChecker::MakeProfileNameLocked() {
|
|||
|
||||
void HeapLeakChecker::Create(const char *name, bool make_start_snapshot) {
|
||||
SpinLockHolder l(lock_);
|
||||
name_ = NULL; // checker is inactive
|
||||
start_snapshot_ = NULL;
|
||||
name_ = nullptr; // checker is inactive
|
||||
start_snapshot_ = nullptr;
|
||||
has_checked_ = false;
|
||||
inuse_bytes_increase_ = 0;
|
||||
inuse_allocs_increase_ = 0;
|
||||
|
@ -1582,8 +1582,8 @@ void HeapLeakChecker::Create(const char *name, bool make_start_snapshot) {
|
|||
SpinLockHolder al(&alignment_checker_lock);
|
||||
SpinLockHolder hl(&heap_checker_lock);
|
||||
MemoryRegionMap::LockHolder ml;
|
||||
if (heap_checker_on && profile_name_prefix != NULL) {
|
||||
RAW_DCHECK(strchr(name, '/') == NULL, "must be a simple name");
|
||||
if (heap_checker_on && profile_name_prefix != nullptr) {
|
||||
RAW_DCHECK(strchr(name, '/') == nullptr, "must be a simple name");
|
||||
memcpy(n, name, strlen(name) + 1);
|
||||
name_ = n; // checker is active
|
||||
if (make_start_snapshot) {
|
||||
|
@ -1602,7 +1602,7 @@ void HeapLeakChecker::Create(const char *name, bool make_start_snapshot) {
|
|||
RAW_LOG(WARNING, "To activate set the HEAPCHECK environment variable.\n");
|
||||
}
|
||||
}
|
||||
if (name_ == NULL) {
|
||||
if (name_ == nullptr) {
|
||||
UnIgnoreObject(n);
|
||||
delete[] n; // must be done after we unlock
|
||||
}
|
||||
|
@ -1721,15 +1721,15 @@ bool HeapLeakChecker::DoNoLeaks(ShouldSymbolize should_symbolize) {
|
|||
" program-exit's automatic check.");
|
||||
}
|
||||
|
||||
HeapProfileTable::Snapshot* leaks = NULL;
|
||||
char* pprof_file = NULL;
|
||||
HeapProfileTable::Snapshot* leaks = nullptr;
|
||||
char* pprof_file = nullptr;
|
||||
|
||||
{
|
||||
// Heap activity in other threads is paused during this function
|
||||
// (i.e. until we got all profile difference info).
|
||||
SpinLockHolder hl(&heap_checker_lock);
|
||||
if (heap_checker_on == false) {
|
||||
if (name_ != NULL) { // leak checking enabled when created the checker
|
||||
if (name_ != nullptr) { // leak checking enabled when created the checker
|
||||
RAW_LOG(WARNING, "Heap leak checker got turned off after checker "
|
||||
"\"%s\" has been created, no leak check is being done for it!",
|
||||
name_);
|
||||
|
@@ -1748,7 +1748,7 @@ bool HeapLeakChecker::DoNoLeaks(ShouldSymbolize should_symbolize) {
// can detect leaks in the heap-leak-checker itself
const int initial_allocs = Allocator::alloc_count();

if (name_ == NULL) {
if (name_ == nullptr) {
RAW_LOG(FATAL, "Heap leak checker must not be turned on "
"after construction of a HeapLeakChecker");
}
@@ -1768,7 +1768,7 @@ bool HeapLeakChecker::DoNoLeaks(ShouldSymbolize should_symbolize) {
inuse_allocs_increase_ = static_cast<ssize_t>(leaks->total().allocs);
if (leaks->Empty()) {
heap_profile->ReleaseSnapshot(leaks);
leaks = NULL;
leaks = nullptr;

// We can only check for internal leaks along the no-user-leak
// path since in the leak path we temporarily release
@@ -1804,13 +1804,13 @@ bool HeapLeakChecker::DoNoLeaks(ShouldSymbolize should_symbolize) {
}
}

if (leaks != NULL) {
if (leaks != nullptr) {
pprof_file = MakeProfileNameLocked();
}
}

has_checked_ = true;
if (leaks == NULL) {
if (leaks == nullptr) {
if (FLAGS_heap_check_max_pointer_offset == -1) {
RAW_LOG(WARNING,
"Found no leaks without max_pointer_offset restriction: "
@@ -1837,13 +1837,13 @@ bool HeapLeakChecker::DoNoLeaks(ShouldSymbolize should_symbolize) {
// typically only want to report once in a program's run, at the
// very end.
if (MallocHook::GetNewHook() == NewHook)
MallocHook::SetNewHook(NULL);
MallocHook::SetNewHook(nullptr);
if (MallocHook::GetDeleteHook() == DeleteHook)
MallocHook::SetDeleteHook(NULL);
MallocHook::SetDeleteHook(nullptr);
MemoryRegionMap::Shutdown();
// Make sure all the hooks really got unset:
RAW_CHECK(MallocHook::GetNewHook() == NULL, "");
RAW_CHECK(MallocHook::GetDeleteHook() == NULL, "");
RAW_CHECK(MallocHook::GetNewHook() == nullptr, "");
RAW_CHECK(MallocHook::GetDeleteHook() == nullptr, "");
have_disabled_hooks_for_symbolize = true;
leaks->ReportLeaks(name_, pprof_file, true); // true = should_symbolize
} else {
@@ -1862,18 +1862,18 @@ bool HeapLeakChecker::DoNoLeaks(ShouldSymbolize should_symbolize) {
}
}

return (leaks == NULL);
return (leaks == nullptr);
}

HeapLeakChecker::~HeapLeakChecker() {
if (name_ != NULL) { // had leak checking enabled when created the checker
if (name_ != nullptr) { // had leak checking enabled when created the checker
if (!has_checked_) {
RAW_LOG(FATAL, "Some *NoLeaks|SameHeap method"
" must be called on any created HeapLeakChecker");
}

// Deallocate any snapshot taken at start
if (start_snapshot_ != NULL) {
if (start_snapshot_ != nullptr) {
SpinLockHolder l(&heap_checker_lock);
heap_profile->ReleaseSnapshot(
reinterpret_cast<HeapProfileTable::Snapshot*>(start_snapshot_));
@@ -1881,7 +1881,7 @@ HeapLeakChecker::~HeapLeakChecker() {

UnIgnoreObject(name_);
delete[] name_;
name_ = NULL;
name_ = nullptr;
}
delete lock_;
}
@@ -1896,12 +1896,12 @@ bool HeapLeakChecker::IsActive() {
return heap_checker_on;
}

vector<HeapCleaner::void_function>* HeapCleaner::heap_cleanups_ = NULL;
vector<HeapCleaner::void_function>* HeapCleaner::heap_cleanups_;

// When a HeapCleaner object is initialized, add its function to the static list
// of cleaners to be run before leak checking.
HeapCleaner::HeapCleaner(void_function f) {
if (heap_cleanups_ == NULL)
if (heap_cleanups_ == nullptr)
heap_cleanups_ = new vector<HeapCleaner::void_function>;
heap_cleanups_->push_back(f);
}
@@ -1915,7 +1915,7 @@ void HeapCleaner::RunHeapCleanups() {
f();
}
delete heap_cleanups_;
heap_cleanups_ = NULL;
heap_cleanups_ = nullptr;
}

// Program exit heap cleanup registered as a module object destructor.
@@ -2062,7 +2062,7 @@ void HeapLeakChecker_InternalInitStart() {
snprintf(pid_buf, sizeof(pid_buf), ".%d", main_thread_pid);
*profile_prefix += pid_buf;
{ SpinLockHolder l(&heap_checker_lock);
RAW_DCHECK(profile_name_prefix == NULL, "");
RAW_DCHECK(profile_name_prefix == nullptr, "");
profile_name_prefix = profile_prefix;
}

@@ -2099,7 +2099,7 @@ void HeapLeakChecker_InternalInitStart() {
if (FLAGS_heap_check != "local") {
HeapLeakChecker* main_hc = new HeapLeakChecker();
SpinLockHolder l(&heap_checker_lock);
RAW_DCHECK(main_heap_checker == NULL,
RAW_DCHECK(main_heap_checker == nullptr,
"Repeated creation of main_heap_checker");
main_heap_checker = main_hc;
do_main_heap_check = true;
@@ -2212,7 +2212,7 @@ void HeapLeakChecker::BeforeConstructorsLocked() {
// Set up MemoryRegionMap with (at least) one caller stack frame to record
// (important that it's done before HeapProfileTable creation below).
Allocator::Init();
RAW_CHECK(heap_profile == NULL, "");
RAW_CHECK(heap_profile == nullptr, "");
heap_profile = new(Allocator::Allocate(sizeof(HeapProfileTable)))
HeapProfileTable(&Allocator::Allocate, &Allocator::Free,
/* profile_mmap */ false);
@@ -2317,7 +2317,7 @@ void HeapLeakChecker_AfterDestructors() {
if (FLAGS_heap_check_after_destructors) {
if (HeapLeakChecker::DoMainHeapCheck()) {
const struct timespec sleep_time = { 0, 500000000 }; // 500 ms
nanosleep(&sleep_time, NULL);
nanosleep(&sleep_time, nullptr);
// Need this hack to wait for other pthreads to exit.
// Otherwise tcmalloc finds errors
// on a free() call from pthreads.
@@ -2339,7 +2339,7 @@ void HeapLeakChecker::DisableChecksFromToLocked(const void* start_address,
int max_depth) {
RAW_DCHECK(heap_checker_lock.IsHeld(), "");
RAW_DCHECK(start_address < end_address, "");
if (disabled_ranges == NULL) {
if (disabled_ranges == nullptr) {
disabled_ranges = new(Allocator::Allocate(sizeof(DisabledRangeMap)))
DisabledRangeMap;
}

@@ -41,12 +41,12 @@
// are implemented with the member "HeapProfileBucket* next".
//
// A structure of a hash table HeapProfileBucket** bucket_table would be like:
// bucket_table[0] => NULL
// bucket_table[1] => HeapProfileBucket() => HeapProfileBucket() => NULL
// bucket_table[0] => nullptr
// bucket_table[1] => HeapProfileBucket() => HeapProfileBucket() => nullptr
// ...
// bucket_table[i] => HeapProfileBucket() => NULL
// bucket_table[i] => HeapProfileBucket() => nullptr
// ...
// bucket_table[n] => HeapProfileBucket() => NULL
// bucket_table[n] => HeapProfileBucket() => nullptr

#ifndef HEAP_PROFILE_STATS_H_
#define HEAP_PROFILE_STATS_H_
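
The chained layout pictured in the comment above can be made concrete with a short stand-alone sketch (an illustration only, not part of this commit; the payload field is a hypothetical stand-in for the real bucket members):

    struct HeapProfileBucket {
      int depth;                // hypothetical payload; the real bucket carries stats too
      HeapProfileBucket* next;  // chain link, exactly as in the comment above
    };

    // A slot of bucket_table is either nullptr (empty) or the head of a
    // nullptr-terminated singly-linked chain; lookup walks that chain.
    HeapProfileBucket* FindInSlot(HeapProfileBucket** bucket_table, int slot,
                                  int wanted_depth) {
      for (HeapProfileBucket* b = bucket_table[slot]; b != nullptr; b = b->next) {
        if (b->depth == wanted_depth) return b;
      }
      return nullptr;  // chain exhausted without a match
    }
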
@@ -117,9 +117,9 @@ HeapProfileTable::HeapProfileTable(Allocator alloc,
: alloc_(alloc),
dealloc_(dealloc),
profile_mmap_(profile_mmap),
bucket_table_(NULL),
bucket_table_(nullptr),
num_buckets_(0),
address_map_(NULL) {
address_map_(nullptr) {
// Make a hash table for buckets.
const int table_bytes = kHashTableSize * sizeof(*bucket_table_);
bucket_table_ = static_cast<Bucket**>(alloc_(table_bytes));
@@ -138,7 +138,7 @@ HeapProfileTable::~HeapProfileTable() {
// Free the allocation map.
address_map_->~AllocationMap();
dealloc_(address_map_);
address_map_ = NULL;
address_map_ = nullptr;

// Free the hash table.
for (int i = 0; i < kHashTableSize; i++) {
@@ -150,7 +150,7 @@ HeapProfileTable::~HeapProfileTable() {
}
}
dealloc_(bucket_table_);
bucket_table_ = NULL;
bucket_table_ = nullptr;
}

HeapProfileTable::Bucket* HeapProfileTable::GetBucket(int depth,
@@ -224,19 +224,19 @@ void HeapProfileTable::RecordFree(const void* ptr) {

bool HeapProfileTable::FindAlloc(const void* ptr, size_t* object_size) const {
const AllocValue* alloc_value = address_map_->Find(ptr);
if (alloc_value != NULL) *object_size = alloc_value->bytes;
return alloc_value != NULL;
if (alloc_value != nullptr) *object_size = alloc_value->bytes;
return alloc_value != nullptr;
}

bool HeapProfileTable::FindAllocDetails(const void* ptr,
AllocInfo* info) const {
const AllocValue* alloc_value = address_map_->Find(ptr);
if (alloc_value != NULL) {
if (alloc_value != nullptr) {
info->object_size = alloc_value->bytes;
info->call_stack = alloc_value->bucket()->stack;
info->stack_depth = alloc_value->bucket()->depth;
}
return alloc_value != NULL;
return alloc_value != nullptr;
}

bool HeapProfileTable::FindInsideAlloc(const void* ptr,
@@ -245,8 +245,8 @@ bool HeapProfileTable::FindInsideAlloc(const void* ptr,
size_t* object_size) const {
const AllocValue* alloc_value =
address_map_->FindInside(&AllocValueSize, max_size, ptr, object_ptr);
if (alloc_value != NULL) *object_size = alloc_value->bytes;
return alloc_value != NULL;
if (alloc_value != nullptr) *object_size = alloc_value->bytes;
return alloc_value != nullptr;
}

bool HeapProfileTable::MarkAsLive(const void* ptr) {
@@ -354,7 +354,7 @@ void HeapProfileTable::CleanupOldProfiles(const char* prefix) {
string pattern = string(prefix) + ".*" + kFileExt;
#if defined(HAVE_GLOB_H)
glob_t g;
const int r = glob(pattern.c_str(), GLOB_ERR, NULL, &g);
const int r = glob(pattern.c_str(), GLOB_ERR, nullptr, &g);
if (r == 0 || r == GLOB_NOMATCH) {
const int prefix_length = strlen(prefix);
for (int i = 0; i < g.gl_pathc; i++) {

@@ -161,9 +161,10 @@ class HeapProfileTable {
void ReleaseSnapshot(Snapshot* snapshot);

// Return a snapshot of every non-live, non-ignored object in *this.
// If "base" is non-NULL, skip any objects present in "base".
// As a side-effect, clears the "live" bit on every live object in *this.
// Caller must call ReleaseSnapshot() on result when no longer needed.
// If "base" is non-nullptr, skip any objects present in "base". As
// a side-effect, clears the "live" bit on every live object in
// *this. Caller must call ReleaseSnapshot() on result when no
// longer needed.
Snapshot* NonLiveSnapshot(Snapshot* base);

private:

@@ -156,17 +156,17 @@ static void ProfilerFree(void* p) {
//----------------------------------------------------------------------

// Access to all of these is protected by heap_lock.
static bool is_on = false; // If we are on as a subsystem.
static bool dumping = false; // Dumping status to prevent recursion
static char* filename_prefix = NULL; // Prefix used for profile file names
// (NULL if no need for dumping yet)
static int dump_count = 0; // How many dumps so far
static int64_t last_dump_alloc = 0; // alloc_size when we last dumped
static int64_t last_dump_free = 0; // free_size when we last dumped
static int64_t high_water_mark = 0; // In-use-bytes at last high-water dump
static int64_t last_dump_time = 0; // The time of the last dump
static bool is_on; // If we are on as a subsystem.
static bool dumping; // Dumping status to prevent recursion
static char* filename_prefix; // Prefix used for profile file names
// (nullptr if no need for dumping yet)
static int dump_count; // How many dumps so far
static int64_t last_dump_alloc; // alloc_size when we last dumped
static int64_t last_dump_free; // free_size when we last dumped
static int64_t high_water_mark; // In-use-bytes at last high-water dump
static int64_t last_dump_time; // The time of the last dump

static HeapProfileTable* heap_profile = NULL; // the heap profile table
static HeapProfileTable* heap_profile; // the heap profile table

//----------------------------------------------------------------------
// Profile generation
@@ -199,7 +199,7 @@ static void DumpProfileLocked(const char* reason) {
RAW_DCHECK(is_on, "");
RAW_DCHECK(!dumping, "");

if (filename_prefix == NULL) return; // we do not yet need dumping
if (filename_prefix == nullptr) return; // we do not yet need dumping

dumping = true;

@@ -268,7 +268,7 @@ static void MaybeDumpProfileLocked() {
inuse_bytes >> 20);
need_to_dump = true;
} else if (FLAGS_heap_profile_time_interval > 0 ) {
int64_t current_time = time(NULL);
int64_t current_time = time(nullptr);
if (current_time - last_dump_time >=
FLAGS_heap_profile_time_interval) {
snprintf(buf, sizeof(buf), "%" PRId64 " sec since the last dump",
@@ -315,12 +315,12 @@ static void RecordFree(const void* ptr) {

// static
void NewHook(const void* ptr, size_t size) {
if (ptr != NULL) RecordAlloc(ptr, size, 0);
if (ptr != nullptr) RecordAlloc(ptr, size, 0);
}

// static
void DeleteHook(const void* ptr) {
if (ptr != NULL) RecordFree(ptr);
if (ptr != nullptr) RecordFree(ptr);
}

static tcmalloc::MappingHookSpace mmap_logging_hook_space;
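
These hooks mirror the heap-checker's NewHook/DeleteHook earlier in this commit; both are installed and torn down through MallocHook. A minimal sketch of that pattern, assuming the MallocHook setters used elsewhere in this diff (the header path is an assumption):

    #include <cstddef>
    #include <gperftools/malloc_hook.h>  // assumed install location

    static void MyNewHook(const void* ptr, size_t size) {
      if (ptr != nullptr) { /* record the allocation */ }
    }

    static void MyDeleteHook(const void* ptr) {
      if (ptr != nullptr) { /* record the free */ }
    }

    void InstallHooks() {
      MallocHook::SetNewHook(MyNewHook);
      MallocHook::SetDeleteHook(MyDeleteHook);
    }

    void RemoveHooks() {
      // Unset by passing nullptr, mirroring the shutdown path in the
      // heap-checker hunks above.
      MallocHook::SetNewHook(nullptr);
      MallocHook::SetDeleteHook(nullptr);
    }
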
@@ -332,7 +332,7 @@ static void LogMappingEvent(const tcmalloc::MappingEvent& evt) {

if (evt.file_valid) {
// We use PRIxPTR not just '%p' to avoid deadlocks
// in pretty-printing of NULL as "nil".
// in pretty-printing of nullptr as "nil".
// TODO(maxim): instead should use a safe snprintf reimplementation
RAW_LOG(INFO,
"mmap(start=0x%" PRIxPTR ", len=%zu, prot=0x%x, flags=0x%x, "
@@ -342,7 +342,7 @@ static void LogMappingEvent(const tcmalloc::MappingEvent& evt) {
(uintptr_t) evt.after_address);
} else if (evt.after_valid && evt.before_valid) {
// We use PRIxPTR not just '%p' to avoid deadlocks
// in pretty-printing of NULL as "nil".
// in pretty-printing of nullptr as "nil".
// TODO(maxim): instead should use a safe snprintf reimplementation
RAW_LOG(INFO,
"mremap(old_addr=0x%" PRIxPTR ", old_size=%zu, "
@@ -365,7 +365,7 @@ static void LogMappingEvent(const tcmalloc::MappingEvent& evt) {
increment, (uintptr_t) result);
} else if (evt.before_valid) {
// We use PRIxPTR not just '%p' to avoid deadlocks
// in pretty-printing of NULL as "nil".
// in pretty-printing of nullptr as "nil".
// TODO(maxim): instead should use a safe snprintf reimplementation
RAW_LOG(INFO, "munmap(start=0x%" PRIxPTR ", len=%zu)",
(uintptr_t) evt.before_address, evt.before_length);
@@ -426,7 +426,7 @@ extern "C" void HeapProfilerStart(const char* prefix) {
}

// Copy filename prefix
RAW_DCHECK(filename_prefix == NULL, "");
RAW_DCHECK(filename_prefix == nullptr, "");
const int prefix_length = strlen(prefix);
filename_prefix = reinterpret_cast<char*>(ProfilerMalloc(prefix_length + 1));
memcpy(filename_prefix, prefix, prefix_length);
@@ -456,11 +456,11 @@ extern "C" void HeapProfilerStop() {
// free profile
heap_profile->~HeapProfileTable();
ProfilerFree(heap_profile);
heap_profile = NULL;
heap_profile = nullptr;

// free prefix
ProfilerFree(filename_prefix);
filename_prefix = NULL;
filename_prefix = nullptr;

if (!LowLevelAlloc::DeleteArena(heap_profiler_memory)) {
RAW_LOG(FATAL, "Memory leak in HeapProfiler:");
@@ -515,8 +515,8 @@ static void HeapProfilerInit() {
#endif

char *signal_number_str = getenv("HEAPPROFILESIGNAL");
if (signal_number_str != NULL) {
long int signal_number = strtol(signal_number_str, NULL, 10);
if (signal_number_str != nullptr) {
long int signal_number = strtol(signal_number_str, nullptr, 10);
intptr_t old_signal_handler = reinterpret_cast<intptr_t>(signal(signal_number, HeapProfilerDumpSignal));
if (old_signal_handler == reinterpret_cast<intptr_t>(SIG_ERR)) {
RAW_LOG(FATAL, "Failed to set signal. Perhaps signal number %s is invalid\n", signal_number_str);

@@ -221,7 +221,7 @@ extern "C" {
} // extern "C"

static malloc_zone_t *get_default_zone() {
malloc_zone_t **zones = NULL;
malloc_zone_t **zones = nullptr;
unsigned int num_zones = 0;

/*
@@ -234,7 +234,7 @@ static malloc_zone_t *get_default_zone() {
* So get the list of zones to get the first one, instead of relying on
* malloc_default_zone.
*/
if (KERN_SUCCESS != malloc_get_all_zones(0, NULL, (vm_address_t**) &zones,
if (KERN_SUCCESS != malloc_get_all_zones(0, nullptr, (vm_address_t**) &zones,
&num_zones)) {
/* Reset the value in case the failure happened after it was set. */
num_zones = 0;
@@ -272,8 +272,8 @@ static void ReplaceSystemAlloc() {
tcmalloc_zone.free = &mz_free;
tcmalloc_zone.realloc = &mz_realloc;
tcmalloc_zone.destroy = &mz_destroy;
tcmalloc_zone.batch_malloc = NULL;
tcmalloc_zone.batch_free = NULL;
tcmalloc_zone.batch_malloc = nullptr;
tcmalloc_zone.batch_free = nullptr;
tcmalloc_zone.introspect = &tcmalloc_introspection;

// from AvailabilityMacros.h

@@ -74,12 +74,12 @@ inline bool SLL_TryPop(void **list, void **rv) {

// Remove N elements from a linked list to which head points. head will be
// modified to point to the new head. start and end will point to the first
// and last nodes of the range. Note that end will point to NULL after this
// and last nodes of the range. Note that end will point to nullptr after this
// function is called.
inline void SLL_PopRange(void **head, int N, void **start, void **end) {
if (N == 0) {
*start = NULL;
*end = NULL;
*start = nullptr;
*end = nullptr;
return;
}

@@ -92,7 +92,7 @@ inline void SLL_PopRange(void **head, int N, void **start, void **end) {
*end = tmp;
*head = SLL_Next(tmp);
// Unlink range from list.
SLL_SetNext(tmp, NULL);
SLL_SetNext(tmp, nullptr);
}

inline void SLL_PushRange(void **head, void *start, void *end) {

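A small usage sketch of SLL_PopRange on a hand-built three-node list (illustrative only; it relies on the SLL_SetNext/SLL_Next helpers of this header, where a node's first word stores its 'next' link):

    void* a[3] = {};  // three one-word nodes
    void* head = &a[0];
    SLL_SetNext(&a[0], &a[1]);
    SLL_SetNext(&a[1], &a[2]);
    SLL_SetNext(&a[2], nullptr);

    void* start;
    void* end;
    SLL_PopRange(&head, 2, &start, &end);
    // Now start == &a[0], end == &a[1], SLL_Next(end) == nullptr, and
    // head == &a[2]: the two popped nodes are detached from the list.
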
@@ -96,11 +96,11 @@ bool MallocExtension::MallocMemoryStats(int* blocks, size_t* total,
}

void** MallocExtension::ReadStackTraces(int* sample_period) {
return NULL;
return nullptr;
}

void** MallocExtension::ReadHeapGrowthStackTraces() {
return NULL;
return nullptr;
}

void MallocExtension::MarkThreadIdle() {
@@ -112,7 +112,7 @@ void MallocExtension::MarkThreadBusy() {
}

SysAllocator* MallocExtension::GetSystemAllocator() {
return NULL;
return nullptr;
}

void MallocExtension::SetSystemAllocator(SysAllocator *a) {
@@ -252,7 +252,7 @@ void PrintStackEntry(MallocExtensionWriter* writer, void** entry) {
void MallocExtension::GetHeapSample(MallocExtensionWriter* writer) {
int sample_period = 0;
void** entries = ReadStackTraces(&sample_period);
if (entries == NULL) {
if (entries == nullptr) {
const char* const kErrorMsg =
"This malloc implementation does not support sampling.\n"
"As of 2005/01/26, only tcmalloc supports sampling, and\n"
@@ -275,7 +275,7 @@ void MallocExtension::GetHeapSample(MallocExtensionWriter* writer) {

void MallocExtension::GetHeapGrowthStacks(MallocExtensionWriter* writer) {
void** entries = ReadHeapGrowthStackTraces();
if (entries == NULL) {
if (entries == nullptr) {
const char* const kErrorMsg =
"This malloc implementation does not support "
"ReadHeapGrowthStackTraces().\n"

@@ -42,7 +42,7 @@
#include <errno.h> // for errno, EINVAL
#include <inttypes.h> // for PRId64
#include <limits.h> // for PATH_MAX
#include <stddef.h> // for size_t, NULL
#include <stddef.h> // for size_t
#include <stdint.h> // for int64_t, uintptr_t
#include <stdio.h> // for snprintf
#include <stdlib.h> // for mkstemp
@@ -128,7 +128,7 @@ void* HugetlbSysAllocator::Alloc(size_t size, size_t *actual_size,
// We don't respond to allocation requests smaller than big_page_size_ unless
// the caller is ok to take more than they asked for. Used by MetaDataAlloc.
if (!FLAGS_memfs_malloc_disable_fallback &&
actual_size == NULL && size < big_page_size_) {
actual_size == nullptr && size < big_page_size_) {
return fallback_->Alloc(size, actual_size, alignment);
}

@@ -142,10 +142,10 @@ void* HugetlbSysAllocator::Alloc(size_t size, size_t *actual_size,
}

void* result = AllocInternal(aligned_size, actual_size, new_alignment);
if (result != NULL) {
if (result != nullptr) {
return result;
} else if (FLAGS_memfs_malloc_disable_fallback) {
return NULL;
return nullptr;
}
Log(kLog, __FILE__, __LINE__,
"HugetlbSysAllocator: (failed, allocated)", failed_, hugetlb_base_);
@@ -176,7 +176,7 @@ void* HugetlbSysAllocator::AllocInternal(size_t size, size_t* actual_size,
Log(kLog, __FILE__, __LINE__,
"alloc too large (size, bytes left)", size, limit-hugetlb_base_);
}
return NULL;
return nullptr;
}

// This is not needed for hugetlbfs, but needed for tmpfs. Annoyingly
@@ -186,7 +186,7 @@ void* HugetlbSysAllocator::AllocInternal(size_t size, size_t* actual_size,
Log(kLog, __FILE__, __LINE__,
"ftruncate failed", tcmalloc::SafeStrError(errno).c_str());
failed_ = true;
return NULL;
return nullptr;
}

// Note: size + extra does not overflow since:
@@ -204,7 +204,7 @@ void* HugetlbSysAllocator::AllocInternal(size_t size, size_t* actual_size,
tcmalloc::SafeStrError(errno).c_str());
failed_ = true;
}
return NULL;
return nullptr;
}
uintptr_t ptr = reinterpret_cast<uintptr_t>(result);

@@ -127,19 +127,19 @@ using std::max;

// ========================================================================= //

int MemoryRegionMap::client_count_ = 0;
int MemoryRegionMap::max_stack_depth_ = 0;
MemoryRegionMap::RegionSet* MemoryRegionMap::regions_ = nullptr;
LowLevelAlloc::Arena* MemoryRegionMap::arena_ = nullptr;
int MemoryRegionMap::client_count_;
int MemoryRegionMap::max_stack_depth_;
MemoryRegionMap::RegionSet* MemoryRegionMap::regions_;
LowLevelAlloc::Arena* MemoryRegionMap::arena_;
SpinLock MemoryRegionMap::lock_;
SpinLock MemoryRegionMap::owner_lock_; // ACQUIRED_AFTER(lock_)
int MemoryRegionMap::recursion_count_ = 0; // GUARDED_BY(owner_lock_)
int MemoryRegionMap::recursion_count_; // GUARDED_BY(owner_lock_)
uintptr_t MemoryRegionMap::lock_owner_tid_; // GUARDED_BY(owner_lock_)
int64_t MemoryRegionMap::map_size_ = 0;
int64_t MemoryRegionMap::unmap_size_ = 0;
HeapProfileBucket** MemoryRegionMap::bucket_table_ = nullptr; // GUARDED_BY(lock_)
int MemoryRegionMap::num_buckets_ = 0; // GUARDED_BY(lock_)
int MemoryRegionMap::saved_buckets_count_ = 0; // GUARDED_BY(lock_)
int64_t MemoryRegionMap::map_size_;
int64_t MemoryRegionMap::unmap_size_;
HeapProfileBucket** MemoryRegionMap::bucket_table_; // GUARDED_BY(lock_)
int MemoryRegionMap::num_buckets_; // GUARDED_BY(lock_)
int MemoryRegionMap::saved_buckets_count_; // GUARDED_BY(lock_)
HeapProfileBucket MemoryRegionMap::saved_buckets_[20]; // GUARDED_BY(lock_)
// GUARDED_BY(lock_)
const void* MemoryRegionMap::saved_buckets_keys_[20][kMaxStackDepth];
@@ -197,7 +197,7 @@ void MemoryRegionMap::Init(int max_stack_depth, bool use_buckets) NO_THREAD_SAFE
memset(bucket_table_, 0, table_bytes);
num_buckets_ = 0;
}
if (regions_ == NULL) { // init regions_
if (regions_ == nullptr) { // init regions_
InitRegionSetLocked();
}
Unlock();
@@ -214,7 +214,7 @@ bool MemoryRegionMap::Shutdown() NO_THREAD_SAFETY_ANALYSIS {
RAW_VLOG(10, "MemoryRegionMap Shutdown decrement done");
return true;
}
if (bucket_table_ != NULL) {
if (bucket_table_ != nullptr) {
for (int i = 0; i < kHashTableSize; i++) {
for (HeapProfileBucket* curr = bucket_table_[i]; curr != 0; /**/) {
HeapProfileBucket* bucket = curr;
@@ -225,13 +225,13 @@ bool MemoryRegionMap::Shutdown() NO_THREAD_SAFETY_ANALYSIS {
}
MyAllocator::Free(bucket_table_, 0);
num_buckets_ = 0;
bucket_table_ = NULL;
bucket_table_ = nullptr;
}

tcmalloc::UnHookMMapEvents(&mapping_hook_space_);

if (regions_) regions_->~RegionSet();
regions_ = NULL;
regions_ = nullptr;
bool deleted_arena = LowLevelAlloc::DeleteArena(arena_);
if (deleted_arena) {
arena_ = 0;
@@ -299,7 +299,7 @@ bool MemoryRegionMap::LockIsHeld() {
const MemoryRegionMap::Region*
MemoryRegionMap::DoFindRegionLocked(uintptr_t addr) {
RAW_CHECK(LockIsHeld(), "should be held (by this thread)");
if (regions_ != NULL) {
if (regions_ != nullptr) {
Region sample;
sample.SetRegionSetKey(addr);
RegionSet::iterator region = regions_->lower_bound(sample);
@@ -310,22 +310,22 @@ MemoryRegionMap::DoFindRegionLocked(uintptr_t addr) {
}
}
}
return NULL;
return nullptr;
}

bool MemoryRegionMap::FindRegion(uintptr_t addr, Region* result) {
Lock();
const Region* region = DoFindRegionLocked(addr);
if (region != NULL) *result = *region; // create it as an independent copy
if (region != nullptr) *result = *region; // create it as an independent copy
Unlock();
return region != NULL;
return region != nullptr;
}

bool MemoryRegionMap::FindAndMarkStackRegion(uintptr_t stack_top,
Region* result) {
Lock();
const Region* region = DoFindRegionLocked(stack_top);
if (region != NULL) {
if (region != nullptr) {
RAW_VLOG(10, "Stack at %p is inside region %p..%p",
reinterpret_cast<void*>(stack_top),
reinterpret_cast<void*>(region->start_addr),
@@ -335,7 +335,7 @@ bool MemoryRegionMap::FindAndMarkStackRegion(uintptr_t stack_top,
*result = *region; // create *result as an independent copy
}
Unlock();
return region != NULL;
return region != nullptr;
}

HeapProfileBucket* MemoryRegionMap::GetBucket(int depth,
@@ -372,7 +372,7 @@ HeapProfileBucket* MemoryRegionMap::GetBucket(int depth,
memset(bucket, 0, sizeof(*bucket));
++saved_buckets_count_;
bucket->stack = key_copy;
bucket->next = NULL;
bucket->next = nullptr;
} else {
recursive_insert = true;
const void** key_copy = static_cast<const void**>(
@@ -396,13 +396,13 @@ HeapProfileBucket* MemoryRegionMap::GetBucket(int depth,

MemoryRegionMap::RegionIterator MemoryRegionMap::BeginRegionLocked() {
RAW_CHECK(LockIsHeld(), "should be held (by this thread)");
RAW_CHECK(regions_ != NULL, "");
RAW_CHECK(regions_ != nullptr, "");
return regions_->begin();
}

MemoryRegionMap::RegionIterator MemoryRegionMap::EndRegionLocked() {
RAW_CHECK(LockIsHeld(), "should be held (by this thread)");
RAW_CHECK(regions_ != NULL, "");
RAW_CHECK(regions_ != nullptr, "");
return regions_->end();
}

@@ -528,7 +528,7 @@ inline void MemoryRegionMap::InsertRegionLocked(const Region& region) {
// then increment saved_regions_count.
saved_regions[saved_regions_count++] = region;
} else { // not a recursive call
if (regions_ == NULL) { // init regions_
if (regions_ == nullptr) { // init regions_
InitRegionSetLocked();
}
recursive_insert = true;
@@ -559,7 +559,7 @@ void MemoryRegionMap::RecordRegionAddition(const void* start, size_t size,
InsertRegionLocked(region);
// This will (eventually) allocate storage for and copy over the stack data
// from region.call_stack_data_ that is pointed to by region.call_stack().
if (bucket_table_ != NULL) {
if (bucket_table_ != nullptr) {
HeapProfileBucket* b = GetBucket(stack_depth, region.call_stack);
++b->allocs;
b->alloc_size += size;
@@ -602,7 +602,7 @@ void MemoryRegionMap::RecordRegionRemoval(const void* start, size_t size) {
}
}
}
if (regions_ == NULL) { // We must have just unset the hooks,
if (regions_ == nullptr) { // We must have just unset the hooks,
// but this thread was already inside the hook.
Unlock();
return;
@@ -694,7 +694,7 @@ void MemoryRegionMap::RecordRegionRemovalInBucket(int depth,
const void* const stack[],
size_t size) {
RAW_CHECK(LockIsHeld(), "should be held (by this thread)");
if (bucket_table_ == NULL) return;
if (bucket_table_ == nullptr) return;
HeapProfileBucket* b = GetBucket(depth, stack);
++b->frees;
b->free_size += size;

@@ -142,13 +142,13 @@ class MemoryRegionMap {
// Convenience accessor for call_stack[0],
// i.e. (the program counter of) the immediate caller
// of this region's allocation function,
// but it also returns NULL when call_stack_depth is 0,
// but it also returns nullptr when call_stack_depth is 0,
// i.e. when we weren't able to get the call stack.
// This usually happens in recursive calls, when the stack-unwinder
// calls mmap() which in turn calls the stack-unwinder.
uintptr_t caller() const {
return reinterpret_cast<uintptr_t>(call_stack_depth >= 1
? call_stack[0] : NULL);
? call_stack[0] : nullptr);
}

// Return true iff this region overlaps region x.
@@ -344,7 +344,7 @@ class MemoryRegionMap {
// helpers ==================================================================

// Helper for FindRegion and FindAndMarkStackRegion:
// returns the region covering 'addr' or NULL; assumes our lock_ is held.
// returns the region covering 'addr' or nullptr; assumes our lock_ is held.
static const Region* DoFindRegionLocked(uintptr_t addr);

// Verifying wrapper around regions_->insert(region)
@@ -395,7 +395,7 @@ template <class Body>
void MemoryRegionMap::IterateBuckets(Body body) {
for (int index = 0; index < kHashTableSize; index++) {
for (HeapProfileBucket* bucket = bucket_table_[index];
bucket != NULL;
bucket != nullptr;
bucket = bucket->next) {
body(bucket);
}

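
IterateBuckets above accepts any callable as Body; a hedged usage sketch (it assumes IterateBuckets is a static member, as its direct use of the static bucket_table_ suggests, that lock_ is held per the GUARDED_BY annotations, and that buckets carry the alloc_size/free_size fields updated in RecordRegionAddition/RecordRegionRemovalInBucket):

    // Sum net (allocated minus freed) bytes across every bucket.
    int64_t net_bytes = 0;
    MemoryRegionMap::IterateBuckets([&net_bytes](HeapProfileBucket* b) {
      net_bytes += b->alloc_size - b->free_size;
    });
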
@@ -127,7 +127,7 @@ Span* PageHeap::SearchFreeAndLargeLists(Length n) {
}
}
// No luck in free lists, our last chance is in a larger class.
return AllocLarge(n); // May be NULL
return AllocLarge(n); // May be nullptr
}

static const size_t kForcedCoalesceInterval = 128*1024*1024;
@@ -178,7 +178,7 @@ Span* PageHeap::NewLocked(Length n, LockingContext* context) {
n = RoundUpSize(n);

Span* result = SearchFreeAndLargeLists(n);
if (result != NULL)
if (result != nullptr)
return result;

if (stats_.free_bytes != 0 && stats_.unmapped_bytes != 0
@@ -210,7 +210,7 @@ Span* PageHeap::NewLocked(Length n, LockingContext* context) {
// unlucky memory fragmentation we'll be consuming virtual address
// space, but not real memory
result = SearchFreeAndLargeLists(n);
if (result != NULL) return result;
if (result != nullptr) return result;
}

// Grow the heap and try again.
@@ -223,7 +223,7 @@ Span* PageHeap::NewLocked(Length n, LockingContext* context) {
// Setting errno to ENOMEM here allows us to avoid dealing with it
// in fast-path.
errno = ENOMEM;
return NULL;
return nullptr;
}
return SearchFreeAndLargeLists(n);
}
@@ -270,8 +270,8 @@ Span* PageHeap::NewAligned(Length n, Length align_pages) {

Span* PageHeap::AllocLarge(Length n) {
ASSERT(lock_.IsHeld());
Span *best = NULL;
Span *best_normal = NULL;
Span *best = nullptr;
Span *best_normal = nullptr;

// Create a Span to use as an upper bound.
Span bound;
@@ -291,14 +291,14 @@ Span* PageHeap::AllocLarge(Length n) {
if (place != large_returned_.end()) {
Span *c = place->span;
ASSERT(c->location == Span::ON_RETURNED_FREELIST);
if (best_normal == NULL
if (best_normal == nullptr
|| c->length < best->length
|| (c->length == best->length && c->start < best->start))
best = place->span;
}

if (best == best_normal) {
return best == NULL ? NULL : Carve(best, n);
return best == nullptr ? nullptr : Carve(best, n);
}

// best comes from RETURNED set.
@@ -315,9 +315,9 @@ Span* PageHeap::AllocLarge(Length n) {
}

// If best_normal existed, EnsureLimit would have succeeded:
ASSERT(best_normal == NULL);
ASSERT(best_normal == nullptr);
// We are not allowed to take best from returned list.
return NULL;
return nullptr;
}

Span* PageHeap::Split(Span* span, Length n) {
@@ -375,12 +375,12 @@ Span* PageHeap::Carve(Span* span, Length n) {

// The previous span of |leftover| was just split -- no need to
// coalesce them. The next span of |leftover| was not previously coalesced
// with |span|, i.e. is NULL or has a location other than |old_location|.
// with |span|, i.e. is nullptr or has a location other than |old_location|.
#ifndef NDEBUG
const PageID p = leftover->start;
const Length len = leftover->length;
Span* next = GetDescriptor(p+len);
ASSERT (next == NULL ||
ASSERT (next == nullptr ||
next->location == Span::IN_USE ||
next->location != leftover->location);
#endif
@@ -426,9 +426,9 @@ void PageHeap::DeleteLocked(Span* span) {
// checks if 'other' span is mergeable with 'span'. If it is, removes
// other span from free list, performs aggressive decommit if
// necessary and returns 'other' span. Otherwise 'other' span cannot
// be merged and is left untouched. In that case NULL is returned.
// be merged and is left untouched. In that case nullptr is returned.
Span* PageHeap::CheckAndHandlePreMerge(Span* span, Span* other) {
if (other == NULL) {
if (other == nullptr) {
return other;
}
// if we're in aggressive decommit mode and span is decommitted,
@@ -437,10 +437,10 @@ Span* PageHeap::CheckAndHandlePreMerge(Span* span, Span* other) {
&& span->location == Span::ON_RETURNED_FREELIST) {
bool worked = DecommitSpan(other);
if (!worked) {
return NULL;
return nullptr;
}
} else if (other->location != span->location) {
return NULL;
return nullptr;
}

RemoveFromFreeList(other);
@@ -478,7 +478,7 @@ void PageHeap::MergeIntoFreeList(Span* span) {
}

Span* prev = CheckAndHandlePreMerge(span, GetDescriptor(p-1));
if (prev != NULL) {
if (prev != nullptr) {
// Merge preceding span into this span
ASSERT(prev->start + prev->length == p);
const Length len = prev->length;
@@ -488,7 +488,7 @@ void PageHeap::MergeIntoFreeList(Span* span) {
pagemap_.set(span->start, span);
}
Span* next = CheckAndHandlePreMerge(span, GetDescriptor(p+n));
if (next != NULL) {
if (next != nullptr) {
// Merge next span into this span
ASSERT(next->start == p+n);
const Length len = next->length;
@@ -695,7 +695,7 @@ void PageHeap::GetLargeSpanStatsLocked(LargeSpanStats* result) {
bool PageHeap::GetNextRange(PageID start, base::MallocRange* r) {
ASSERT(lock_.IsHeld());
Span* span = reinterpret_cast<Span*>(pagemap_.Next(start));
if (span == NULL) {
if (span == nullptr) {
return false;
}
r->address = span->start << kPageShift;
@@ -730,11 +730,11 @@ bool PageHeap::GrowHeap(Length n, LockingContext* context) {
if (n > kMaxValidPages) return false;
Length ask = (n>kMinSystemAlloc) ? n : static_cast<Length>(kMinSystemAlloc);
size_t actual_size;
void* ptr = NULL;
void* ptr = nullptr;
if (EnsureLimit(ask)) {
ptr = TCMalloc_SystemAlloc(ask << kPageShift, &actual_size, kPageSize);
}
if (ptr == NULL) {
if (ptr == nullptr) {
if (n < ask) {
// Try growing just "n" pages
ask = n;
@@ -742,7 +742,7 @@ bool PageHeap::GrowHeap(Length n, LockingContext* context) {
ptr = TCMalloc_SystemAlloc(ask << kPageShift, &actual_size, kPageSize);
}
}
if (ptr == NULL) return false;
if (ptr == nullptr) return false;
}
ask = actual_size >> kPageShift;
context->grown_by += ask << kPageShift;

@@ -147,7 +147,7 @@ class PageHeap {
return Split(span, n);
}

// Return the descriptor for the specified page. Returns NULL if
// Return the descriptor for the specified page. Returns nullptr if
// this PageID was not allocated previously.
ALWAYS_INLINE
Span* GetDescriptor(PageID p) const {
@@ -330,7 +330,7 @@ class PageHeap {
}

// Allocate a large span of length == n. If successful, returns a
// span of exactly the specified length. Else, returns NULL.
// span of exactly the specified length. Else, returns nullptr.
Span* AllocLarge(Length n);

// Coalesce span with neighboring spans if possible, prepend to

@@ -34,7 +34,7 @@
#ifndef TCMALLOC_PAGE_HEAP_ALLOCATOR_H_
#define TCMALLOC_PAGE_HEAP_ALLOCATOR_H_

#include <stddef.h> // for NULL, size_t
#include <stddef.h> // for size_t

#include "common.h" // for MetaDataAlloc
#include "internal_logging.h" // for ASSERT
@@ -54,9 +54,9 @@ class PageHeapAllocator {
void Init() {
ASSERT(sizeof(T) <= kAllocIncrement);
inuse_ = 0;
free_area_ = NULL;
free_area_ = nullptr;
free_avail_ = 0;
free_list_ = NULL;
free_list_ = nullptr;
// Reserve some space at the beginning to avoid fragmentation.
Delete(New());
}
@@ -64,7 +64,7 @@ class PageHeapAllocator {
T* New() {
// Consult free list
void* result;
if (free_list_ != NULL) {
if (free_list_ != nullptr) {
result = free_list_;
free_list_ = *(reinterpret_cast<void**>(result));
} else {
@@ -72,7 +72,7 @@ class PageHeapAllocator {
// Need more room. We assume that MetaDataAlloc returns
// suitably aligned memory.
free_area_ = reinterpret_cast<char*>(MetaDataAlloc(kAllocIncrement));
if (free_area_ == NULL) {
if (free_area_ == nullptr) {
Log(kCrash, __FILE__, __LINE__,
"FATAL ERROR: Out of memory trying to allocate internal "
"tcmalloc data (bytes, object-size)",

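New() above pops from a free list that threads the 'next' pointer through the first word of each freed object. A stand-alone sketch of that idiom (not the commit's code):

    #include <cstddef>

    static void* free_list = nullptr;

    void Push(void* obj) {
      *reinterpret_cast<void**>(obj) = free_list;  // link through the object itself
      free_list = obj;
    }

    void* Pop() {
      if (free_list == nullptr) return nullptr;  // empty: caller must carve fresh memory
      void* result = free_list;
      free_list = *reinterpret_cast<void**>(result);
      return result;
    }
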
@@ -48,7 +48,7 @@

#include "config.h"

#include <stddef.h> // for NULL, size_t
#include <stddef.h> // for size_t
#include <string.h> // for memset
#include <stdint.h>

@@ -83,12 +83,12 @@ class TCMalloc_PageMap1 {

void PreallocateMoreMemory() {}

// Return the current value for KEY. Returns NULL if not yet set,
// or if k is out of range.
// Return the current value for KEY. Returns nullptr if not yet
// set, or if k is out of range.
ALWAYS_INLINE
void* get(Number k) const {
if ((k >> BITS) > 0) {
return NULL;
return nullptr;
}
return array_[k];
}
@@ -101,14 +101,14 @@ class TCMalloc_PageMap1 {
array_[k] = v;
}

// Return the first non-NULL pointer found in this map for
// a page number >= k. Returns NULL if no such number is found.
// Return the first non-nullptr pointer found in this map for a page
// number >= k. Returns nullptr if no such number is found.
void* Next(Number k) const {
while (k < (1 << BITS)) {
if (array_[k] != NULL) return array_[k];
if (array_[k] != nullptr) return array_[k];
k++;
}
return NULL;
return nullptr;
}
};

@@ -142,8 +142,8 @@ class TCMalloc_PageMap2 {
void* get(Number k) const {
const Number i1 = k >> LEAF_BITS;
const Number i2 = k & (LEAF_LENGTH-1);
if ((k >> BITS) > 0 || root_[i1] == NULL) {
return NULL;
if ((k >> BITS) > 0 || root_[i1] == nullptr) {
return nullptr;
}
return root_[i1]->values[i2];
}
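
The two-level get() above splits the page number k into a root index and a leaf index. A worked sketch with hypothetical parameter values (the real BITS/LEAF_BITS depend on the template instantiation):

    #include <cstdint>

    constexpr int kBits = 20;       // hypothetical: 20 significant key bits
    constexpr int kLeafBits = 10;   // hypothetical: 10 of them index the leaf
    constexpr uint64_t kLeafLength = uint64_t{1} << kLeafBits;

    void SplitKey(uint64_t k, uint64_t* i1, uint64_t* i2) {
      *i1 = k >> kLeafBits;         // which leaf under the root
      *i2 = k & (kLeafLength - 1);  // slot within that leaf
    }
    // e.g. k = 0x12345 gives i1 = 0x48, i2 = 0x345, so get() returns
    // root_[0x48]->values[0x345], or nullptr if that leaf was never allocated.
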
@@ -164,9 +164,9 @@ class TCMalloc_PageMap2 {
return false;

// Make 2nd level node if necessary
if (root_[i1] == NULL) {
if (root_[i1] == nullptr) {
Leaf* leaf = reinterpret_cast<Leaf*>((*allocator_)(sizeof(Leaf)));
if (leaf == NULL) return false;
if (leaf == nullptr) return false;
memset(leaf, 0, sizeof(*leaf));
root_[i1] = leaf;
}
@@ -188,10 +188,10 @@ class TCMalloc_PageMap2 {
while (k < (Number(1) << BITS)) {
const Number i1 = k >> LEAF_BITS;
Leaf* leaf = root_[i1];
if (leaf != NULL) {
if (leaf != nullptr) {
// Scan forward in leaf
for (Number i2 = k & (LEAF_LENGTH - 1); i2 < LEAF_LENGTH; i2++) {
if (leaf->values[i2] != NULL) {
if (leaf->values[i2] != nullptr) {
return leaf->values[i2];
}
}
@@ -199,7 +199,7 @@ class TCMalloc_PageMap2 {
// Skip to next top-level entry
k = (i1 + 1) << LEAF_BITS;
}
return NULL;
return nullptr;
}
};

@@ -230,7 +230,7 @@ class TCMalloc_PageMap3 {

Node* NewNode() {
Node* result = reinterpret_cast<Node*>((*allocator_)(sizeof(Node)));
if (result != NULL) {
if (result != nullptr) {
memset(result, 0, sizeof(*result));
}
return result;
@@ -250,8 +250,8 @@ class TCMalloc_PageMap3 {
const Number i2 = (k >> LEAF_BITS) & (INTERIOR_LENGTH-1);
const Number i3 = k & (LEAF_LENGTH-1);
if ((k >> BITS) > 0 ||
root_.ptrs[i1] == NULL || root_.ptrs[i1]->ptrs[i2] == NULL) {
return NULL;
root_.ptrs[i1] == nullptr || root_.ptrs[i1]->ptrs[i2] == nullptr) {
return nullptr;
}
return reinterpret_cast<Leaf*>(root_.ptrs[i1]->ptrs[i2])->values[i3];
}
@@ -274,16 +274,16 @@ class TCMalloc_PageMap3 {
return false;

// Make 2nd level node if necessary
if (root_.ptrs[i1] == NULL) {
if (root_.ptrs[i1] == nullptr) {
Node* n = NewNode();
if (n == NULL) return false;
if (n == nullptr) return false;
root_.ptrs[i1] = n;
}

// Make leaf node if necessary
if (root_.ptrs[i1]->ptrs[i2] == NULL) {
if (root_.ptrs[i1]->ptrs[i2] == nullptr) {
Leaf* leaf = reinterpret_cast<Leaf*>((*allocator_)(sizeof(Leaf)));
if (leaf == NULL) return false;
if (leaf == nullptr) return false;
memset(leaf, 0, sizeof(*leaf));
root_.ptrs[i1]->ptrs[i2] = reinterpret_cast<Node*>(leaf);
}
@@ -301,14 +301,14 @@ class TCMalloc_PageMap3 {
while (k < (Number(1) << BITS)) {
const Number i1 = k >> (LEAF_BITS + INTERIOR_BITS);
const Number i2 = (k >> LEAF_BITS) & (INTERIOR_LENGTH-1);
if (root_.ptrs[i1] == NULL) {
if (root_.ptrs[i1] == nullptr) {
// Advance to next top-level entry
k = (i1 + 1) << (LEAF_BITS + INTERIOR_BITS);
} else {
Leaf* leaf = reinterpret_cast<Leaf*>(root_.ptrs[i1]->ptrs[i2]);
if (leaf != NULL) {
if (leaf != nullptr) {
for (Number i3 = (k & (LEAF_LENGTH-1)); i3 < LEAF_LENGTH; i3++) {
if (leaf->values[i3] != NULL) {
if (leaf->values[i3] != nullptr) {
return leaf->values[i3];
}
}
@@ -317,7 +317,7 @@ class TCMalloc_PageMap3 {
k = ((k >> LEAF_BITS) + 1) << LEAF_BITS;
}
}
return NULL;
return nullptr;
}
};

@ -99,11 +99,11 @@ class ScopedSignalBlocker {
|
|||
ScopedSignalBlocker(int signo) {
|
||||
sigemptyset(&sig_set_);
|
||||
sigaddset(&sig_set_, signo);
|
||||
RAW_CHECK(sigprocmask(SIG_BLOCK, &sig_set_, NULL) == 0,
|
||||
    RAW_CHECK(sigprocmask(SIG_BLOCK, &sig_set_, nullptr) == 0,
              "sigprocmask (block)");
  }

  ~ScopedSignalBlocker() {
    RAW_CHECK(sigprocmask(SIG_UNBLOCK, &sig_set_, NULL) == 0,
    RAW_CHECK(sigprocmask(SIG_UNBLOCK, &sig_set_, nullptr) == 0,
              "sigprocmask (unblock)");
  }

@@ -228,15 +228,15 @@ class ProfileHandler {
  DISALLOW_COPY_AND_ASSIGN(ProfileHandler);
};

ProfileHandler* ProfileHandler::instance_ = NULL;
ProfileHandler* ProfileHandler::instance_;

const int32_t ProfileHandler::kMaxFrequency;
const int32_t ProfileHandler::kDefaultFrequency;

// If we are LD_PRELOAD-ed against a non-pthreads app, then these functions
// won't be defined. We declare them here, for that case (with weak linkage)
// which will cause the non-definition to resolve to NULL. We can then check
// for NULL or not in Instance.
// which will cause the non-definition to resolve to nullptr. We can then check
// for nullptr or not in Instance.
extern "C" {
#if HAVE_LINUX_SIGEV_THREAD_ID
int timer_create(clockid_t clockid, struct sigevent* evp,

@@ -336,7 +336,7 @@ ProfileHandler::ProfileHandler()
  // Get frequency of interrupts (if specified)
  char junk;
  const char* fr = getenv("CPUPROFILE_FREQUENCY");
  if (fr != NULL && (sscanf(fr, "%u%c", &frequency_, &junk) == 1) &&
  if (fr != nullptr && (sscanf(fr, "%u%c", &frequency_, &junk) == 1) &&
      (frequency_ > 0)) {
    // Limit to kMaxFrequency
    frequency_ = (frequency_ > kMaxFrequency) ? kMaxFrequency : frequency_;

@@ -360,7 +360,7 @@ ProfileHandler::ProfileHandler()
    per_thread_timer_enabled_ = true;
    // Override signal number if requested.
    if (signal_number) {
      signal_number_ = strtol(signal_number, NULL, 0);
      signal_number_ = strtol(signal_number, nullptr, 0);
    }
  } else {
    RAW_LOG(INFO,

@@ -385,7 +385,7 @@ ProfileHandler::ProfileHandler()
  sa.sa_sigaction = SignalHandler;
  sa.sa_flags = SA_RESTART | SA_SIGINFO;
  sigemptyset(&sa.sa_mask);
  RAW_CHECK(sigaction(signal_number_, &sa, NULL) == 0, "sigprof (enable)");
  RAW_CHECK(sigaction(signal_number_, &sa, nullptr) == 0, "sigprof (enable)");
}

ProfileHandler::~ProfileHandler() {

@@ -523,7 +523,7 @@ void ProfileHandler::UpdateTimer(bool enable) {

bool ProfileHandler::IsSignalHandlerAvailable() {
  struct sigaction sa;
  RAW_CHECK(sigaction(signal_number_, NULL, &sa) == 0, "is-signal-handler avail");
  RAW_CHECK(sigaction(signal_number_, nullptr, &sa) == 0, "is-signal-handler avail");

  // We only take over the handler if the current one is unset.
  // It must be SIG_IGN or SIG_DFL, not some other function.

@@ -541,7 +541,7 @@ void ProfileHandler::SignalHandler(int sig, siginfo_t* sinfo, void* ucontext) {
  // enabled in RegisterThread or RegisterCallback only after
  // ProfileHandler::Instance runs.
  ProfileHandler* instance = instance_;
  RAW_CHECK(instance != NULL, "ProfileHandler is not initialized");
  RAW_CHECK(instance != nullptr, "ProfileHandler is not initialized");
  {
    SpinLockHolder sl(&instance->signal_lock_);
    ++instance->interrupts_;

@@ -590,7 +590,7 @@ void ProfileHandlerRegisterThread() {

ProfileHandlerToken* ProfileHandlerRegisterCallback(
    ProfileHandlerCallback callback, void* callback_arg) {
  return NULL;
  return nullptr;
}

void ProfileHandlerUnregisterCallback(ProfileHandlerToken* token) {
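[Editor's note] The weak-linkage comment in the ProfileHandler hunk above is one place where the rename changes documentation rather than behavior: an unresolved weak symbol really does compare equal to nullptr. A minimal standalone sketch of that probe (GCC/Clang attribute syntax; this exact declaration is illustrative, not copied from the patch):

    #include <time.h>    // for clockid_t, timer_t, struct sigevent

    // Re-declare a librt entry point with weak linkage; if librt was
    // not linked in, the symbol resolves to nullptr at load time.
    extern "C" int timer_create(clockid_t, struct sigevent*, timer_t*)
        __attribute__((weak));

    static bool HavePosixTimers() {
      return timer_create != nullptr;  // probe only; never call through nullptr
    }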
@@ -103,7 +103,7 @@ bool ProfileData::Start(const char* fname,
    return false;
  }

  start_time_ = time(NULL);
  start_time_ = time(nullptr);
  fname_ = strdup(fname);

  // Reset counters
@@ -110,7 +110,7 @@ class CpuProfiler {
  SpinLock      lock_;
  ProfileData   collector_;

  // Filter function and its argument, if any. (NULL means include all
  // Filter function and its argument, if any. (nullptr means include all
  // samples). Set at start, read-only while running. Written while holding
  // lock_, read and executed in the context of SIGPROF interrupt.
  int           (*filter_)(void*);

@@ -169,8 +169,8 @@ CpuProfiler CpuProfiler::instance_;

// Initialize profiling: activated if getenv("CPUPROFILE") exists.
CpuProfiler::CpuProfiler()
    : prof_handler_token_(NULL) {
  if (getenv("CPUPROFILE") == NULL) {
    : prof_handler_token_(nullptr) {
  if (getenv("CPUPROFILE") == nullptr) {
    return;
  }

@@ -182,8 +182,8 @@ CpuProfiler::CpuProfiler()
#endif

  char *signal_number_str = getenv("CPUPROFILESIGNAL");
  if (signal_number_str != NULL) {
    long int signal_number = strtol(signal_number_str, NULL, 10);
  if (signal_number_str != nullptr) {
    long int signal_number = strtol(signal_number_str, nullptr, 10);
    if (signal_number >= 1 && signal_number <= 64) {
      intptr_t old_signal_handler = reinterpret_cast<intptr_t>(signal(signal_number, CpuProfilerSwitch));
      if (old_signal_handler == 0) {

@@ -200,7 +200,7 @@ CpuProfiler::CpuProfiler()
    return;
  }

  if (!Start(fname, NULL)) {
  if (!Start(fname, nullptr)) {
    RAW_LOG(FATAL, "Can't turn on cpu profiling for '%s': %s\n",
            fname, strerror(errno));
  }

@@ -223,8 +223,8 @@ bool CpuProfiler::Start(const char* fname, const ProfilerOptions* options) {
    return false;
  }

  filter_ = NULL;
  if (options != NULL && options->filter_in_thread != NULL) {
  filter_ = nullptr;
  if (options != nullptr && options->filter_in_thread != nullptr) {
    filter_ = options->filter_in_thread;
    filter_arg_ = options->filter_in_thread_arg;
  }

@@ -309,15 +309,15 @@ void CpuProfiler::GetCurrentState(ProfilerState* state) {
}

void CpuProfiler::EnableHandler() {
  RAW_CHECK(prof_handler_token_ == NULL, "SIGPROF handler already registered");
  RAW_CHECK(prof_handler_token_ == nullptr, "SIGPROF handler already registered");
  prof_handler_token_ = ProfileHandlerRegisterCallback(prof_handler, this);
  RAW_CHECK(prof_handler_token_ != NULL, "Failed to set up SIGPROF handler");
  RAW_CHECK(prof_handler_token_ != nullptr, "Failed to set up SIGPROF handler");
}

void CpuProfiler::DisableHandler() {
  RAW_CHECK(prof_handler_token_ != NULL, "SIGPROF handler is not registered");
  RAW_CHECK(prof_handler_token_ != nullptr, "SIGPROF handler is not registered");
  ProfileHandlerUnregisterCallback(prof_handler_token_);
  prof_handler_token_ = NULL;
  prof_handler_token_ = nullptr;
}

// Signal handler that records the pc in the profile-data structure. We do no

@@ -330,7 +330,7 @@ void CpuProfiler::prof_handler(int sig, siginfo_t*, void* signal_ucontext,
                               void* cpu_profiler) {
  CpuProfiler* instance = static_cast<CpuProfiler*>(cpu_profiler);

  if (instance->filter_ == NULL ||
  if (instance->filter_ == nullptr ||
      (*instance->filter_)(instance->filter_arg_)) {
    void* stack[ProfileData::kMaxStackDepth];

@@ -378,7 +378,7 @@ extern "C" PERFTOOLS_DLL_DECL int ProfilingIsEnabledForAllThreads() {
}

extern "C" PERFTOOLS_DLL_DECL int ProfilerStart(const char* fname) {
  return CpuProfiler::instance_.Start(fname, NULL);
  return CpuProfiler::instance_.Start(fname, nullptr);
}

extern "C" PERFTOOLS_DLL_DECL int ProfilerStartWithOptions(
src/span.cc
@@ -34,7 +34,7 @@
#include <config.h>
#include "span.h"

#include <string.h>                     // for NULL, memset
#include <string.h>                     // for memset

#include "internal_logging.h"           // for ASSERT
#include "page_heap_allocator.h"        // for PageHeapAllocator

@@ -62,8 +62,8 @@ void DLL_Init(Span* list) {
void DLL_Remove(Span* span) {
  span->prev->next = span->next;
  span->next->prev = span->prev;
  span->prev = NULL;
  span->next = NULL;
  span->prev = nullptr;
  span->next = nullptr;
}

int DLL_Length(const Span* list) {

@@ -75,8 +75,8 @@ int DLL_Length(const Span* list) {
}

void DLL_Prepend(Span* list, Span* span) {
  ASSERT(span->next == NULL);
  ASSERT(span->prev == NULL);
  ASSERT(span->next == nullptr);
  ASSERT(span->prev == nullptr);
  span->next = list->next;
  span->prev = list;
  list->next->prev = span;

@@ -144,8 +144,9 @@ void DeleteSpan(Span* span);
// Initialize *list to an empty list.
void DLL_Init(Span* list);

// Remove 'span' from the linked list in which it resides, updating the
// pointers of adjacent Spans and setting span's next and prev to NULL.
// Remove 'span' from the linked list in which it resides, updating
// the pointers of adjacent Spans and setting span's next and prev to
// nullptr.
void DLL_Remove(Span* span);

// Return true iff "list" is empty.
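[Editor's note] In the span.cc hunks above, clearing prev/next in DLL_Remove is what gives the ASSERTs in DLL_Prepend their teeth: a Span can only be prepended if it is provably not on any list. The same invariant in a self-contained sketch (illustrative Node type, not the real Span):

    #include <cassert>

    struct Node { Node* prev; Node* next; };

    void Remove(Node* n) {
      n->prev->next = n->next;
      n->next->prev = n->prev;
      n->prev = nullptr;  // mark the node as detached so that...
      n->next = nullptr;
    }

    void Prepend(Node* list, Node* n) {
      assert(n->prev == nullptr && n->next == nullptr);  // ...this can fire
      n->next = list->next;
      n->prev = list;
      list->next->prev = n;
      list->next = n;
    }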
@@ -69,7 +69,7 @@ class StackTraceTable {
  void AddTrace(const StackTrace& t);

  // Returns stack traces formatted per MallocExtension guidelines.
  // May return NULL on error. Clears state before returning.
  // May return nullptr on error. Clears state before returning.
  //
  // REQUIRES: L < pageheap_lock
  void** ReadStackTracesAndClear();
@@ -39,7 +39,8 @@
// Anything that should only be defined once should be here:

#include <stdint.h>           // for uintptr_t
#include "base/basictypes.h"  // for NULL
#include "base/basictypes.h"
#include <gperftools/stacktrace.h>

// WARNING:

@@ -52,7 +53,7 @@
// saved registers.

// Given a pointer to a stack frame, locate and return the calling
// stackframe, or return NULL if no stackframe can be found. Perform sanity
// stackframe, or return nullptr if no stackframe can be found. Perform sanity
// checks (the strictness of which is controlled by the boolean parameter
// "STRICT_UNWINDING") to reduce the chance that a bad pointer is returned.
template<bool STRICT_UNWINDING>

@@ -64,18 +65,18 @@ static void **NextStackFrame(void **old_sp) {
  if (STRICT_UNWINDING) {
    // With the stack growing downwards, older stack frame must be
    // at a greater address that the current one.
    if (new_sp <= old_sp) return NULL;
    if (new_sp <= old_sp) return nullptr;
    // Assume stack frames larger than 100,000 bytes are bogus.
    if ((uintptr_t)new_sp - (uintptr_t)old_sp > 100000) return NULL;
    if ((uintptr_t)new_sp - (uintptr_t)old_sp > 100000) return nullptr;
  } else {
    // In the non-strict mode, allow discontiguous stack frames.
    // (alternate-signal-stacks for example).
    if (new_sp == old_sp) return NULL;
    if (new_sp == old_sp) return nullptr;
    // And allow frames upto about 1MB.
    if ((new_sp > old_sp)
        && ((uintptr_t)new_sp - (uintptr_t)old_sp > 1000000)) return NULL;
        && ((uintptr_t)new_sp - (uintptr_t)old_sp > 1000000)) return nullptr;
  }
  if ((uintptr_t)new_sp & (sizeof(void *) - 1)) return NULL;
  if ((uintptr_t)new_sp & (sizeof(void *) - 1)) return nullptr;
  return new_sp;
}

@@ -99,7 +99,7 @@ static int GET_STACK_TRACE_OR_FRAMES {

  _Unwind_Backtrace(libgcc_backtrace_helper, &data);

  if (data.pos > 1 && data.array[data.pos - 1] == NULL)
  if (data.pos > 1 && data.array[data.pos - 1] == nullptr)
    --data.pos;

#if IS_STACK_FRAMES
@@ -41,11 +41,11 @@
// Anything that should only be defined once should be here:

#include <stdint.h>   // for uintptr_t
#include <stdlib.h>   // for NULL
#include <stdlib.h>
#include <gperftools/stacktrace.h>

// Given a pointer to a stack frame, locate and return the calling
// stackframe, or return NULL if no stackframe can be found. Perform sanity
// stackframe, or return nullptr if no stackframe can be found. Perform sanity
// checks (the strictness of which is controlled by the boolean parameter
// "STRICT_UNWINDING") to reduce the chance that a bad pointer is returned.
template<bool STRICT_UNWINDING>

@@ -57,18 +57,18 @@ static void **NextStackFrame(void **old_sp) {
  if (STRICT_UNWINDING) {
    // With the stack growing downwards, older stack frame must be
    // at a greater address that the current one.
    if (new_sp <= old_sp) return NULL;
    if (new_sp <= old_sp) return nullptr;
    // Assume stack frames larger than 100,000 bytes are bogus.
    if ((uintptr_t)new_sp - (uintptr_t)old_sp > 100000) return NULL;
    if ((uintptr_t)new_sp - (uintptr_t)old_sp > 100000) return nullptr;
  } else {
    // In the non-strict mode, allow discontiguous stack frames.
    // (alternate-signal-stacks for example).
    if (new_sp == old_sp) return NULL;
    if (new_sp == old_sp) return nullptr;
    // And allow frames upto about 1MB.
    if ((new_sp > old_sp)
        && ((uintptr_t)new_sp - (uintptr_t)old_sp > 1000000)) return NULL;
        && ((uintptr_t)new_sp - (uintptr_t)old_sp > 1000000)) return nullptr;
  }
  if ((uintptr_t)new_sp & (sizeof(void *) - 1)) return NULL;
  if ((uintptr_t)new_sp & (sizeof(void *) - 1)) return nullptr;
  return new_sp;
}
@@ -43,7 +43,7 @@
// Anything that should only be defined once should be here:

#include <stdint.h>   // for uintptr_t
#include <stdlib.h>   // for NULL
#include <stdlib.h>
#include <gperftools/stacktrace.h>

struct layout_ppc {

@@ -55,7 +55,7 @@ struct layout_ppc {
};

// Given a pointer to a stack frame, locate and return the calling
// stackframe, or return NULL if no stackframe can be found. Perform sanity
// stackframe, or return nullptr if no stackframe can be found. Perform sanity
// checks (the strictness of which is controlled by the boolean parameter
// "STRICT_UNWINDING") to reduce the chance that a bad pointer is returned.
template<bool STRICT_UNWINDING>

@@ -69,21 +69,21 @@ static layout_ppc *NextStackFrame(layout_ppc *current) {
    // With the stack growing downwards, older stack frame must be
    // at a greater address that the current one.
    if (new_sp <= old_sp)
      return NULL;
      return nullptr;
    // Assume stack frames larger than 100,000 bytes are bogus.
    if (new_sp - old_sp > 100000)
      return NULL;
      return nullptr;
  } else {
    // In the non-strict mode, allow discontiguous stack frames.
    // (alternate-signal-stacks for example).
    if (new_sp == old_sp)
      return NULL;
      return nullptr;
    // And allow frames upto about 1MB.
    if ((new_sp > old_sp) && (new_sp - old_sp > 1000000))
      return NULL;
      return nullptr;
  }
  if (new_sp & (sizeof(void *) - 1))
    return NULL;
    return nullptr;
  return current->next;
}

@@ -168,8 +168,8 @@ static int GET_STACK_TRACE_OR_FRAMES {

  // It's possible the second-last stack frame can't return
  // (that is, it's __libc_start_main), in which case
  // the CRT startup code will have set its LR to 'NULL'.
  if (n > 0 && result[n-1] == NULL)
  // the CRT startup code will have set its LR to 'nullptr'.
  if (n > 0 && result[n-1] == nullptr)
    n--;

  return n;
@@ -43,7 +43,7 @@
// Anything that should only be defined once should be here:

#include <stdint.h>   // for uintptr_t
#include <stdlib.h>   // for NULL
#include <stdlib.h>
#include <signal.h>   // for siginfo_t
#include <gperftools/stacktrace.h>
#include <base/vdso_support.h>

@@ -76,7 +76,7 @@ struct layout_ppc {
// the signal trampoline.

// Given a pointer to a stack frame, locate and return the calling
// stackframe, or return NULL if no stackframe can be found. Perform sanity
// stackframe, or return nullptr if no stackframe can be found. Perform sanity
// checks (the strictness of which is controlled by the boolean parameter
// "STRICT_UNWINDING") to reduce the chance that a bad pointer is returned.
template<bool STRICT_UNWINDING>

@@ -90,21 +90,21 @@ static layout_ppc *NextStackFrame(layout_ppc *current) {
    // With the stack growing downwards, older stack frame must be
    // at a greater address that the current one.
    if (new_sp <= old_sp)
      return NULL;
      return nullptr;
    // Assume stack frames larger than 100,000 bytes are bogus.
    if (new_sp - old_sp > 100000)
      return NULL;
      return nullptr;
  } else {
    // In the non-strict mode, allow discontiguous stack frames.
    // (alternate-signal-stacks for example).
    if (new_sp == old_sp)
      return NULL;
      return nullptr;
    // And allow frames upto about 1MB.
    if ((new_sp > old_sp) && (new_sp - old_sp > 1000000))
      return NULL;
      return nullptr;
  }
  if (new_sp & (sizeof(void *) - 1))
    return NULL;
    return nullptr;
  return current->next;
}

@@ -223,8 +223,8 @@ static int GET_STACK_TRACE_OR_FRAMES {

  // It's possible the second-last stack frame can't return
  // (that is, it's __libc_start_main), in which case
  // the CRT startup code will have set its LR to 'NULL'.
  if (n > 0 && result[n-1] == NULL)
  // the CRT startup code will have set its LR to 'nullptr'.
  if (n > 0 && result[n-1] == nullptr)
    n--;

  return n;
@@ -33,7 +33,7 @@

#include <config.h>
#include "static_vars.h"
#include <stddef.h>                     // for NULL
#include <stddef.h>
#include <new>                          // for operator new
#ifndef _WIN32
#include <pthread.h>                    // for pthread_atfork
@@ -113,7 +113,7 @@ const char* readlink_strdup(const char* path) {

}  // namespace

// Returns NULL if we're on an OS where we can't get the invocation name.
// Returns nullptr if we're on an OS where we can't get the invocation name.
// Using a static var is ok because we're not called from a thread.
static const char* GetProgramInvocationName() {
#if defined(__linux__) || defined(__NetBSD__)

@@ -138,16 +138,16 @@ static const char* GetProgramInvocationName() {
  if (program_invocation_name[0] == '\0') {  // first time calculating
    uint32_t length = sizeof(program_invocation_name);
    if (_NSGetExecutablePath(program_invocation_name, &length))
      return NULL;
      return nullptr;
  }
  return program_invocation_name;
#elif defined(__FreeBSD__)
  static char program_invocation_name[PATH_MAX];
  size_t len = sizeof(program_invocation_name);
  static const int name[4] = { CTL_KERN, KERN_PROC, KERN_PROC_PATHNAME, -1 };
  if (!sysctl(name, 4, program_invocation_name, &len, NULL, 0))
  if (!sysctl(name, 4, program_invocation_name, &len, nullptr, 0))
    return program_invocation_name;
  return NULL;
  return nullptr;
#else
  return nullptr;   // figure out a way to get argv[0]
#endif

@@ -184,7 +184,7 @@ int SymbolTable::Symbolize() {
  return 0;
#else
  const char* argv0 = GetProgramInvocationName();
  if (argv0 == NULL) {  // can't call symbolize if we can't figure out our name
  if (argv0 == nullptr) {  // can't call symbolize if we can't figure out our name
    PrintError("Cannot figure out the name of this executable (argv0)");
    return 0;
  }

@@ -194,8 +194,8 @@ int SymbolTable::Symbolize() {
  }

  // All this work is to do two-way communication.  ugh.
  int *child_in = NULL;   // file descriptors
  int *child_out = NULL;  // for now, we don't worry about child_err
  int *child_in = nullptr;   // file descriptors
  int *child_out = nullptr;  // for now, we don't worry about child_err
  int child_fds[5][2];    // socketpair may be called up to five times below

  // The client program may close its stdin and/or stdout and/or stderr

@@ -214,7 +214,7 @@ int SymbolTable::Symbolize() {
      return 0;
    } else {
      if ((child_fds[i][0] > 2) && (child_fds[i][1] > 2)) {
        if (child_in == NULL) {
        if (child_in == nullptr) {
          child_in = child_fds[i];
        } else {
          child_out = child_fds[i];

@@ -251,7 +251,7 @@ int SymbolTable::Symbolize() {
      unsetenv("HEAPCHECK");
      unsetenv("PERFTOOLS_VERBOSE");
      execlp(get_pprof_path(), get_pprof_path(),
             "--symbols", argv0, NULL);
             "--symbols", argv0, nullptr);
      _exit(3);  // if execvp fails, it's bad news for us
    }
    default: {  // parent

@@ -309,7 +309,7 @@ int SymbolTable::Symbolize() {
        return 0;
      } else if (bytes_read == 0) {
        close(child_out[1]);
        wait(NULL);
        wait(nullptr);
        break;
      } else {
        total_bytes_read += bytes_read;
@@ -36,7 +36,7 @@

#include "config.h"
#include <stdint.h>  // for uintptr_t
#include <stddef.h>  // for NULL
#include <stddef.h>
#include <map>

using std::map;

@@ -48,7 +48,7 @@ using std::map;
class SymbolTable {
 public:
  SymbolTable()
    : symbol_buffer_(NULL) {}
    : symbol_buffer_(nullptr) {}
  ~SymbolTable() {
    delete[] symbol_buffer_;
  }
@@ -34,7 +34,7 @@
#include <config.h>
#include <errno.h>                      // for EAGAIN, errno
#include <fcntl.h>                      // for open, O_RDWR
#include <stddef.h>                     // for size_t, NULL, ptrdiff_t
#include <stddef.h>                     // for size_t, ptrdiff_t
#include <stdint.h>                     // for uintptr_t, intptr_t
#ifdef HAVE_MMAP
#include <sys/mman.h>                   // for munmap, mmap, MADV_DONTNEED, etc

@@ -104,7 +104,7 @@ static size_t pagesize = 0;
#endif

// The current system allocator
SysAllocator* tcmalloc_sys_alloc = NULL;
SysAllocator* tcmalloc_sys_alloc;

// Number of bytes taken from system.
size_t TCMalloc_SystemTaken = 0;

@@ -144,13 +144,13 @@ class DefaultSysAllocator : public SysAllocator {
  DefaultSysAllocator() : SysAllocator() {
    for (int i = 0; i < kMaxAllocators; i++) {
      failed_[i] = true;
      allocs_[i] = NULL;
      names_[i] = NULL;
      allocs_[i] = nullptr;
      names_[i] = nullptr;
    }
  }
  void SetChildAllocator(SysAllocator* alloc, unsigned int index,
                         const char* name) {
    if (index < kMaxAllocators && alloc != NULL) {
    if (index < kMaxAllocators && alloc != nullptr) {
      allocs_[index] = alloc;
      failed_[index] = false;
      names_[index] = name;

@@ -183,7 +183,7 @@ extern "C" {
void* SbrkSysAllocator::Alloc(size_t size, size_t *actual_size,
                              size_t alignment) {
#if !defined(HAVE_SBRK) || defined(__UCLIBC__)
  return NULL;
  return nullptr;
#else
  // Check if we should use sbrk allocation.
  // FLAGS_malloc_skip_sbrk starts out as false (its uninitialized

@@ -192,12 +192,12 @@ void* SbrkSysAllocator::Alloc(size_t size, size_t *actual_size,
  // That means that even if this flag is set to true, some (initial)
  // memory will be allocated with sbrk before the flag takes effect.
  if (FLAGS_malloc_skip_sbrk) {
    return NULL;
    return nullptr;
  }

  // sbrk will release memory if passed a negative number, so we do
  // a strict check here
  if (static_cast<ptrdiff_t>(size + alignment) < 0) return NULL;
  if (static_cast<ptrdiff_t>(size + alignment) < 0) return nullptr;

  // This doesn't overflow because TCMalloc_SystemAlloc has already
  // tested for overflow at the alignment boundary.

@@ -217,12 +217,12 @@ void* SbrkSysAllocator::Alloc(size_t size, size_t *actual_size,
  // http://sourceware.org/cgi-bin/cvsweb.cgi/~checkout~/libc/misc/sbrk.c?rev=1.1.2.1&content-type=text/plain&cvsroot=glibc
  // Without this check, sbrk may succeed when it ought to fail.)
  if (reinterpret_cast<intptr_t>(tcmalloc_hooked_sbrk(0)) + size < size) {
    return NULL;
    return nullptr;
  }

  void* result = tcmalloc_hooked_sbrk(size);
  if (result == reinterpret_cast<void*>(-1)) {
    return NULL;
    return nullptr;
  }

  // Is it aligned?

@@ -241,7 +241,7 @@ void* SbrkSysAllocator::Alloc(size_t size, size_t *actual_size,
  // that we can find an aligned region within it.
  result = tcmalloc_hooked_sbrk(size + alignment - 1);
  if (result == reinterpret_cast<void*>(-1)) {
    return NULL;
    return nullptr;
  }
  ptr = reinterpret_cast<uintptr_t>(result);
  if ((ptr & (alignment-1)) != 0) {

@@ -346,9 +346,9 @@ void* MmapSysAllocator::Alloc(size_t size, size_t *actual_size,
void* DefaultSysAllocator::Alloc(size_t size, size_t *actual_size,
                                 size_t alignment) {
  for (int i = 0; i < kMaxAllocators; i++) {
    if (!failed_[i] && allocs_[i] != NULL) {
    if (!failed_[i] && allocs_[i] != nullptr) {
      void* result = allocs_[i]->Alloc(size, actual_size, alignment);
      if (result != NULL) {
      if (result != nullptr) {
        return result;
      }
      failed_[i] = true;

@@ -359,7 +359,7 @@ void* DefaultSysAllocator::Alloc(size_t size, size_t *actual_size,
  for (int i = 0; i < kMaxAllocators; i++) {
    failed_[i] = false;
  }
  return NULL;
  return nullptr;
}

ATTRIBUTE_WEAK ATTRIBUTE_NOINLINE

@@ -396,7 +396,7 @@ void InitSystemAllocators(void) {
void* TCMalloc_SystemAlloc(size_t size, size_t *actual_size,
                           size_t alignment) {
  // Discard requests that overflow
  if (size + alignment < size) return NULL;
  if (size + alignment < size) return nullptr;

  SpinLockHolder lock_holder(&spinlock);

@@ -409,12 +409,12 @@ void* TCMalloc_SystemAlloc(size_t size, size_t *actual_size,
  if (alignment < sizeof(MemoryAligner)) alignment = sizeof(MemoryAligner);

  size_t actual_size_storage;
  if (actual_size == NULL) {
  if (actual_size == nullptr) {
    actual_size = &actual_size_storage;
  }

  void* result = tcmalloc_sys_alloc->Alloc(size, actual_size, alignment);
  if (result != NULL) {
  if (result != nullptr) {
    CHECK_CONDITION(
      CheckAddressBits(reinterpret_cast<uintptr_t>(result) + *actual_size - 1));
    TCMalloc_SystemTaken += *actual_size;
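[Editor's note] SbrkSysAllocator::Alloc above stacks three cheap guards: a signed ptrdiff_t check because sbrk takes a signed increment, a wraparound test against the current break, and a re-check after the aligned retry. The wraparound idiom in isolation, as a sketch assuming the usual unsigned modular arithmetic:

    #include <cstddef>
    #include <cstdint>

    // True when base + size wraps past the top of the address space --
    // the same test applied to sbrk(0) in the hunk above.
    bool WouldWrap(uintptr_t base, size_t size) {
      return base + size < size;  // a wrapped sum lands below 'size'
    }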
@@ -46,8 +46,8 @@ class SysAllocator;
//
// Allocate and return "N" bytes of zeroed memory.
//
// If actual_bytes is NULL then the returned memory is exactly the
// requested size.  If actual bytes is non-NULL then the allocator
// If actual_bytes is nullptr then the returned memory is exactly the
// requested size.  If actual bytes is non-nullptr then the allocator
// may optionally return more bytes than asked for (i.e. return an
// entire "huge" page if a huge page allocator is in use).
//

@@ -57,7 +57,7 @@ class SysAllocator;
// CACHELINE_ALIGNED, the return pointer will always be cacheline
// aligned.
//
// Returns NULL when out of memory.
// Returns nullptr when out of memory.
extern PERFTOOLS_DLL_DECL
void* TCMalloc_SystemAlloc(size_t bytes, size_t *actual_bytes,
                           size_t alignment = 0);
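[Editor's note] Since the header comment above fully specifies the contract, a usage sketch may help: passing a non-null actual_bytes lets the caller keep whatever extra the allocator rounds up to (the alignment value here is an assumption for illustration, not taken from the patch):

    size_t actual = 0;
    void* mem = TCMalloc_SystemAlloc(1 << 20, &actual, 4096);
    if (mem != nullptr) {
      // 'actual' may exceed the 1 MiB request (e.g. a whole huge page);
      // all 'actual' bytes belong to the caller and are zeroed.
    }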
src/tcmalloc.cc
@@ -74,7 +74,7 @@
//      pagemap[q] == s
//      pagemap[p+1..q-1] are undefined
//      pagemap[p-1] and pagemap[q+1] are defined:
//         NULL if the corresponding page is not yet in the address space.
//         nullptr if the corresponding page is not yet in the address space.
//         Otherwise it points to a Span.  This span may be free
//         or allocated.  If free, it is in one of pageheap's freelist.
//

@@ -95,7 +95,7 @@

#include <errno.h>                      // for ENOMEM, EINVAL, errno
#include <stdint.h>
#include <stddef.h>                     // for size_t, NULL
#include <stddef.h>                     // for size_t
#include <stdlib.h>                     // for getenv
#include <string.h>                     // for strcmp, memset, strlen, etc
#ifdef HAVE_UNISTD_H

@@ -326,10 +326,10 @@ struct TCMallocStats {
  PageHeap::Stats pageheap;     // Stats from page heap
};

// Get stats into "r".  Also, if class_count != NULL, class_count[k]
// Get stats into "r".  Also, if class_count != nullptr, class_count[k]
// will be set to the total number of objects of size class k in the
// central cache, transfer cache, and per-thread caches. If small_spans
// is non-NULL, it is filled.  Same for large_spans.
// is non-nullptr, it is filled.  Same for large_spans.
static void ExtractStats(TCMallocStats* r, uint64_t* class_count,
                         PageHeap::SmallSpanStats* small_spans,
                         PageHeap::LargeSpanStats* large_spans) {

@@ -358,10 +358,10 @@ static void ExtractStats(TCMallocStats* r, uint64_t* class_count,
  ThreadCache::GetThreadStats(&r->thread_bytes, class_count);
  r->metadata_bytes = tcmalloc::metadata_system_bytes();
  r->pageheap = Static::pageheap()->StatsLocked();
  if (small_spans != NULL) {
  if (small_spans != nullptr) {
    Static::pageheap()->GetSmallSpanStatsLocked(small_spans);
  }
  if (large_spans != NULL) {
  if (large_spans != nullptr) {
    Static::pageheap()->GetLargeSpanStatsLocked(large_spans);
  }
}

@@ -380,7 +380,7 @@ static void DumpStats(TCMalloc_Printer* out, int level) {
  if (level >= 2) {
    ExtractStats(&stats, class_count, &small, &large);
  } else {
    ExtractStats(&stats, NULL, NULL, NULL);
    ExtractStats(&stats, nullptr, nullptr, nullptr);
  }

  static const double MiB = 1048576.0;
@@ -708,11 +708,11 @@ class TCMallocImplementation : public MallocExtension {
  }

  virtual bool GetNumericProperty(const char* name, size_t* value) {
    ASSERT(name != NULL);
    ASSERT(name != nullptr);

    if (strcmp(name, "generic.current_allocated_bytes") == 0) {
      TCMallocStats stats;
      ExtractStats(&stats, NULL, NULL, NULL);
      ExtractStats(&stats, nullptr, nullptr, nullptr);
      *value = stats.pageheap.system_bytes
               - stats.thread_bytes
               - stats.central_bytes

@@ -724,14 +724,14 @@ class TCMallocImplementation : public MallocExtension {

    if (strcmp(name, "generic.heap_size") == 0) {
      TCMallocStats stats;
      ExtractStats(&stats, NULL, NULL, NULL);
      ExtractStats(&stats, nullptr, nullptr, nullptr);
      *value = stats.pageheap.system_bytes;
      return true;
    }

    if (strcmp(name, "generic.total_physical_bytes") == 0) {
      TCMallocStats stats;
      ExtractStats(&stats, NULL, NULL, NULL);
      ExtractStats(&stats, nullptr, nullptr, nullptr);
      *value = stats.pageheap.system_bytes + stats.metadata_bytes -
               stats.pageheap.unmapped_bytes;
      return true;

@@ -748,21 +748,21 @@ class TCMallocImplementation : public MallocExtension {

    if (strcmp(name, "tcmalloc.central_cache_free_bytes") == 0) {
      TCMallocStats stats;
      ExtractStats(&stats, NULL, NULL, NULL);
      ExtractStats(&stats, nullptr, nullptr, nullptr);
      *value = stats.central_bytes;
      return true;
    }

    if (strcmp(name, "tcmalloc.transfer_cache_free_bytes") == 0) {
      TCMallocStats stats;
      ExtractStats(&stats, NULL, NULL, NULL);
      ExtractStats(&stats, nullptr, nullptr, nullptr);
      *value = stats.transfer_bytes;
      return true;
    }

    if (strcmp(name, "tcmalloc.thread_cache_free_bytes") == 0) {
      TCMallocStats stats;
      ExtractStats(&stats, NULL, NULL, NULL);
      ExtractStats(&stats, nullptr, nullptr, nullptr);
      *value = stats.thread_bytes;
      return true;
    }

@@ -840,7 +840,7 @@ class TCMallocImplementation : public MallocExtension {

    if (strcmp(name, "tcmalloc.current_total_thread_cache_bytes") == 0) {
      TCMallocStats stats;
      ExtractStats(&stats, NULL, NULL, NULL);
      ExtractStats(&stats, nullptr, nullptr, nullptr);
      *value = stats.thread_bytes;
      return true;
    }

@@ -873,7 +873,7 @@ class TCMallocImplementation : public MallocExtension {
  }

  virtual bool SetNumericProperty(const char* name, size_t value) {
    ASSERT(name != NULL);
    ASSERT(name != nullptr);

    if (strcmp(name, "tcmalloc.max_total_thread_cache_bytes") == 0) {
      SpinLockHolder l(Static::pageheap_lock());

@@ -1191,12 +1191,12 @@ TCMallocGuard::TCMallocGuard() {

TCMallocGuard::~TCMallocGuard() {
  if (--tcmallocguard_refcount == 0) {
    const char* env = NULL;
    const char* env = nullptr;
    if (!RunningOnValgrind()) {
      // Valgrind uses it's own malloc so we cannot do MALLOCSTATS
      env = getenv("MALLOCSTATS");
    }
    if (env != NULL) {
    if (env != nullptr) {
      int level = atoi(env);
      if (level < 1) level = 1;
      PrintStats(level);

@@ -1230,7 +1230,7 @@ static ATTRIBUTE_UNUSED bool CheckCachedSizeClass(void *ptr) {
}

static ALWAYS_INLINE void* CheckedMallocResult(void *result) {
  ASSERT(result == NULL || CheckCachedSizeClass(result));
  ASSERT(result == nullptr || CheckCachedSizeClass(result));
  return result;
}
@@ -1249,8 +1249,8 @@ static void* DoSampledAllocation(size_t size) {
  // Allocate span
  auto pages = tcmalloc::pages(size == 0 ? 1 : size);
  Span *span = Static::pageheap()->New(pages);
  if (PREDICT_FALSE(span == NULL)) {
    return NULL;
  if (PREDICT_FALSE(span == nullptr)) {
    return nullptr;
  }

  SpinLockHolder h(Static::pageheap_lock());

@@ -1290,8 +1290,8 @@ void* handle_oom(malloc_fn retry_fn,
  errno = ENOMEM;
  if (!from_operator && !tc_new_mode) {
    // we're out of memory in C library function (malloc etc) and no
    // "new mode" forced on us. Just return NULL
    return NULL;
    // "new mode" forced on us. Just return nullptr
    return nullptr;
  }

  // we're OOM in operator new or "new mode" is set. We might have to

@@ -1335,7 +1335,7 @@ void* handle_oom(malloc_fn retry_fn,
    // we get here if new_handler returns successfully. So we retry
    // allocation.
    void* rv = retry_fn(retry_arg);
    if (rv != NULL) {
    if (rv != nullptr) {
      return rv;
    }

@@ -1445,7 +1445,7 @@ static void* do_malloc_pages(ThreadCache* heap, size_t size) {
    result = DoSampledAllocation(size);
  } else {
    Span* span = Static::pageheap()->New(num_pages);
    result = (PREDICT_FALSE(span == NULL) ? NULL : SpanToMallocResult(span));
    result = (PREDICT_FALSE(span == nullptr) ? nullptr : SpanToMallocResult(span));
  }

  if (should_report_large(num_pages)) {

@@ -1455,7 +1455,7 @@ static void* do_malloc_pages(ThreadCache* heap, size_t size) {
}

static void *nop_oom_handler(size_t size) {
  return NULL;
  return nullptr;
}

ALWAYS_INLINE void* do_malloc(size_t size) {

@@ -1491,7 +1491,7 @@ static void *retry_malloc(void* size) {

ALWAYS_INLINE void* do_malloc_or_cpp_alloc(size_t size) {
  void *rv = do_malloc(size);
  if (PREDICT_TRUE(rv != NULL)) {
  if (PREDICT_TRUE(rv != nullptr)) {
    return rv;
  }
  return handle_oom(retry_malloc, reinterpret_cast<void *>(size),

@@ -1501,10 +1501,10 @@ ALWAYS_INLINE void* do_malloc_or_cpp_alloc(size_t size) {
ALWAYS_INLINE void* do_calloc(size_t n, size_t elem_size) {
  // Overflow check
  const size_t size = n * elem_size;
  if (elem_size != 0 && size / elem_size != n) return NULL;
  if (elem_size != 0 && size / elem_size != n) return nullptr;

  void* result = do_malloc_or_cpp_alloc(size);
  if (result != NULL) {
  if (result != nullptr) {
    size_t total_size = size;
    if (!tcmalloc::IsEmergencyPtr(result)) {
      // On windows we support recalloc (which was apparently
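[Editor's note] The divide-back test at the top of do_calloc is the standard portable way to detect multiplication overflow without a wider integer type: if n * elem_size wrapped, dividing the product by elem_size cannot give n back. As a standalone sketch of the same guard:

    #include <cstddef>

    // True iff n * elem_size overflows size_t -- the do_calloc idiom.
    bool MulOverflows(size_t n, size_t elem_size) {
      const size_t total = n * elem_size;
      return elem_size != 0 && total / elem_size != n;
    }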
@@ -1522,9 +1522,9 @@ ALWAYS_INLINE void* do_calloc(size_t n, size_t elem_size) {
  return result;
}

// If ptr is NULL, do nothing.  Otherwise invoke the given function.
// If ptr is nullptr, do nothing.  Otherwise invoke the given function.
inline void free_null_or_invalid(void* ptr, void (*invalid_free_fn)(void*)) {
  if (ptr != NULL) {
  if (ptr != nullptr) {
    (*invalid_free_fn)(ptr);
  }
}

@@ -1542,7 +1542,7 @@ static ATTRIBUTE_NOINLINE void do_free_pages(Span* span, void* ptr) {
      StackTrace* st = reinterpret_cast<StackTrace*>(span->objects);
      tcmalloc::DLL_Remove(span);
      Static::stacktrace_allocator()->Delete(st);
      span->objects = NULL;
      span->objects = nullptr;
    }
  });
}

@@ -1586,7 +1586,7 @@ void do_free_with_callback(void* ptr,
  if (PREDICT_FALSE(!cache_hit)) {
    Span* span = Static::pageheap()->GetDescriptor(p);
    if (PREDICT_FALSE(!span)) {
      // span can be NULL because the pointer passed in is NULL or invalid
      // span can be nullptr because the pointer passed in is nullptr or invalid
      // (not something returned by malloc or friends), or because the
      // pointer was allocated with some other allocator besides
      // tcmalloc. The latter can happen if tcmalloc is linked in via

@@ -1599,7 +1599,7 @@ void do_free_with_callback(void* ptr,
    cl = span->sizeclass;
    if (PREDICT_FALSE(cl == 0)) {
      ASSERT(reinterpret_cast<uintptr_t>(ptr) % kPageSize == 0);
      ASSERT(span != NULL && span->start == p);
      ASSERT(span != nullptr && span->start == p);
      do_free_pages(span, ptr);
      return;
    }

@@ -1609,7 +1609,7 @@ void do_free_with_callback(void* ptr,
    }
  }

  if (PREDICT_TRUE(heap != NULL)) {
  if (PREDICT_TRUE(heap != nullptr)) {
    ASSERT(Static::IsInited());
    // If we've hit initialized thread cache, so we're done.
    heap->Deallocate(ptr, cl);

@@ -1628,7 +1628,7 @@ void do_free_with_callback(void* ptr,
  }

  // Otherwise, delete directly into central cache
  tcmalloc::SLL_SetNext(ptr, NULL);
  tcmalloc::SLL_SetNext(ptr, nullptr);
  Static::central_cache()[cl].InsertRange(ptr, ptr, 1);
}
@@ -1641,7 +1641,7 @@ ALWAYS_INLINE void do_free(void* ptr) {
// speed.  If you change this function, look at that one too.
inline size_t GetSizeWithCallback(const void* ptr,
                                  size_t (*invalid_getsize_fn)(const void*)) {
  if (ptr == NULL)
  if (ptr == nullptr)
    return 0;
  const PageID p = reinterpret_cast<uintptr_t>(ptr) >> kPageShift;
  uint32_t cl;

@@ -1650,7 +1650,7 @@ inline size_t GetSizeWithCallback(const void* ptr,
  }

  const Span *span = Static::pageheap()->GetDescriptor(p);
  if (PREDICT_FALSE(span == NULL)) {  // means we do not own this memory
  if (PREDICT_FALSE(span == nullptr)) {  // means we do not own this memory
    return (*invalid_getsize_fn)(ptr);
  }

@@ -1687,17 +1687,17 @@ ALWAYS_INLINE void* do_realloc_with_callback(
  const size_t upper_bound_to_shrink = old_size / 2ul;
  if ((new_size > old_size) || (new_size < upper_bound_to_shrink)) {
    // Need to reallocate.
    void* new_ptr = NULL;
    void* new_ptr = nullptr;

    if (new_size > old_size && new_size < lower_bound_to_grow) {
      new_ptr = do_malloc_or_cpp_alloc(lower_bound_to_grow);
    }
    if (new_ptr == NULL) {
    if (new_ptr == nullptr) {
      // Either new_size is not a tiny increment, or last do_malloc failed.
      new_ptr = do_malloc_or_cpp_alloc(new_size);
    }
    if (PREDICT_FALSE(new_ptr == NULL)) {
      return NULL;
    if (PREDICT_FALSE(new_ptr == nullptr)) {
      return nullptr;
    }
    MallocHook::InvokeNewHook(new_ptr, new_size);
    memcpy(new_ptr, old_ptr, ((old_size < new_size) ? old_size : new_size));

@@ -1719,9 +1719,9 @@ static ALWAYS_INLINE
void* do_memalign_pages(size_t align, size_t size) {
  ASSERT((align & (align - 1)) == 0);
  ASSERT(align > kPageSize);
  if (size + align < size) return NULL;         // Overflow
  if (size + align < size) return nullptr;         // Overflow

  if (PREDICT_FALSE(Static::pageheap() == NULL)) ThreadCache::InitModule();
  if (PREDICT_FALSE(Static::pageheap() == nullptr)) ThreadCache::InitModule();

  // Allocate at least one byte to avoid boundary conditions below
  if (size == 0) size = 1;

@@ -1751,7 +1751,7 @@ inline int do_mallopt(int cmd, int value) {
template <typename Mallinfo>
inline Mallinfo do_mallinfo() {
  TCMallocStats stats;
  ExtractStats(&stats, NULL, NULL, NULL);
  ExtractStats(&stats, nullptr, nullptr, nullptr);

  // Just some of the fields are filled in.
  Mallinfo info;

@@ -1783,7 +1783,7 @@ inline Mallinfo do_mallinfo() {

// As promised, the definition of this function, declared above.
size_t TCMallocImplementation::GetAllocatedSize(const void* ptr) {
  if (ptr == NULL)
  if (ptr == nullptr)
    return 0;
  ASSERT(TCMallocImplementation::GetOwnership(ptr)
         != TCMallocImplementation::kNotOwned);

@@ -1884,7 +1884,7 @@ template <void* OOMHandler(size_t)>
ALWAYS_INLINE
static void* do_allocate_full(size_t size) {
  void* p = do_malloc(size);
  if (PREDICT_FALSE(p == NULL)) {
  if (PREDICT_FALSE(p == nullptr)) {
    p = OOMHandler(size);
  }
  MallocHook::InvokeNewHook(p, size);

@@ -1929,7 +1929,7 @@ static ATTRIBUTE_SECTION(google_malloc)
void* memalign_pages(size_t align, size_t size,
                     bool from_operator, bool nothrow) {
  void *rv = do_memalign_pages(align, size);
  if (PREDICT_FALSE(rv == NULL)) {
  if (PREDICT_FALSE(rv == nullptr)) {
    retry_memalign_data data;
    data.align = align;
    data.size = size;

@@ -1965,7 +1965,7 @@ static void * malloc_fast_path(size_t size) {

  ThreadCache *cache = ThreadCachePtr::GetIfPresent();

  if (PREDICT_FALSE(cache == NULL)) {
  if (PREDICT_FALSE(cache == nullptr)) {
    return tcmalloc::dispatch_allocate_full<OOMHandler>(size);
  }

@@ -2080,7 +2080,7 @@ TC_ALIAS(tc_free);

extern "C" PERFTOOLS_DLL_DECL void* tc_realloc(void* old_ptr,
                                               size_t new_size) PERFTOOLS_NOTHROW {
  if (old_ptr == NULL) {
  if (old_ptr == nullptr) {
    void* result = do_malloc_or_cpp_alloc(new_size);
    MallocHook::InvokeNewHook(result, new_size);
    return result;

@@ -2088,7 +2088,7 @@ extern "C" PERFTOOLS_DLL_DECL void* tc_realloc(void* old_ptr,
  if (new_size == 0) {
    MallocHook::InvokeDeleteHook(old_ptr);
    do_free(old_ptr);
    return NULL;
    return nullptr;
  }
  if (PREDICT_FALSE(tcmalloc::IsEmergencyPtr(old_ptr))) {
    return tcmalloc::EmergencyRealloc(old_ptr, new_size);

@@ -2185,7 +2185,7 @@ extern "C" PERFTOOLS_DLL_DECL int tc_posix_memalign(
  }

  void* result = tc_memalign(align, size);
  if (PREDICT_FALSE(result == NULL)) {
  if (PREDICT_FALSE(result == nullptr)) {
    return ENOMEM;
  } else {
    *result_ptr = result;
@@ -46,8 +46,8 @@
#include <sys/time.h>      // for setitimer

// Needs to be volatile so compiler doesn't try to optimize it away
static volatile void* getpc_retval = NULL;    // what GetPC returns
static volatile bool prof_handler_called = false;
static volatile void* getpc_retval;    // what GetPC returns
static volatile bool prof_handler_called;

extern "C" {
// This helps us inspect codegen of GetPC function, just in case.

@@ -68,7 +68,7 @@ static void RoutineCallingTheSignal() {
  sa.sa_sigaction = prof_handler;
  sa.sa_flags = SA_RESTART | SA_SIGINFO;
  sigemptyset(&sa.sa_mask);
  if (sigaction(SIGPROF, &sa, NULL) != 0) {
  if (sigaction(SIGPROF, &sa, nullptr) != 0) {
    perror("sigaction");
    exit(1);
  }
@@ -245,7 +245,7 @@ static void DoWipeStack(int n);  // defined below
static void WipeStack() { DoWipeStack(20); }

static void Pause() {
  poll(NULL, 0, 77);  // time for thread activity in HeapBusyThreadBody
  poll(nullptr, 0, 77);  // time for thread activity in HeapBusyThreadBody

  // Indirectly test malloc_extension.*:
  CHECK(MallocExtension::instance()->VerifyAllMemory());

@@ -366,14 +366,14 @@ static void DoDeAllocHidden(void** ptr) {

static void DeAllocHidden(void** ptr) {
  RunHidden(NewCallback(DoDeAllocHidden, ptr));
  *ptr = NULL;
  *ptr = nullptr;
  Use(ptr);
}

void PreventHeapReclaiming(size_t size) {
#ifdef NDEBUG
  if (true) {
    static void** no_reclaim_list = NULL;
    static void** no_reclaim_list = nullptr;
    CHECK(size >= sizeof(void*));
    // We can't use malloc_reclaim_memory flag in opt mode as debugallocation.cc
    // is not used.  Instead we allocate a bunch of heap objects that are

@@ -683,7 +683,7 @@ static void ThreadDisabledLeaks() {
  pthread_t tid;
  pthread_attr_t attr;
  CHECK_EQ(pthread_attr_init(&attr), 0);
  CHECK_EQ(pthread_create(&tid, &attr, RunDisabledLeaks, NULL), 0);
  CHECK_EQ(pthread_create(&tid, &attr, RunDisabledLeaks, nullptr), 0);
  void* res;
  CHECK_EQ(pthread_join(tid, &res), 0);
}

@@ -692,10 +692,10 @@ static void ThreadDisabledLeaks() {
static void TestHeapLeakCheckerDisabling() {
  HeapLeakChecker check("disabling");

  RunDisabledLeaks(NULL);
  RunDisabledLeaks(NULL);
  RunDisabledLeaks(nullptr);
  RunDisabledLeaks(nullptr);
  ThreadDisabledLeaks();
  RunDisabledLeaks(NULL);
  RunDisabledLeaks(nullptr);
  ThreadDisabledLeaks();
  ThreadDisabledLeaks();

@@ -783,13 +783,13 @@ static void DirectTestSTLAlloc(Alloc allocator, const char* name) {
  }
  for (int i = 0; i < kSize; ++i) {
    allocator.deallocate(ptrs[i], i*3+1);
    ptrs[i] = NULL;
    ptrs[i] = nullptr;
  }
  CHECK(check.BriefSameHeap());  // just in case
}

static SpinLock grplock;
static struct group* grp = NULL;
static struct group* grp;
static const int kKeys = 50;
static pthread_key_t key[kKeys];

@@ -812,7 +812,7 @@ static void TestLibCAllocate() {
  CHECK(key_init_has_run);
  for (int i = 0; i < kKeys; ++i) {
    void* p = pthread_getspecific(key[i]);
    if (NULL == p) {
    if (nullptr == p) {
      if (i == 0) {
        // Test-logging inside threads which (potentially) creates and uses
        // thread-local data inside standard C++ library:

@@ -826,7 +826,7 @@ static void TestLibCAllocate() {
  }

  strerror(errno);
  const time_t now = time(NULL);
  const time_t now = time(nullptr);
  ctime(&now);

#ifdef HAVE_EXECINFO_H

@@ -837,7 +837,7 @@ static void TestLibCAllocate() {
  if (grplock.TryLock()) {
    gid_t gid = getgid();
    getgrgid(gid);
    if (grp == NULL) grp = getgrent();  // a race condition here is okay
    if (grp == nullptr) grp = getgrent();  // a race condition here is okay
    getgrnam(grp->gr_name);
    getpwuid(geteuid());
    grplock.Unlock();

@@ -862,7 +862,7 @@ static void* HeapBusyThreadBody(void* a) {
#else
  int** ptr;
#endif
  ptr = NULL;
  ptr = nullptr;
  typedef std::set<int> Set;
  Set s1;
  while (1) {

@@ -871,7 +871,7 @@ static void* HeapBusyThreadBody(void* a) {
    if (!g_have_exited_main)
      TestLibCAllocate();

    if (ptr == NULL) {
    if (ptr == nullptr) {
      ptr = new(initialized) int*[1];
      *ptr = new(initialized) int[1];
    }

@@ -914,13 +914,13 @@ static void* HeapBusyThreadBody(void* a) {
        ptr = reinterpret_cast<int **>(
            reinterpret_cast<uintptr_t>(ptr) ^ kHideMask);
      } else {
        poll(NULL, 0, random() % 100);
        poll(nullptr, 0, random() % 100);
      }
      VLOG(2) << pthread_self() << ": continuing";
      if (random() % 3 == 0) {
        delete [] *ptr;
        delete [] ptr;
        ptr = NULL;
        ptr = nullptr;
      }
      delete [] s2;
    }

@@ -1080,7 +1080,7 @@ REGISTER_OBJ_MAKER(

class ClassA {
 public:
  explicit ClassA(int a) : ptr(NULL) { }
  explicit ClassA(int a) : ptr(nullptr) { }
  mutable char* ptr;
};
static const ClassA live_leak_mutable(1);

@@ -1088,7 +1088,7 @@ static const ClassA live_leak_mutable(1);
template<class C>
class TClass {
 public:
  explicit TClass(int a) : ptr(NULL) { }
  explicit TClass(int a) : ptr(nullptr) { }
  mutable C val;
  mutable C* ptr;
};

@@ -1254,7 +1254,7 @@ static void TestHeapLeakCheckerLiveness() {

// Get address (PC value) following the mmap call into addr_after_mmap_call
static void* Mmapper(uintptr_t* addr_after_mmap_call) {
  void* r = mmap(NULL, 100, PROT_READ|PROT_WRITE,
  void* r = mmap(nullptr, 100, PROT_READ|PROT_WRITE,
                 MAP_PRIVATE|MAP_ANONYMOUS, -1, 0);
  // Get current PC value into addr_after_mmap_call
  void* stack[1];
@@ -91,7 +91,7 @@ static void TestHeapProfilerStartStopIsRunning() {
  // IsHeapProfilerRunning should return true.
  if (!IsHeapProfilerRunning()) {
    const char* tmpdir = getenv("TMPDIR");
    if (tmpdir == NULL)
    if (tmpdir == nullptr)
      tmpdir = "/tmp";
    mkdir(tmpdir, 0755);     // if necessary
    HeapProfilerStart((std::string(tmpdir) + "/start_stop").c_str());

@@ -110,7 +110,7 @@ static void TestDumpHeapProfiler() {
  // IsHeapProfilerRunning should return true.
  if (!IsHeapProfilerRunning()) {
    const char* tmpdir = getenv("TMPDIR");
    if (tmpdir == NULL)
    if (tmpdir == nullptr)
      tmpdir = "/tmp";
    mkdir(tmpdir, 0755);     // if necessary
    HeapProfilerStart((std::string(tmpdir) + "/dump").c_str());

@@ -162,9 +162,9 @@ int main(int argc, char** argv) {
      printf("FORK failed!\n");
      return 1;
    case 0:                      // child
      return execl(argv[0], argv[0], NULL);      // run child with no args
      return execl(argv[0], argv[0], nullptr);   // run child with no args
    default:
      wait(NULL);                // we'll let the kids run one at a time
      wait(nullptr);             // we'll let the kids run one at a time
  }
}
@@ -64,7 +64,7 @@ TEST(MallocExtensionTest, Basics) {
  ASSERT_EQ(MallocExtension::kNotOwned,
            MallocExtension::instance()->GetOwnership(&cxx_bytes_used));
  ASSERT_EQ(MallocExtension::kNotOwned,
            MallocExtension::instance()->GetOwnership(NULL));
            MallocExtension::instance()->GetOwnership(nullptr));
  ASSERT_GE(MallocExtension::instance()->GetAllocatedSize(a), 1000);
  // This is just a sanity check. If we allocated too much, tcmalloc is broken
  ASSERT_LE(MallocExtension::instance()->GetAllocatedSize(a), 5000);

@@ -81,7 +81,7 @@ TEST(MallocExtensionTest, Basics) {
  ASSERT_EQ(MallocExtension_kOwned, MallocExtension_GetOwnership(a));
  ASSERT_EQ(MallocExtension_kNotOwned,
            MallocExtension_GetOwnership(&cxx_bytes_used));
  ASSERT_EQ(MallocExtension_kNotOwned, MallocExtension_GetOwnership(NULL));
  ASSERT_EQ(MallocExtension_kNotOwned, MallocExtension_GetOwnership(nullptr));
  ASSERT_GE(MallocExtension_GetAllocatedSize(a), 1000);
  ASSERT_LE(MallocExtension_GetAllocatedSize(a), 5000);
  ASSERT_GE(MallocExtension_GetEstimatedAllocatedSize(1000), 1000);
@@ -81,7 +81,7 @@ class Thread {
  }
  void Join() {
    assert(joinable_);
    pthread_join(thread_, NULL);
    pthread_join(thread_, nullptr);
  }
  virtual void Run() = 0;
 private:

@@ -93,7 +93,7 @@ class Thread {

    ProfileHandlerRegisterThread();
    self->Run();
    return NULL;
    return nullptr;
  }
  pthread_t thread_;
  bool joinable_;

@@ -191,10 +191,10 @@ class ProfileHandlerTest : public ::testing::Test {
    timer_type_ = (getenv("CPUPROFILE_REALTIME") ? ITIMER_REAL : ITIMER_PROF);

#if HAVE_LINUX_SIGEV_THREAD_ID
    linux_per_thread_timers_mode_ = (getenv("CPUPROFILE_PER_THREAD_TIMERS") != NULL);
    linux_per_thread_timers_mode_ = (getenv("CPUPROFILE_PER_THREAD_TIMERS") != nullptr);
    const char *signal_number = getenv("CPUPROFILE_TIMER_SIGNAL");
    if (signal_number) {
      //signal_number_ = strtol(signal_number, NULL, 0);
      //signal_number_ = strtol(signal_number, nullptr, 0);
      linux_per_thread_timers_mode_ = true;
      Delay(kTimerResetInterval);
    }
@@ -111,7 +111,7 @@ class ProfileDataChecker {
 public:
  ProfileDataChecker() {
    const char* tmpdir = getenv("TMPDIR");
    if (tmpdir == NULL)
    if (tmpdir == nullptr)
      tmpdir = "/tmp";
    mkdir(tmpdir, 0755);     // if necessary
    filename_ = std::string(tmpdir) + "/profiledata_unittest.tmp";

@@ -124,7 +124,7 @@ class ProfileDataChecker {
  // data matched, otherwise returns an indication of the cause of the
  // mismatch.
  std::string Check(const ProfileDataSlot* slots, int num_slots) {
    return CheckWithSkips(slots, num_slots, NULL, 0);
    return CheckWithSkips(slots, num_slots, nullptr, 0);
  }

  // Checks the first 'num_slots' profile data slots in the file
@@ -128,7 +128,7 @@ int main(int argc, char** argv) {

  g_iters = atoi(argv[1]);
  int num_threads = 1;
  const char* filename = NULL;
  const char* filename = nullptr;
  if (argc > 2) {
    num_threads = atoi(argv[2]);
  }

@@ -161,9 +161,9 @@ int main(int argc, char** argv) {
      printf("FORK failed!\n");
      return 1;
    case 0:             // child
      return execl(argv[0], argv[0], argv[1], NULL);
      return execl(argv[0], argv[0], argv[1], nullptr);
    default:
      wait(NULL);       // we'll let the kids run one at a time
      wait(nullptr);    // we'll let the kids run one at a time
  }
}
#else
@@ -96,7 +96,7 @@ TEST(ReallocUnittest, Basics) {
      ASSERT_TRUE(Valid(dst, std::min(src_size, dst_size)));
      Fill(dst, dst_size);
      ASSERT_TRUE(Valid(dst, dst_size));
      if (dst != NULL) free(dst);
      if (dst != nullptr) free(dst);
    }
  }
@@ -36,7 +36,7 @@
// For 32 bits, this means allocations near 2^32 bytes and 2^31 bytes.
// For 64 bits, this means allocations near 2^64 bytes and 2^63 bytes.

#include <stddef.h>                     // for size_t, NULL
#include <stddef.h>                     // for size_t
#include <stdlib.h>                     // for malloc, free, realloc
#include <stdio.h>
#include <set>                          // for set, etc

@@ -51,13 +51,13 @@ using std::set;

void TryAllocExpectFail(size_t size) {
  void* p1 = noopt(malloc)(size);
  CHECK(p1 == NULL);
  CHECK(p1 == nullptr);

  void* p2 = noopt(malloc)(1);
  CHECK(p2 != NULL);
  CHECK(p2 != nullptr);

  void* p3 = noopt(realloc)(p2, size);
  CHECK(p3 == NULL);
  CHECK(p3 == nullptr);

  free(p2);
}

@@ -67,7 +67,7 @@ void TryAllocExpectFail(size_t size) {

void TryAllocMightFail(size_t size) {
  unsigned char* p = static_cast<unsigned char*>(noopt(malloc)(size));
  if (p != NULL) {
  if (p != nullptr) {
    static const size_t kPoints = 1024;

    for ( size_t i = 0; i < kPoints; ++i ) {

@@ -95,7 +95,7 @@ int main (int argc, char** argv) {
  set<char*> p_set;
  for ( int i = 0; i < kZeroTimes; ++i ) {
    char* p = new char;
    CHECK(p != NULL);
    CHECK(p != nullptr);
    CHECK(p_set.find(p) == p_set.end());
    p_set.insert(p_set.end(), p);
  }

@@ -105,7 +105,7 @@ int main (int argc, char** argv) {
  // Grab some memory so that some later allocations are guaranteed to fail.
  printf("Test small malloc\n");
  void* p_small = noopt(malloc(4*1048576));
  CHECK(p_small != NULL);
  CHECK(p_small != nullptr);

  // Test sizes up near the maximum size_t.
  // These allocations test the wrap-around code.

@@ -117,7 +117,7 @@ int main (int argc, char** argv) {
  }

  // Test sizes a bit smaller.
  // The small malloc above guarantees that all these return NULL.
  // The small malloc above guarantees that all these return nullptr.
  printf("Test malloc(0 - 1048576 - N)\n");
  static const size_t kMinusMBMinusNTimes = 16384;
  for ( size_t i = 0; i < kMinusMBMinusNTimes; ++i) {
@@ -149,7 +149,7 @@ static bool kOSSupportsMemalign = false;
static inline void* Memalign(size_t align, size_t size) {
//LOG(FATAL) << "memalign not supported on windows";
exit(1);
return NULL;
return nullptr;
}
static inline int PosixMemalign(void** ptr, size_t align, size_t size) {
//LOG(FATAL) << "posix_memalign not supported on windows";

@@ -164,7 +164,7 @@ static bool kOSSupportsMemalign = false;
static inline void* Memalign(size_t align, size_t size) {
//LOG(FATAL) << "memalign not supported on OS X";
exit(1);
return NULL;
return nullptr;
}
static inline int PosixMemalign(void** ptr, size_t align, size_t size) {
//LOG(FATAL) << "posix_memalign not supported on OS X";

@@ -200,7 +200,7 @@ struct OOMAbleSysAlloc : public SysAllocator {

void* Alloc(size_t size, size_t* actual_size, size_t alignment) {
if (simulate_oom) {
return NULL;
return nullptr;
}
return child->Alloc(size, actual_size, alignment);
}

@@ -366,7 +366,7 @@ class AllocatorState : public TestHarness {
if (err != 0) {
CHECK_EQ(err, ENOMEM);
}
return err == 0 ? result : NULL;
return err == 0 ? result : nullptr;
}
}
}

@@ -609,12 +609,12 @@ TEST(TCMallocTest, ManyThreads) {

static void TryHugeAllocation(size_t s, AllocatorState* rnd) {
void* p = rnd->alloc(noopt(s));
CHECK(p == NULL); // huge allocation s should fail!
CHECK(p == nullptr); // huge allocation s should fail!
}

static void TestHugeAllocations(AllocatorState* rnd) {
// Check that asking for stuff a tiny bit smaller than largest possible
// size returns NULL.
// size returns nullptr.
for (size_t i = 0; i < 70000; i += rnd->Uniform(20)) {
TryHugeAllocation(kMaxSize - i, rnd);
}

@@ -623,9 +623,9 @@ static void TestHugeAllocations(AllocatorState* rnd) {
if (!TestingPortal::Get()->IsDebuggingMalloc()) {
// debug allocation takes forever for huge allocs
for (size_t i = 0; i < 100; i++) {
void* p = NULL;
void* p = nullptr;
p = rnd->alloc(kMaxSignedSize + i);
if (p) free(p);    // if: free(NULL) is not necessarily defined
if (p) free(p);    // if: free(nullptr) is not necessarily defined
p = rnd->alloc(kMaxSignedSize - i);
if (p) free(p);
}

@@ -641,9 +641,9 @@ static void TestHugeAllocations(AllocatorState* rnd) {
static void TestCalloc(size_t n, size_t s, bool ok) {
char* p = reinterpret_cast<char*>(noopt(calloc)(n, s));
if (!ok) {
CHECK(p == NULL);  // calloc(n, s) should not succeed
CHECK(p == nullptr);  // calloc(n, s) should not succeed
} else {
CHECK(p != NULL);  // calloc(n, s) should succeed
CHECK(p != nullptr);  // calloc(n, s) should succeed
for (int i = 0; i < n*s; i++) {
CHECK(p[i] == '\0');
}
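Editor's aside: the ok == false cases of TestCalloc depend on calloc refusing element counts whose product overflows size_t. A freestanding sketch of that guard (my reconstruction; the commit's own variant appears verbatim in the _recalloc hunk further below):

#include <cassert>
#include <cstddef>
#include <limits>

// True iff n * size is representable in size_t.
static bool multiply_ok(size_t n, size_t size) {
  return n == 0 || std::numeric_limits<size_t>::max() / n >= size;
}

int main() {
  assert(multiply_ok(0, 123));
  assert(!multiply_ok(std::numeric_limits<size_t>::max(), 2));
}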
@@ -1043,8 +1043,8 @@ TEST(TCMallocTest, AggressiveDecommit) {
// On MSVC10, in release mode, the optimizer convinces itself
// g_no_memory is never changed (I guess it doesn't realize OnNoMemory
// might be called).  Work around this by setting the var volatile.
volatile bool g_no_memory = false;
std::new_handler g_old_handler = NULL;
volatile bool g_no_memory;
std::new_handler g_old_handler;
static void OnNoMemory() {
g_no_memory = true;
std::set_new_handler(g_old_handler);

@@ -1056,19 +1056,19 @@ TEST(TCMallocTest, SetNewMode) {
g_old_handler = std::set_new_handler(&OnNoMemory);
g_no_memory = false;
void* ret = noopt(malloc(noopt(kTooBig)));
EXPECT_EQ(NULL, ret);
EXPECT_EQ(nullptr, ret);
EXPECT_TRUE(g_no_memory);

g_old_handler = std::set_new_handler(&OnNoMemory);
g_no_memory = false;
ret = noopt(calloc(1, noopt(kTooBig)));
EXPECT_EQ(NULL, ret);
EXPECT_EQ(nullptr, ret);
EXPECT_TRUE(g_no_memory);

g_old_handler = std::set_new_handler(&OnNoMemory);
g_no_memory = false;
ret = noopt(realloc(nullptr, noopt(kTooBig)));
EXPECT_EQ(NULL, ret);
EXPECT_EQ(nullptr, ret);
EXPECT_TRUE(g_no_memory);

if (kOSSupportsMemalign) {

@@ -1079,14 +1079,14 @@ TEST(TCMallocTest, SetNewMode) {
g_old_handler = std::set_new_handler(&OnNoMemory);
g_no_memory = false;
ret = Memalign(kAlignment, kTooBig);
EXPECT_EQ(NULL, ret);
EXPECT_EQ(nullptr, ret);
EXPECT_TRUE(g_no_memory);

g_old_handler = std::set_new_handler(&OnNoMemory);
g_no_memory = false;
EXPECT_EQ(ENOMEM,
PosixMemalign(&ret, kAlignment, kTooBig));
EXPECT_EQ(NULL, ret);
EXPECT_EQ(nullptr, ret);
EXPECT_TRUE(g_no_memory);
}
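Editor's gloss on what SetNewMode exercises: in this mode allocation failures consult the installed std::new_handler, so the test registers OnNoMemory before every oversized request and checks that it fired. A freestanding sketch of the handler protocol itself (mine, using plain operator new rather than tcmalloc):

#include <cstdio>
#include <limits>
#include <new>

static bool g_handler_ran = false;
static std::new_handler g_old = nullptr;

static void OnOom() {
  g_handler_ran = true;
  std::set_new_handler(g_old);  // restore, so operator new gives up and throws
}

int main() {
  g_old = std::set_new_handler(&OnOom);
  try {
    void* p = ::operator new(std::numeric_limits<size_t>::max() / 2);
    ::operator delete(p);  // unreachable on any realistic machine
  } catch (const std::bad_alloc&) {
    std::printf("handler ran: %s\n", g_handler_ran ? "yes" : "no");
  }
}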
@@ -1098,18 +1098,18 @@ TEST(TCMallocTest, TestErrno) {
if (kOSSupportsMemalign) {
errno = 0;
ret = Memalign(128, kTooBig);
EXPECT_EQ(NULL, ret);
EXPECT_EQ(nullptr, ret);
EXPECT_EQ(ENOMEM, errno);
}

errno = 0;
ret = noopt(malloc(noopt(kTooBig)));
EXPECT_EQ(NULL, ret);
EXPECT_EQ(nullptr, ret);
EXPECT_EQ(ENOMEM, errno);

errno = 0;
ret = tc_malloc_skip_new_handler(kTooBig);
EXPECT_EQ(NULL, ret);
EXPECT_EQ(nullptr, ret);
EXPECT_EQ(ENOMEM, errno);
}

@@ -1531,11 +1531,11 @@ TEST(TCMallocTest, AllTests) {
// Do the memory intensive tests after threads are done, since exhausting
// the available address space can make pthread_create fail.

// Check that huge allocations fail with NULL instead of crashing
// Check that huge allocations fail with nullptr instead of crashing
printf("Testing huge allocations\n");
TestHugeAllocations(&rnd);

// Check that large allocations fail with NULL instead of crashing
// Check that large allocations fail with nullptr instead of crashing
//
// debug allocation takes forever for huge allocs
if (!TestingPortal::Get()->IsDebuggingMalloc()) {
@@ -72,9 +72,9 @@ std::atomic<size_t> ThreadCache::min_per_thread_cache_size_ = kMinThreadCacheSiz
size_t ThreadCache::overall_thread_cache_size_ = kDefaultOverallThreadCacheSize;
ssize_t ThreadCache::unclaimed_cache_space_ = kDefaultOverallThreadCacheSize;
PageHeapAllocator<ThreadCache> threadcache_allocator;
ThreadCache* ThreadCache::thread_heaps_ = NULL;
int ThreadCache::thread_heap_count_ = 0;
ThreadCache* ThreadCache::next_memory_steal_ = NULL;
ThreadCache* ThreadCache::thread_heaps_;
int ThreadCache::thread_heap_count_;
ThreadCache* ThreadCache::next_memory_steal_;

ThreadCache::ThreadCache() {
ASSERT(Static::pageheap_lock()->IsHeld());

@@ -117,7 +117,7 @@ ThreadCache::~ThreadCache() {
}

// Remove some objects of class "cl" from central cache and add to thread heap.
// On success, return the first object for immediate use; otherwise return NULL.
// On success, return the first object for immediate use; otherwise return nullptr.
void* ThreadCache::FetchFromCentralCache(uint32_t cl, int32_t byte_size,
void *(*oom_handler)(size_t size)) {
FreeList* list = &list_[cl];

@@ -130,10 +130,10 @@ void* ThreadCache::FetchFromCentralCache(uint32_t cl, int32_t byte_size,
&start, &end, num_to_move);

if (fetch_count == 0) {
ASSERT(start == NULL);
ASSERT(start == nullptr);
return oom_handler(byte_size);
}
ASSERT(start != NULL);
ASSERT(start != nullptr);

if (--fetch_count >= 0) {
size_ += byte_size * fetch_count;

@@ -265,8 +265,8 @@ void ThreadCache::IncreaseCacheLimitLocked() {
for (int i = 0; i < 10;
++i, next_memory_steal_ = next_memory_steal_->next_) {
// Reached the end of the linked list.  Start at the beginning.
if (next_memory_steal_ == NULL) {
ASSERT(thread_heaps_ != NULL);
if (next_memory_steal_ == nullptr) {
ASSERT(thread_heaps_ != nullptr);
next_memory_steal_ = thread_heaps_;
}
if (next_memory_steal_ == this ||

@@ -294,7 +294,7 @@ void ThreadCache::InitModule() {
}
const char *tcb = TCMallocGetenvSafe("TCMALLOC_MAX_TOTAL_THREAD_CACHE_BYTES");
if (tcb) {
set_overall_thread_cache_size(strtoll(tcb, NULL, 10));
set_overall_thread_cache_size(strtoll(tcb, nullptr, 10));
}
Static::InitStaticVars();
threadcache_allocator.Init();
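Minor editorial note: passing nullptr as strtoll's end-pointer, as InitModule does above, simply discards the information about where parsing stopped. Illustrative snippet, not from the commit:

#include <cstdio>
#include <cstdlib>

int main() {
  const char* tcb = "33554432";  // stand-in for the env var's value
  long long bytes = std::strtoll(tcb, nullptr, 10);
  std::printf("%lld bytes\n", bytes);
}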
@@ -320,12 +320,12 @@ ThreadCache* ThreadCache::NewHeap() {
ThreadCache *heap = new (threadcache_allocator.New()) ThreadCache();

heap->next_ = thread_heaps_;
heap->prev_ = NULL;
if (thread_heaps_ != NULL) {
heap->prev_ = nullptr;
if (thread_heaps_ != nullptr) {
thread_heaps_->prev_ = heap;
} else {
// This is the only thread heap at the moment.
ASSERT(next_memory_steal_ == NULL);
ASSERT(next_memory_steal_ == nullptr);
next_memory_steal_ = heap;
}
thread_heaps_ = heap;

@@ -339,13 +339,13 @@ void ThreadCache::DeleteCache(ThreadCache* heap) {

// Remove from linked list
SpinLockHolder h(Static::pageheap_lock());
if (heap->next_ != NULL) heap->next_->prev_ = heap->prev_;
if (heap->prev_ != NULL) heap->prev_->next_ = heap->next_;
if (heap->next_ != nullptr) heap->next_->prev_ = heap->prev_;
if (heap->prev_ != nullptr) heap->prev_->next_ = heap->next_;
if (thread_heaps_ == heap) thread_heaps_ = heap->next_;
thread_heap_count_--;

if (next_memory_steal_ == heap) next_memory_steal_ = heap->next_;
if (next_memory_steal_ == NULL) next_memory_steal_ = thread_heaps_;
if (next_memory_steal_ == nullptr) next_memory_steal_ = thread_heaps_;
unclaimed_cache_space_ += heap->max_size_;

threadcache_allocator.Delete(heap);

@@ -363,7 +363,7 @@ void ThreadCache::RecomputePerThreadCacheSize() {

double ratio = space / max<double>(1, per_thread_cache_size_);
size_t claimed = 0;
for (ThreadCache* h = thread_heaps_; h != NULL; h = h->next_) {
for (ThreadCache* h = thread_heaps_; h != nullptr; h = h->next_) {
// Increasing the total cache size should not circumvent the
// slow-start growth of max_size_.
if (ratio < 1.0) {

@@ -376,7 +376,7 @@ void ThreadCache::RecomputePerThreadCacheSize() {
}

void ThreadCache::GetThreadStats(uint64_t* total_bytes, uint64_t* class_count) {
for (ThreadCache* h = thread_heaps_; h != NULL; h = h->next_) {
for (ThreadCache* h = thread_heaps_; h != nullptr; h = h->next_) {
*total_bytes += h->Size();
if (class_count) {
for (int cl = 0; cl < Static::num_size_classes(); ++cl) {
@@ -36,7 +36,7 @@

#include <config.h>
#include <atomic>
#include <stddef.h> // for size_t, NULL
#include <stddef.h> // for size_t
#include <stdint.h> // for uint32_t, uint64_t
#include <sys/types.h> // for ssize_t
#include "base/commandlineflags.h"

@@ -96,7 +96,7 @@ class ThreadCache {
static inline int HeapsInUse();

// Adds to *total_bytes the total number of bytes used by all thread heaps.
// Also, if class_count is not NULL, it must be an array of size kNumClasses,
// Also, if class_count is not nullptr, it must be an array of size kNumClasses,
// and this function will increment each element of class_count by the number
// of items in all thread-local freelists of the corresponding size class.
// REQUIRES: Static::pageheap_lock is held.

@@ -149,7 +149,7 @@ class ThreadCache {

public:
void Init(size_t size) {
list_ = NULL;
list_ = nullptr;
length_ = 0;
lowater_ = 0;
max_length_ = 1;

@@ -188,7 +188,7 @@ class ThreadCache {

// Is list empty?
bool empty() const {
return list_ == NULL;
return list_ == nullptr;
}

// Low-water mark management

@@ -203,7 +203,7 @@ class ThreadCache {
}

void* Pop() {
ASSERT(list_ != NULL);
ASSERT(list_ != nullptr);
length_--;
if (length_ < lowater_) lowater_ = length_;
return SLL_Pop(&list_);
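Editor's sketch of the FreeList machinery behind Init/empty/Pop above: tcmalloc's SLL_* helpers implement an intrusive singly-linked list in which each free object's first word stores the pointer to the next free object. My reconstruction, assuming that layout:

#include <cassert>

inline void SLL_Push(void** list, void* element) {
  *reinterpret_cast<void**>(element) = *list;  // chain through the first word
  *list = element;
}

inline void* SLL_Pop(void** list) {
  void* result = *list;
  assert(result != nullptr);
  *list = *reinterpret_cast<void**>(result);
  return result;
}

int main() {
  alignas(void*) char a[16], b[16];
  void* list = nullptr;
  SLL_Push(&list, a);
  SLL_Push(&list, b);
  assert(SLL_Pop(&list) == b);  // LIFO order
  assert(SLL_Pop(&list) == a);
  assert(list == nullptr);
}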
@@ -101,7 +101,7 @@ int main(int argc, char *argv[]) {

process = GetCurrentProcess();

if (!SymInitialize(process, NULL, FALSE)) {
if (!SymInitialize(process, nullptr, FALSE)) {
error = GetLastError();
fprintf(stderr, "SymInitialize returned error : %lu\n", error);
return 1;

@@ -128,7 +128,7 @@ int main(int argc, char *argv[]) {
}

SymSetOptions(symopts);
module_base = SymLoadModuleEx(process, NULL, filename, NULL, 0, 0, NULL, 0);
module_base = SymLoadModuleEx(process, nullptr, filename, nullptr, 0, 0, nullptr, 0);
if (!module_base) {
/* SymLoadModuleEx failed */
error = GetLastError();

@@ -143,7 +143,7 @@ int main(int argc, char *argv[]) {
/* GNU addr2line seems to just do a strtol and ignore any
* weird characters it gets, so we will too.
*/
unsigned __int64 reladdr = _strtoui64(buf, NULL, 16);
unsigned __int64 reladdr = _strtoui64(buf, nullptr, 16);
ULONG64 buffer[(sizeof(SYMBOL_INFO) +
MAX_SYM_NAME*sizeof(TCHAR) +
sizeof(ULONG64) - 1)

@@ -164,7 +164,7 @@ int main(int argc, char *argv[]) {
// The length of the name does not include the null-terminating character.
pSymbol->MaxNameLen = MAX_SYM_NAME - 1;
if (print_function_name) {
if (SymFromAddr(process, (DWORD64)absaddr, NULL, pSymbol)) {
if (SymFromAddr(process, (DWORD64)absaddr, nullptr, pSymbol)) {
printf("%s\n", pSymbol->Name);
} else {
printf("??\n");
@@ -117,7 +117,7 @@ class AutoTestingHookImpl : public AutoTestingHookBase {
private:
AutoTestingHookImpl(T target_function, T replacement_function, bool do_it)
: target_function_(target_function),
original_function_(NULL),
original_function_(nullptr),
replacement_function_(replacement_function),
did_it_(do_it) {
if (do_it) {
@@ -52,7 +52,7 @@
#include <sys/types.h>  // for size_t
#include <new>          // for nothrow_t

static char m;          // some dummy memory so new doesn't return NULL.
static char m;          // some dummy memory so new doesn't return nullptr.

void* operator new(size_t size) { return &m; }
void operator delete(void* p) throw() { }
@@ -192,7 +192,7 @@ int main(int argc, char *argv[]) {
SYM_CONTEXT ctx;
int i;
char* search;
char* filename = NULL;
char* filename = nullptr;
int rv = 0;
/* We may add SYMOPT_UNDNAME if --demangle is specified: */
DWORD symopts = SYMOPT_DEFERRED_LOADS | SYMOPT_DEBUG;

@@ -215,7 +215,7 @@ int main(int argc, char *argv[]) {

process = GetCurrentProcess();

if (!SymInitialize(process, NULL, FALSE)) {
if (!SymInitialize(process, nullptr, FALSE)) {
error = GetLastError();
fprintf(stderr, "SymInitialize returned error : %d\n", error);
return 1;

@@ -242,7 +242,7 @@ int main(int argc, char *argv[]) {
}

SymSetOptions(symopts);
module_base = SymLoadModuleEx(process, NULL, filename, NULL, 0, 0, NULL, 0);
module_base = SymLoadModuleEx(process, nullptr, filename, nullptr, 0, 0, nullptr, 0);
if (!module_base) {
/* SymLoadModuleEx failed */
error = GetLastError();

@@ -256,7 +256,7 @@ int main(int argc, char *argv[]) {

memset(&ctx, 0, sizeof(ctx));
ctx.module_base = module_base;
if (!SymEnumSymbols(process, module_base, NULL, EnumSymProc, &ctx)) {
if (!SymEnumSymbols(process, module_base, nullptr, EnumSymProc, &ctx)) {
error = GetLastError();
fprintf(stderr, "SymEnumSymbols returned error: %d\n", error);
rv = 1;
@@ -70,7 +70,7 @@ void* _recalloc(void* old_ptr, size_t n, size_t size) {
// Ensure that (n * size) does not overflow
if (!(n == 0 || (std::numeric_limits<size_t>::max)() / n >= size)) {
errno = ENOMEM;
return NULL;
return nullptr;
}

const size_t old_size = tc_malloc_size(old_ptr);

@@ -80,7 +80,7 @@ void* _recalloc(void* old_ptr, size_t n, size_t size) {

// If the reallocation succeeded and the new block is larger, zero-fill the
// new bytes:
if (new_ptr != NULL && new_size > old_size) {
if (new_ptr != nullptr && new_size > old_size) {
memset(static_cast<char*>(new_ptr) + old_size, 0, tc_nallocx(new_size, 0) - old_size);
}
@@ -173,7 +173,7 @@ class LibcInfo {
}

// Populates all the windows_fn_[] vars based on our module info.
// Returns false if windows_fn_ is all NULL's, because there's
// Returns false if windows_fn_ is all nullptr's, because there's
// nothing to patch.  Also populates the rest of the module_entry
// info, such as the module's name.
bool PopulateWindowsFn(const ModuleEntryCopy& module_entry);

@@ -214,7 +214,7 @@ class LibcInfo {
// PatchOneModule) won't work, since there are no dlls.  Instead,
// you just want to be taking the address of malloc/etc directly.
// In the common, non-static-link case, these pointers will all be
// NULL, since this initializer runs before msvcrt.dll is loaded.
// nullptr, since this initializer runs before msvcrt.dll is loaded.
static const GenericFnPtr static_fn_[kNumFunctions];

// This is the address of the function we are going to patch

@@ -316,7 +316,7 @@ struct ModuleEntryCopy {
GenericFnPtr rgProcAddresses[LibcInfo::ctrgProcAddress];

ModuleEntryCopy() {
modBaseAddr = NULL;
modBaseAddr = nullptr;
modBaseSize = 0;
for (int i = 0; i < sizeof(rgProcAddresses)/sizeof(*rgProcAddresses); i++)
rgProcAddresses[i] = LibcInfo::static_fn(i);

@@ -336,7 +336,7 @@ struct ModuleEntryCopy {
if (modBaseAddr <= target_addr && target_addr < modEndAddr)
rgProcAddresses[i] = (GenericFnPtr)target;
else
rgProcAddresses[i] = (GenericFnPtr)NULL;
rgProcAddresses[i] = (GenericFnPtr)nullptr;
}
}
};

@@ -404,11 +404,11 @@ const char* const LibcInfo::function_name_[] = {
// Ideally we should patch the nothrow versions of new/delete, but
// at least in msvcrt, nothrow-new machine-code is of a type we
// can't patch.  Since these are relatively rare, I'm hoping it's ok
// not to patch them.  (NULL name turns off patching.)
NULL,  // kMangledNewNothrow,
NULL,  // kMangledNewArrayNothrow,
NULL,  // kMangledDeleteNothrow,
NULL,  // kMangledDeleteArrayNothrow,
// not to patch them.  (nullptr name turns off patching.)
nullptr,  // kMangledNewNothrow,
nullptr,  // kMangledNewArrayNothrow,
nullptr,  // kMangledDeleteNothrow,
nullptr,  // kMangledDeleteArrayNothrow,
"_msize", "_expand", "_calloc_crt", "_free_base", "_free_dbg"
};

@@ -423,7 +423,7 @@ const GenericFnPtr LibcInfo::static_fn_[] = {
(GenericFnPtr)&::realloc,
(GenericFnPtr)&::calloc,
#ifdef __MINGW32__
NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
nullptr, nullptr, nullptr, nullptr, nullptr, nullptr, nullptr, nullptr,
#else
(GenericFnPtr)(void*(*)(size_t))&::operator new,
(GenericFnPtr)(void*(*)(size_t))&::operator new[],

@@ -477,10 +477,10 @@ const GenericFnPtr LibcInfoWithPatchFunctions<T>::perftools_fn_[] = {
};

/*static*/ WindowsInfo::FunctionInfo WindowsInfo::function_info_[] = {
{ "HeapAlloc", NULL, NULL, (GenericFnPtr)&Perftools_HeapAlloc },
{ "HeapFree", NULL, NULL, (GenericFnPtr)&Perftools_HeapFree },
{ "LoadLibraryExW", NULL, NULL, (GenericFnPtr)&Perftools_LoadLibraryExW },
{ "FreeLibrary", NULL, NULL, (GenericFnPtr)&Perftools_FreeLibrary },
{ "HeapAlloc", nullptr, nullptr, (GenericFnPtr)&Perftools_HeapAlloc },
{ "HeapFree", nullptr, nullptr, (GenericFnPtr)&Perftools_HeapFree },
{ "LoadLibraryExW", nullptr, nullptr, (GenericFnPtr)&Perftools_LoadLibraryExW },
{ "FreeLibrary", nullptr, nullptr, (GenericFnPtr)&Perftools_FreeLibrary },
};

bool LibcInfo::PopulateWindowsFn(const ModuleEntryCopy& module_entry) {

@@ -499,29 +499,29 @@ bool LibcInfo::PopulateWindowsFn(const ModuleEntryCopy& module_entry) {
}

// Some modules use the same function pointer for new and new[].  If
// we find that, set one of the pointers to NULL so we don't double-
// we find that, set one of the pointers to nullptr so we don't double-
// patch.  Same may happen with new and nothrow-new, or even new[]
// and nothrow-new.  It's easiest just to check each fn-ptr against
// every other.
for (int i = 0; i < kNumFunctions; i++) {
for (int j = i+1; j < kNumFunctions; j++) {
if (windows_fn_[i] == windows_fn_[j]) {
// We NULL the later one (j), so as to minimize the chances we
// NULL kFree and kRealloc.  See comments below.  This is fragile!
windows_fn_[j] = NULL;
// We nullptr the later one (j), so as to minimize the chances we
// nullptr kFree and kRealloc.  See comments below.  This is fragile!
windows_fn_[j] = nullptr;
}
}
}

// There's always a chance that our module uses the same function
// as another module that we've already loaded.  In that case, we
// need to set our windows_fn to NULL, to avoid double-patching.
// need to set our windows_fn to nullptr, to avoid double-patching.
for (int ifn = 0; ifn < kNumFunctions; ifn++) {
for (int imod = 0;
imod < sizeof(g_module_libcs)/sizeof(*g_module_libcs); imod++) {
if (g_module_libcs[imod]->is_valid() &&
this->windows_fn(ifn) == g_module_libcs[imod]->windows_fn(ifn)) {
windows_fn_[ifn] = NULL;
windows_fn_[ifn] = nullptr;
}
}
}

@@ -534,8 +534,8 @@ bool LibcInfo::PopulateWindowsFn(const ModuleEntryCopy& module_entry) {
if (!found_non_null)
return false;

// It's important we didn't NULL out windows_fn_[kFree] or [kRealloc].
// The reason is, if those are NULL-ed out, we'll never patch them
// It's important we didn't nullptr out windows_fn_[kFree] or [kRealloc].
// The reason is, if those are nullptr-ed out, we'll never patch them
// and thus never get an origstub_fn_ value for them, and when we
// try to call origstub_fn_[kFree/kRealloc] in Perftools_free and
// Perftools_realloc, below, it will fail.  We could work around

@@ -555,13 +555,13 @@ bool LibcInfoWithPatchFunctions<T>::Patch(const LibcInfo& me_info) {
CopyFrom(me_info);   // copies the module_entry and the windows_fn_ array
for (int i = 0; i < kNumFunctions; i++) {
if (windows_fn_[i] && windows_fn_[i] != perftools_fn_[i]) {
// if origstub_fn_ is not NULL, it's left around from a previous
// patch.  We need to set it to NULL for the new Patch call.
// if origstub_fn_ is not nullptr, it's left around from a previous
// patch.  We need to set it to nullptr for the new Patch call.
//
// Note that origstub_fn_ was logically freed by
// PreamblePatcher::Unpatch, so we don't have to do anything
// about it.
origstub_fn_[i] = NULL;  // Patch() will fill this in
origstub_fn_[i] = nullptr;  // Patch() will fill this in
CHECK_EQ(sidestep::SIDESTEP_SUCCESS,
PreamblePatcher::Patch(windows_fn_[i], perftools_fn_[i],
&origstub_fn_[i]));

@@ -587,23 +587,23 @@ void LibcInfoWithPatchFunctions<T>::Unpatch() {

void WindowsInfo::Patch() {
HMODULE hkernel32 = ::GetModuleHandleA("kernel32");
CHECK_NE(hkernel32, NULL);
CHECK_NE(hkernel32, nullptr);

// Unlike for libc, we know these exist in our module, so we can get
// and patch at the same time.
for (int i = 0; i < kNumFunctions; i++) {
function_info_[i].windows_fn = (GenericFnPtr)
::GetProcAddress(hkernel32, function_info_[i].name);
// If origstub_fn is not NULL, it's left around from a previous
// patch.  We need to set it to NULL for the new Patch call.
// If origstub_fn is not nullptr, it's left around from a previous
// patch.  We need to set it to nullptr for the new Patch call.
// Since we've patched Unpatch() not to delete origstub_fn_ (it
// causes problems in some contexts, though obviously not this
// one), we should delete it now, before setting it to NULL.
// one), we should delete it now, before setting it to nullptr.
// NOTE: casting from a function to a pointer is contra the C++
// spec.  It's not safe on IA64, but is on i386.  We use
// a C-style cast here to emphasize this is not legal C++.
delete[] (char*)(function_info_[i].origstub_fn);
function_info_[i].origstub_fn = NULL;  // Patch() will fill this in
function_info_[i].origstub_fn = nullptr;  // Patch() will fill this in
CHECK_EQ(sidestep::SIDESTEP_SUCCESS,
PreamblePatcher::Patch(function_info_[i].windows_fn,
function_info_[i].perftools_fn,

@@ -837,7 +837,7 @@ void LibcInfoWithPatchFunctions<T>::Perftools_free_dbg(void* ptr, int block_use)
template<int T>
void* LibcInfoWithPatchFunctions<T>::Perftools_realloc(
void* old_ptr, size_t new_size) __THROW {
if (old_ptr == NULL) {
if (old_ptr == nullptr) {
void* result = do_malloc_or_cpp_alloc(new_size);
MallocHook::InvokeNewHook(result, new_size);
return result;

@@ -846,7 +846,7 @@ void* LibcInfoWithPatchFunctions<T>::Perftools_realloc(
MallocHook::InvokeDeleteHook(old_ptr);
do_free_with_callback(old_ptr,
(void (*)(void*))origstub_fn_[kFree], false, 0);
return NULL;
return nullptr;
}
return do_realloc_with_callback(
old_ptr, new_size,

@@ -984,7 +984,7 @@ BOOL WINAPI WindowsInfo::Perftools_FreeLibrary(HMODULE hLibModule) {
// address and seeing if it comes back with the same address.  If it
// is the same address it's still loaded, so the FreeLibrary() call
// was a noop, and there's no need to redo the patching.
HMODULE owner = NULL;
HMODULE owner = nullptr;
BOOL result = ::GetModuleHandleExW(
(GET_MODULE_HANDLE_EX_FLAG_FROM_ADDRESS |
GET_MODULE_HANDLE_EX_FLAG_UNCHANGED_REFCOUNT),
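Editor's illustration of the interception shape all these Perftools_* functions share: the patcher saves a stub that reaches the original entry point in origstub_fn_, and the replacement calls through it. A toy version with hypothetical names and an ordinary function pointer standing in for the real patched stub:

#include <cstdio>
#include <cstdlib>

// The real code stores a PreamblePatcher stub here; we just use malloc itself.
static void* (*origstub_malloc)(size_t) = nullptr;

static void* Hooked_malloc(size_t size) {
  void* result = origstub_malloc(size);  // forward to the original
  std::printf("malloc(%zu) -> %p\n", size, result);
  return result;
}

int main() {
  origstub_malloc = &std::malloc;
  void* p = Hooked_malloc(16);
  std::free(p);
}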
@@ -119,7 +119,7 @@ struct DestrFnClosure {
tcmalloc::TlsKey key_for_destr_fn_arg;
};

static DestrFnClosure destr_fn_info;   // initted to all NULL/0.
static DestrFnClosure destr_fn_info;   // initted to all nullptr/0.

static int on_process_term(void) {
if (destr_fn_info.destr_fn) {

@@ -127,8 +127,8 @@ static int on_process_term(void) {
// This shouldn't be necessary, but in Release mode, Windows
// sometimes trashes the pointer in the TLS slot, so we need to
// remove the pointer from the TLS slot before the thread dies.
TlsSetValue(destr_fn_info.key_for_destr_fn_arg, NULL);
if (ptr)  // pthread semantics say not to call if ptr is NULL
TlsSetValue(destr_fn_info.key_for_destr_fn_arg, nullptr);
if (ptr)  // pthread semantics say not to call if ptr is nullptr
(*destr_fn_info.destr_fn)(ptr);
}
return 0;

@@ -185,11 +185,11 @@ BOOL WINAPI DllMain(HINSTANCE h, DWORD dwReason, PVOID pv) {
tcmalloc::TlsKey tcmalloc::WinTlsKeyCreate(void (*destr_fn)(void*)) {
// Semantics are: we create a new key, and then promise to call
// destr_fn with TlsGetValue(key) when the thread is destroyed
// (as long as TlsGetValue(key) is not NULL).
// (as long as TlsGetValue(key) is not nullptr).
tcmalloc::TlsKey key = TlsAlloc();
if (destr_fn) {   // register it
// If this assert fails, we'll need to support an array of destr_fn_infos
assert(destr_fn_info.destr_fn == NULL);
assert(destr_fn_info.destr_fn == nullptr);
destr_fn_info.destr_fn = destr_fn;
destr_fn_info.key_for_destr_fn_arg = key;
}
@@ -155,11 +155,11 @@ typedef _off_t off_t;
/* VirtualAlloc only replaces for mmap when certain invariants are kept. */
inline void *mmap(void *addr, size_t length, int prot, int flags,
int fd, off_t offset) {
if (addr == NULL && fd == -1 && offset == 0 &&
if (addr == nullptr && fd == -1 && offset == 0 &&
prot == (PROT_READ|PROT_WRITE) && flags == (MAP_PRIVATE|MAP_ANONYMOUS)) {
return VirtualAlloc(0, length, MEM_RESERVE | MEM_COMMIT, PAGE_READWRITE);
} else {
return NULL;
return nullptr;
}
}
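Editorial usage note: the shim above accepts exactly one call shape -- anonymous, private, read-write, no hint address -- and returns nullptr for everything else. For instance (assuming the shim above is in scope):

// Any other argument combination (a real fd, a hint address, different
// prot/flags) falls into the else branch and yields nullptr.
static void* alloc_anon_rw(size_t length) {
  return mmap(nullptr, length, PROT_READ|PROT_WRITE,
              MAP_PRIVATE|MAP_ANONYMOUS, -1, 0);
}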
@@ -228,7 +228,7 @@ EXTERN_C PERFTOOLS_DLL_DECL void WriteToStderr(const char* buf, int len);

/* Handle case when poll is used to simulate sleep. */
inline int poll(struct pollfd* fds, int nfds, int timeout) {
assert(fds == NULL);
assert(fds == nullptr);
assert(nfds == 0);
Sleep(timeout);
return 0;
@@ -59,10 +59,10 @@

namespace sidestep {

PreamblePatcher::PreamblePage* PreamblePatcher::preamble_pages_ = NULL;
long PreamblePatcher::granularity_ = 0;
long PreamblePatcher::pagesize_ = 0;
bool PreamblePatcher::initialized_ = false;
PreamblePatcher::PreamblePage* PreamblePatcher::preamble_pages_;
long PreamblePatcher::granularity_;
long PreamblePatcher::pagesize_;
bool PreamblePatcher::initialized_;

static const unsigned int kPreamblePageMagic = 0x4347414D; // "MAGC"

@@ -84,8 +84,8 @@ static const unsigned int kPreamblePageMagic = 0x4347414D; // "MAGC"
void* PreamblePatcher::ResolveTargetImpl(unsigned char* target,
unsigned char* stop_before,
bool stop_before_trampoline) {
if (target == NULL)
return NULL;
if (target == nullptr)
return nullptr;
while (1) {
unsigned char* new_target;
if (target[0] == ASM_JMP32REL) {

@@ -159,7 +159,7 @@ class DeleteUnsignedCharArray {

unsigned char* Release() {
unsigned char* temp = array_;
array_ = NULL;
array_ = nullptr;
return temp;
}

@@ -268,7 +268,7 @@ SideStepError PreamblePatcher::RawPatch(void* target_function,

SideStepError error_code = RawPatchWithStubAndProtections(
target_function, replacement_function, preamble_stub,
MAX_PREAMBLE_STUB_SIZE, NULL);
MAX_PREAMBLE_STUB_SIZE, nullptr);

if (SIDESTEP_SUCCESS != error_code) {
SIDESTEP_ASSERT(false);

@@ -437,8 +437,8 @@ void PreamblePatcher::Initialize() {

unsigned char* PreamblePatcher::AllocPreambleBlockNear(void* target) {
PreamblePage* preamble_page = preamble_pages_;
while (preamble_page != NULL) {
if (preamble_page->free_ != NULL) {
while (preamble_page != nullptr) {
if (preamble_page->free_ != nullptr) {
__int64 val = reinterpret_cast<__int64>(preamble_page) -
reinterpret_cast<__int64>(target);
if ((val > 0 && val + pagesize_ <= INT_MAX) ||

@@ -450,12 +450,12 @@ unsigned char* PreamblePatcher::AllocPreambleBlockNear(void* target) {
}

// The free_ member of the page is used to store the next available block
// of memory to use or NULL if there are no chunks available, in which case
// of memory to use or nullptr if there are no chunks available, in which case
// we'll allocate a new page.
if (preamble_page == NULL || preamble_page->free_ == NULL) {
if (preamble_page == nullptr || preamble_page->free_ == nullptr) {
// Create a new preamble page and initialize the free list
preamble_page = reinterpret_cast<PreamblePage*>(AllocPageNear(target));
SIDESTEP_ASSERT(preamble_page != NULL && "Could not allocate page!");
SIDESTEP_ASSERT(preamble_page != nullptr && "Could not allocate page!");
void** pp = &preamble_page->free_;
unsigned char* ptr = reinterpret_cast<unsigned char*>(preamble_page) +
MAX_PREAMBLE_STUB_SIZE;

@@ -466,7 +466,7 @@ unsigned char* PreamblePatcher::AllocPreambleBlockNear(void* target) {
pp = reinterpret_cast<void**>(ptr);
ptr += MAX_PREAMBLE_STUB_SIZE;
}
*pp = NULL;
*pp = nullptr;
// Insert the new page into the list
preamble_page->magic_ = kPreamblePageMagic;
preamble_page->next_ = preamble_pages_;

@@ -478,7 +478,7 @@ unsigned char* PreamblePatcher::AllocPreambleBlockNear(void* target) {
}

void PreamblePatcher::FreePreambleBlock(unsigned char* block) {
SIDESTEP_ASSERT(block != NULL);
SIDESTEP_ASSERT(block != nullptr);
SIDESTEP_ASSERT(granularity_ != 0);
uintptr_t ptr = reinterpret_cast<uintptr_t>(block);
ptr -= ptr & (granularity_ - 1);

@@ -498,12 +498,12 @@ void* PreamblePatcher::AllocPageNear(void* target) {
PreamblePatcher::Initialize();
SIDESTEP_ASSERT(initialized_);
}
void* pv = NULL;
void* pv = nullptr;
unsigned char* allocation_base = reinterpret_cast<unsigned char*>(
mbi.AllocationBase);
__int64 i = 1;
bool high_target = reinterpret_cast<__int64>(target) > UINT_MAX;
while (pv == NULL) {
while (pv == nullptr) {
__int64 val = reinterpret_cast<__int64>(allocation_base) -
(i * granularity_);
if (high_target &&

@@ -521,13 +521,13 @@ void* PreamblePatcher::AllocPageNear(void* target) {
}

// We couldn't allocate low, try to allocate high
if (pv == NULL) {
if (pv == nullptr) {
i = 1;
// Round up to the next multiple of page granularity
allocation_base = reinterpret_cast<unsigned char*>(
(reinterpret_cast<__int64>(target) &
(~(granularity_ - 1))) + granularity_);
while (pv == NULL) {
while (pv == nullptr) {
__int64 val = reinterpret_cast<__int64>(allocation_base) +
(i * granularity_) - reinterpret_cast<__int64>(target);
if (val > INT_MAX || val < 0) {
@@ -161,7 +161,7 @@ class PERFTOOLS_DLL_DECL PreamblePatcher {
// example:
// @code
// typedef BOOL (WINAPI *MessageBoxPtr)(HWND, LPCTSTR, LPCTSTR, UINT);
// MessageBoxPtr original = NULL;
// MessageBoxPtr original = nullptr;
// PreamblePatcher::Patch(MessageBox, Hook_MessageBox, &original);
// @endcode
template <class T>

@@ -195,7 +195,7 @@ class PERFTOOLS_DLL_DECL PreamblePatcher {
//
// @param original_function_stub Pointer to memory that should receive a
// pointer that can be used (e.g. in the replacement function) to call the
// original function, or NULL to indicate failure.
// original function, or nullptr to indicate failure.
//
// @return One of the EnSideStepError error codes; only SIDESTEP_SUCCESS
// indicates success.

@@ -211,7 +211,7 @@ class PERFTOOLS_DLL_DECL PreamblePatcher {
return SIDESTEP_INVALID_PARAMETER;
}
HMODULE module = ::GetModuleHandle(module_name);
SIDESTEP_ASSERT(module != NULL);
SIDESTEP_ASSERT(module != nullptr);
if (!module) {
SIDESTEP_ASSERT(false && "Invalid module name.");
return SIDESTEP_NO_SUCH_MODULE;

@@ -245,11 +245,11 @@ class PERFTOOLS_DLL_DECL PreamblePatcher {
//
// @param original_function_stub Pointer to memory that should receive a
// pointer that can be used (e.g. in the replacement function) to call the
// original function, or NULL to indicate failure.
// original function, or nullptr to indicate failure.
//
// @param original_function_stub Pointer to memory that should receive a
// pointer that can be used (e.g. in the replacement function) to call the
// original function, or NULL to indicate failure.
// original function, or nullptr to indicate failure.
//
// @return One of the EnSideStepError error codes; only SIDESTEP_SUCCESS
// indicates success.

@@ -307,7 +307,7 @@ class PERFTOOLS_DLL_DECL PreamblePatcher {
// of a chain of JMPs).
template <class T>
static T ResolveTarget(T target_function) {
return (T)ResolveTargetImpl((unsigned char*)target_function, NULL);
return (T)ResolveTargetImpl((unsigned char*)target_function, nullptr);
}

// Allocates a block of memory of size MAX_PREAMBLE_STUB_SIZE that is as

@@ -335,7 +335,7 @@ class PERFTOOLS_DLL_DECL PreamblePatcher {
unsigned int magic_;
PreamblePage* next_;
// This member points to a linked list of free blocks within the page
// or NULL if at the end
// or nullptr if at the end
void* free_;
};

@@ -383,7 +383,7 @@ class PERFTOOLS_DLL_DECL PreamblePatcher {
// preamble_stub
//
// @param bytes_needed Pointer to a variable that receives the minimum
// number of bytes required for the stub.  Can be set to NULL if you're
// number of bytes required for the stub.  Can be set to nullptr if you're
// not interested.
//
// @return An error code indicating the result of patching.

@@ -414,7 +414,7 @@ class PERFTOOLS_DLL_DECL PreamblePatcher {
// preamble_stub
//
// @param bytes_needed Pointer to a variable that receives the minimum
// number of bytes required for the stub.  Can be set to NULL if you're
// number of bytes required for the stub.  Can be set to nullptr if you're
// not interested.
//
// @return An error code indicating the result of patching.
@@ -93,10 +93,10 @@ extern "C" int JumpAbsoluteFunction(int);
extern "C" int CallNearRelativeFunction(int);

typedef int (*IncrementingFunc)(int);
IncrementingFunc original_function = NULL;
IncrementingFunc original_function;

int HookIncrementNumber(int i) {
SIDESTEP_ASSERT(original_function != NULL);
SIDESTEP_ASSERT(original_function != nullptr);
int incremented_once = original_function(i);
return incremented_once + 1;
}

@@ -132,10 +132,10 @@ bool TestDisassembler() {
}

bool TestPatchWithLongJump() {
original_function = NULL;
original_function = nullptr;
void *p = ::VirtualAlloc(reinterpret_cast<void *>(0x0000020000000000), 4096,
MEM_RESERVE | MEM_COMMIT, PAGE_EXECUTE_READWRITE);
SIDESTEP_EXPECT_TRUE(p != NULL);
SIDESTEP_EXPECT_TRUE(p != nullptr);
memset(p, 0xcc, 4096);
SIDESTEP_EXPECT_TRUE(sidestep::SIDESTEP_SUCCESS ==
sidestep::PreamblePatcher::Patch(IncrementNumber,

@@ -151,7 +151,7 @@ bool TestPatchWithLongJump() {
}

bool TestPatchWithPreambleShortCondJump() {
original_function = NULL;
original_function = nullptr;
SIDESTEP_EXPECT_TRUE(sidestep::SIDESTEP_SUCCESS ==
sidestep::PreamblePatcher::Patch(JumpShortCondFunction,
HookIncrementNumber,

@@ -165,7 +165,7 @@ bool TestPatchWithPreambleShortCondJump() {
}

bool TestPatchWithPreambleNearRelativeCondJump() {
original_function = NULL;
original_function = nullptr;
SIDESTEP_EXPECT_TRUE(sidestep::SIDESTEP_SUCCESS ==
sidestep::PreamblePatcher::Patch(JumpNearCondFunction,
HookIncrementNumber,

@@ -180,7 +180,7 @@ bool TestPatchWithPreambleNearRelativeCondJump() {
}

bool TestPatchWithPreambleAbsoluteJump() {
original_function = NULL;
original_function = nullptr;
SIDESTEP_EXPECT_TRUE(sidestep::SIDESTEP_SUCCESS ==
sidestep::PreamblePatcher::Patch(JumpAbsoluteFunction,
HookIncrementNumber,

@@ -195,7 +195,7 @@ bool TestPatchWithPreambleAbsoluteJump() {
}

bool TestPatchWithPreambleNearRelativeCall() {
original_function = NULL;
original_function = nullptr;
SIDESTEP_EXPECT_TRUE(sidestep::SIDESTEP_SUCCESS ==
sidestep::PreamblePatcher::Patch(
CallNearRelativeFunction,

@@ -211,7 +211,7 @@ bool TestPatchWithPreambleNearRelativeCall() {
}

bool TestPatchUsingDynamicStub() {
original_function = NULL;
original_function = nullptr;
SIDESTEP_EXPECT_TRUE(IncrementNumber(1) == 2);
SIDESTEP_EXPECT_TRUE(sidestep::SIDESTEP_SUCCESS ==
sidestep::PreamblePatcher::Patch(IncrementNumber,

@@ -232,7 +232,7 @@ bool TestPatchUsingDynamicStub() {
// jmp to the hook function.  So verify that we now can not patch
// IncrementNumber because it starts with a jump.
#if 0
IncrementingFunc dummy = NULL;
IncrementingFunc dummy = nullptr;
// TODO(joi@chromium.org): restore this test once flag is added to
// disable JMP following
SIDESTEP_EXPECT_TRUE(sidestep::SIDESTEP_JUMP_INSTRUCTION ==

@@ -243,7 +243,7 @@ bool TestPatchUsingDynamicStub() {
// This test disabled because code in preamble_patcher_with_stub.cc
// asserts before returning the error code -- so there is no way
// to get an error code here, in debug build.
dummy = NULL;
dummy = nullptr;
SIDESTEP_EXPECT_TRUE(sidestep::SIDESTEP_FUNCTION_TOO_SMALL ==
sidestep::PreamblePatcher::Patch(TooShortFunction,
HookIncrementNumber,

@@ -258,7 +258,7 @@ bool TestPatchUsingDynamicStub() {
}

bool PatchThenUnpatch() {
original_function = NULL;
original_function = nullptr;
SIDESTEP_EXPECT_TRUE(sidestep::SIDESTEP_SUCCESS ==
sidestep::PreamblePatcher::Patch(IncrementNumber,
HookIncrementNumber,

@@ -271,7 +271,7 @@ bool PatchThenUnpatch() {
UNPATCH(IncrementNumber,
HookIncrementNumber,
original_function));
original_function = NULL;
original_function = nullptr;
SIDESTEP_EXPECT_TRUE(IncrementNumber(3) == 4);

return true;

@@ -314,12 +314,12 @@ bool TestPreambleAllocation() {
void* p1 = reinterpret_cast<void*>(0x110000000);
void* p2 = reinterpret_cast<void*>(0x810000000);
unsigned char* b1 = PreamblePatcher::AllocPreambleBlockNear(p1);
SIDESTEP_EXPECT_TRUE(b1 != NULL);
SIDESTEP_EXPECT_TRUE(b1 != nullptr);
diff = reinterpret_cast<__int64>(p1) - reinterpret_cast<__int64>(b1);
// Ensure blocks are within 2GB
SIDESTEP_EXPECT_TRUE(diff <= INT_MAX && diff >= INT_MIN);
unsigned char* b2 = PreamblePatcher::AllocPreambleBlockNear(p2);
SIDESTEP_EXPECT_TRUE(b2 != NULL);
SIDESTEP_EXPECT_TRUE(b2 != nullptr);
diff = reinterpret_cast<__int64>(p2) - reinterpret_cast<__int64>(b2);
SIDESTEP_EXPECT_TRUE(diff <= INT_MAX && diff >= INT_MIN);
@@ -59,12 +59,12 @@ SideStepError PreamblePatcher::RawPatchWithStub(
unsigned char* preamble_stub,
unsigned long stub_size,
unsigned long* bytes_needed) {
if ((NULL == target_function) ||
(NULL == replacement_function) ||
(NULL == preamble_stub)) {
if ((nullptr == target_function) ||
(nullptr == replacement_function) ||
(nullptr == preamble_stub)) {
SIDESTEP_ASSERT(false &&
"Invalid parameters - either pTargetFunction or "
"pReplacementFunction or pPreambleStub were NULL.");
"pReplacementFunction or pPreambleStub were nullptr.");
return SIDESTEP_INVALID_PARAMETER;
}

@@ -199,7 +199,7 @@ SideStepError PreamblePatcher::RawPatchWithStub(
preamble_bytes += cur_bytes;
}

if (NULL != bytes_needed)
if (nullptr != bytes_needed)
*bytes_needed = stub_bytes + kRequiredStubJumpBytes
+ required_trampoline_bytes;
@@ -47,9 +47,9 @@
static SpinLock spinlock;

// The current system allocator declaration
SysAllocator* tcmalloc_sys_alloc = NULL;
SysAllocator* tcmalloc_sys_alloc;
// Number of bytes taken from system.
size_t TCMalloc_SystemTaken = 0;
size_t TCMalloc_SystemTaken;

class VirtualSysAllocator : public SysAllocator {
public:

@@ -85,8 +85,8 @@ void* VirtualSysAllocator::Alloc(size_t size, size_t *actual_size,

void* result = VirtualAlloc(0, size,
MEM_COMMIT|MEM_RESERVE, PAGE_READWRITE);
if (result == NULL)
return NULL;
if (result == nullptr)
return nullptr;

// If the result is not aligned memory fragmentation will result which can
// lead to pathological memory use.

@@ -136,7 +136,7 @@ void* TCMalloc_SystemAlloc(size_t size, size_t *actual_size,
}

void* result = tcmalloc_sys_alloc->Alloc(size, actual_size, alignment);
if (result != NULL) {
if (result != nullptr) {
if (actual_size) {
TCMalloc_SystemTaken += *actual_size;
} else {
@@ -81,7 +81,7 @@ extern "C" {
#endif
/*
* Returns a human-readable version string.  If major, minor,
* and/or patch are not NULL, they are set to the major version,
* and/or patch are not nullptr, they are set to the major version,
* minor version, and patch-code (a string, usually "").
*/
PERFTOOLS_DLL_DECL const char* tc_version(int* major, int* minor,