Diffstat (limited to 'lib/sanitizer_common/tests/sanitizer_allocator_test.cc')
-rw-r--r--  lib/sanitizer_common/tests/sanitizer_allocator_test.cc | 300
1 file changed, 268 insertions(+), 32 deletions(-)
diff --git a/lib/sanitizer_common/tests/sanitizer_allocator_test.cc b/lib/sanitizer_common/tests/sanitizer_allocator_test.cc
index d67f4636ef4f7..de949ca7defec 100644
--- a/lib/sanitizer_common/tests/sanitizer_allocator_test.cc
+++ b/lib/sanitizer_common/tests/sanitizer_allocator_test.cc
@@ -22,6 +22,7 @@
#include <pthread.h>
#include <algorithm>
#include <vector>
+#include <set>
// Too slow for debug build
#if TSAN_DEBUG == 0
@@ -40,8 +41,16 @@ typedef SizeClassAllocator64<
static const u64 kAddressSpaceSize = 1ULL << 32;
#endif
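+// The 32-bit allocator keeps one byte of state per 2^kRegionSizeLog bytes of
+// address space, so the flat byte map needs kAddressSpaceSize >> kRegionSizeLog
+// entries.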
+static const uptr kRegionSizeLog = FIRST_32_SECOND_64(20, 24);
+static const uptr kFlatByteMapSize = kAddressSpaceSize >> kRegionSizeLog;
+
typedef SizeClassAllocator32<
- 0, kAddressSpaceSize, 16, CompactSizeClassMap> Allocator32Compact;
+ 0, kAddressSpaceSize,
+ /*kMetadataSize*/16,
+ CompactSizeClassMap,
+ kRegionSizeLog,
+ FlatByteMap<kFlatByteMapSize> >
+ Allocator32Compact;
template <class SizeClassMap>
void TestSizeClassMap() {
@@ -63,7 +72,8 @@ void TestSizeClassAllocator() {
Allocator *a = new Allocator;
a->Init();
SizeClassAllocatorLocalCache<Allocator> cache;
- cache.Init();
+ memset(&cache, 0, sizeof(cache));
+ cache.Init(0);
static const uptr sizes[] = {1, 16, 30, 40, 100, 1000, 10000,
50000, 60000, 100000, 120000, 300000, 500000, 1000000, 2000000};
@@ -77,7 +87,7 @@ void TestSizeClassAllocator() {
uptr size = sizes[s];
if (!a->CanAllocate(size, 1)) continue;
// printf("s = %ld\n", size);
- uptr n_iter = std::max((uptr)6, 10000000 / size);
+ uptr n_iter = std::max((uptr)6, 8000000 / size);
// fprintf(stderr, "size: %ld iter: %ld\n", size, n_iter);
for (uptr i = 0; i < n_iter; i++) {
uptr class_id0 = Allocator::SizeClassMapT::ClassID(size);
@@ -114,6 +124,12 @@ void TestSizeClassAllocator() {
CHECK_EQ(last_total_allocated, total_allocated);
}
+ // Check that GetBlockBegin never crashes.
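+ // Ident() is a test helper that keeps the result from being optimized away.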
+ for (uptr x = 0, step = kAddressSpaceSize / 100000;
+ x < kAddressSpaceSize - step; x += step)
+ if (a->PointerIsMine(reinterpret_cast<void *>(x)))
+ Ident(a->GetBlockBegin(reinterpret_cast<void *>(x)));
+
a->TestOnlyUnmap();
delete a;
}
@@ -137,25 +153,28 @@ void SizeClassAllocatorMetadataStress() {
Allocator *a = new Allocator;
a->Init();
SizeClassAllocatorLocalCache<Allocator> cache;
- cache.Init();
- static volatile void *sink;
+ memset(&cache, 0, sizeof(cache));
+ cache.Init(0);
- const uptr kNumAllocs = 10000;
+ const uptr kNumAllocs = 1 << 13;
void *allocated[kNumAllocs];
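+ // Remember each chunk's metadata pointer so repeated GetMetaData() calls
+ // can be checked for stability.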
+ void *meta[kNumAllocs];
for (uptr i = 0; i < kNumAllocs; i++) {
void *x = cache.Allocate(a, 1 + i % 50);
allocated[i] = x;
+ meta[i] = a->GetMetaData(x);
}
// Get Metadata kNumAllocs^2 times.
for (uptr i = 0; i < kNumAllocs * kNumAllocs; i++) {
- sink = a->GetMetaData(allocated[i % kNumAllocs]);
+ uptr idx = i % kNumAllocs;
+ void *m = a->GetMetaData(allocated[idx]);
+ EXPECT_EQ(m, meta[idx]);
}
for (uptr i = 0; i < kNumAllocs; i++) {
cache.Deallocate(a, 1 + i % 50, allocated[i]);
}
a->TestOnlyUnmap();
- (void)sink;
delete a;
}
@@ -167,11 +186,47 @@ TEST(SanitizerCommon, SizeClassAllocator64MetadataStress) {
TEST(SanitizerCommon, SizeClassAllocator64CompactMetadataStress) {
SizeClassAllocatorMetadataStress<Allocator64Compact>();
}
-#endif
+#endif // SANITIZER_WORDSIZE == 64
TEST(SanitizerCommon, SizeClassAllocator32CompactMetadataStress) {
SizeClassAllocatorMetadataStress<Allocator32Compact>();
}
+template <class Allocator>
+void SizeClassAllocatorGetBlockBeginStress() {
+ Allocator *a = new Allocator;
+ a->Init();
+ SizeClassAllocatorLocalCache<Allocator> cache;
+ memset(&cache, 0, sizeof(cache));
+ cache.Init(0);
+
+ uptr max_size_class = Allocator::kNumClasses - 1;
+ uptr size = Allocator::SizeClassMapT::Size(max_size_class);
+ u64 G8 = 1ULL << 33;
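+ // G8 is 8GB, so block offsets in the loop below exceed 32 bits.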
+ // Make sure we correctly compute GetBlockBegin() w/o overflow.
+ for (size_t i = 0; i <= G8 / size; i++) {
+ void *x = cache.Allocate(a, max_size_class);
+ void *beg = a->GetBlockBegin(x);
+ // if ((i & (i - 1)) == 0)
+ // fprintf(stderr, "[%zd] %p %p\n", i, x, beg);
+ EXPECT_EQ(x, beg);
+ }
+
+ a->TestOnlyUnmap();
+ delete a;
+}
+
+#if SANITIZER_WORDSIZE == 64
+TEST(SanitizerCommon, SizeClassAllocator64GetBlockBegin) {
+ SizeClassAllocatorGetBlockBeginStress<Allocator64>();
+}
+TEST(SanitizerCommon, SizeClassAllocator64CompactGetBlockBegin) {
+ SizeClassAllocatorGetBlockBeginStress<Allocator64Compact>();
+}
+TEST(SanitizerCommon, SizeClassAllocator32CompactGetBlockBegin) {
+ SizeClassAllocatorGetBlockBeginStress<Allocator32Compact>();
+}
+#endif // SANITIZER_WORDSIZE == 64
+
struct TestMapUnmapCallback {
static int map_count, unmap_count;
void OnMap(uptr p, uptr size) const { map_count++; }
@@ -191,8 +246,11 @@ TEST(SanitizerCommon, SizeClassAllocator64MapUnmapCallback) {
a->Init();
EXPECT_EQ(TestMapUnmapCallback::map_count, 1); // Allocator state.
SizeClassAllocatorLocalCache<Allocator64WithCallBack> cache;
- cache.Init();
- a->AllocateBatch(&cache, 64);
+ memset(&cache, 0, sizeof(cache));
+ cache.Init(0);
+ AllocatorStats stats;
+ stats.Init();
+ a->AllocateBatch(&stats, &cache, 32);
EXPECT_EQ(TestMapUnmapCallback::map_count, 3); // State + alloc + metadata.
a->TestOnlyUnmap();
EXPECT_EQ(TestMapUnmapCallback::unmap_count, 1); // The whole thing.
@@ -204,17 +262,25 @@ TEST(SanitizerCommon, SizeClassAllocator32MapUnmapCallback) {
TestMapUnmapCallback::map_count = 0;
TestMapUnmapCallback::unmap_count = 0;
typedef SizeClassAllocator32<
- 0, kAddressSpaceSize, 16, CompactSizeClassMap,
- TestMapUnmapCallback> Allocator32WithCallBack;
+ 0, kAddressSpaceSize,
+ /*kMetadataSize*/16,
+ CompactSizeClassMap,
+ kRegionSizeLog,
+ FlatByteMap<kFlatByteMapSize>,
+ TestMapUnmapCallback>
+ Allocator32WithCallBack;
Allocator32WithCallBack *a = new Allocator32WithCallBack;
a->Init();
- EXPECT_EQ(TestMapUnmapCallback::map_count, 1); // Allocator state.
+ EXPECT_EQ(TestMapUnmapCallback::map_count, 0);
SizeClassAllocatorLocalCache<Allocator32WithCallBack> cache;
- cache.Init();
- a->AllocateBatch(&cache, 64);
- EXPECT_EQ(TestMapUnmapCallback::map_count, 2); // alloc.
+ memset(&cache, 0, sizeof(cache));
+ cache.Init(0);
+ AllocatorStats stats;
+ stats.Init();
+ a->AllocateBatch(&stats, &cache, 32);
+ EXPECT_EQ(TestMapUnmapCallback::map_count, 1);
a->TestOnlyUnmap();
- EXPECT_EQ(TestMapUnmapCallback::unmap_count, 2); // The whole thing + alloc.
+ EXPECT_EQ(TestMapUnmapCallback::unmap_count, 1);
delete a;
// fprintf(stderr, "Map: %d Unmap: %d\n",
// TestMapUnmapCallback::map_count,
@@ -226,9 +292,11 @@ TEST(SanitizerCommon, LargeMmapAllocatorMapUnmapCallback) {
TestMapUnmapCallback::unmap_count = 0;
LargeMmapAllocator<TestMapUnmapCallback> a;
a.Init();
- void *x = a.Allocate(1 << 20, 1);
+ AllocatorStats stats;
+ stats.Init();
+ void *x = a.Allocate(&stats, 1 << 20, 1);
EXPECT_EQ(TestMapUnmapCallback::map_count, 1);
- a.Deallocate(x);
+ a.Deallocate(&stats, x);
EXPECT_EQ(TestMapUnmapCallback::unmap_count, 1);
}
@@ -237,9 +305,12 @@ void FailInAssertionOnOOM() {
Allocator a;
a.Init();
SizeClassAllocatorLocalCache<Allocator> cache;
- cache.Init();
+ memset(&cache, 0, sizeof(cache));
+ cache.Init(0);
+ AllocatorStats stats;
+ stats.Init();
for (int i = 0; i < 1000000; i++) {
- a.AllocateBatch(&cache, 64);
+ a.AllocateBatch(&stats, &cache, 52);
}
a.TestOnlyUnmap();
@@ -254,13 +325,15 @@ TEST(SanitizerCommon, SizeClassAllocator64Overflow) {
TEST(SanitizerCommon, LargeMmapAllocator) {
LargeMmapAllocator<> a;
a.Init();
+ AllocatorStats stats;
+ stats.Init();
static const int kNumAllocs = 1000;
char *allocated[kNumAllocs];
static const uptr size = 4000;
// Allocate some.
for (int i = 0; i < kNumAllocs; i++) {
- allocated[i] = (char *)a.Allocate(size, 1);
+ allocated[i] = (char *)a.Allocate(&stats, size, 1);
CHECK(a.PointerIsMine(allocated[i]));
}
// Deallocate all.
@@ -268,14 +341,14 @@ TEST(SanitizerCommon, LargeMmapAllocator) {
for (int i = 0; i < kNumAllocs; i++) {
char *p = allocated[i];
CHECK(a.PointerIsMine(p));
- a.Deallocate(p);
+ a.Deallocate(&stats, p);
}
// Check that none are left.
CHECK_EQ(a.TotalMemoryUsed(), 0);
// Allocate some more, also add metadata.
for (int i = 0; i < kNumAllocs; i++) {
- char *x = (char *)a.Allocate(size, 1);
+ char *x = (char *)a.Allocate(&stats, size, 1);
CHECK_GE(a.GetActuallyAllocatedSize(x), size);
uptr *meta = reinterpret_cast<uptr*>(a.GetMetaData(x));
*meta = i;
@@ -294,7 +367,7 @@ TEST(SanitizerCommon, LargeMmapAllocator) {
uptr *meta = reinterpret_cast<uptr*>(a.GetMetaData(p));
CHECK_EQ(*meta, idx);
CHECK(a.PointerIsMine(p));
- a.Deallocate(p);
+ a.Deallocate(&stats, p);
}
CHECK_EQ(a.TotalMemoryUsed(), 0);
@@ -304,7 +377,7 @@ TEST(SanitizerCommon, LargeMmapAllocator) {
const uptr kNumAlignedAllocs = 100;
for (uptr i = 0; i < kNumAlignedAllocs; i++) {
uptr size = ((i % 10) + 1) * 4096;
- char *p = allocated[i] = (char *)a.Allocate(size, alignment);
+ char *p = allocated[i] = (char *)a.Allocate(&stats, size, alignment);
CHECK_EQ(p, a.GetBlockBegin(p));
CHECK_EQ(p, a.GetBlockBegin(p + size - 1));
CHECK_EQ(p, a.GetBlockBegin(p + size / 2));
@@ -312,9 +385,17 @@ TEST(SanitizerCommon, LargeMmapAllocator) {
p[0] = p[size - 1] = 0;
}
for (uptr i = 0; i < kNumAlignedAllocs; i++) {
- a.Deallocate(allocated[i]);
+ a.Deallocate(&stats, allocated[i]);
}
}
+
+ // Regression test for boundary condition in GetBlockBegin().
+ uptr page_size = GetPageSizeCached();
+ char *p = (char *)a.Allocate(&stats, page_size, 1);
+ CHECK_EQ(p, a.GetBlockBegin(p));
+ CHECK_EQ(p, (char *)a.GetBlockBegin(p + page_size - 1));
+ CHECK_NE(p, (char *)a.GetBlockBegin(p + page_size));
+ a.Deallocate(&stats, p);
}
template
@@ -327,7 +408,8 @@ void TestCombinedAllocator() {
a->Init();
AllocatorCache cache;
- cache.Init();
+ memset(&cache, 0, sizeof(cache));
+ a->InitCache(&cache);
EXPECT_EQ(a->Allocate(&cache, -1, 1), (void*)0);
EXPECT_EQ(a->Allocate(&cache, -1, 1024), (void*)0);
@@ -363,6 +445,7 @@ void TestCombinedAllocator() {
allocated.clear();
a->SwallowCache(&cache);
}
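+ // A cache set up with InitCache() is torn down with DestroyCache().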
+ a->DestroyCache(&cache);
a->TestOnlyUnmap();
}
@@ -388,14 +471,13 @@ TEST(SanitizerCommon, CombinedAllocator32Compact) {
template <class AllocatorCache>
void TestSizeClassAllocatorLocalCache() {
- static AllocatorCache static_allocator_cache;
- static_allocator_cache.Init();
AllocatorCache cache;
typedef typename AllocatorCache::Allocator Allocator;
Allocator *a = new Allocator();
a->Init();
- cache.Init();
+ memset(&cache, 0, sizeof(cache));
+ cache.Init(0);
const uptr kNumAllocs = 10000;
const int kNumIter = 100;
@@ -466,6 +548,42 @@ TEST(SanitizerCommon, AllocatorLeakTest) {
a.TestOnlyUnmap();
}
+
+// Struct which is allocated to pass info to new threads. The new thread frees
+// it.
+struct NewThreadParams {
+ AllocatorCache *thread_cache;
+ AllocatorCache::Allocator *allocator;
+ uptr class_id;
+};
+
+// Called in a new thread. Just frees its argument.
+static void *DeallocNewThreadWorker(void *arg) {
+ NewThreadParams *params = reinterpret_cast<NewThreadParams*>(arg);
+ params->thread_cache->Deallocate(params->allocator, params->class_id, params);
+ return NULL;
+}
+
+// The allocator cache is supposed to be POD and zero initialized. We should be
+// able to call Deallocate on a zeroed cache, and it will self-initialize.
+TEST(Allocator, AllocatorCacheDeallocNewThread) {
+ AllocatorCache::Allocator allocator;
+ allocator.Init();
+ AllocatorCache main_cache;
+ AllocatorCache child_cache;
+ memset(&main_cache, 0, sizeof(main_cache));
+ memset(&child_cache, 0, sizeof(child_cache));
+
+ uptr class_id = DefaultSizeClassMap::ClassID(sizeof(NewThreadParams));
+ NewThreadParams *params = reinterpret_cast<NewThreadParams*>(
+ main_cache.Allocate(&allocator, class_id));
+ params->thread_cache = &child_cache;
+ params->allocator = &allocator;
+ params->class_id = class_id;
+ pthread_t t;
+ EXPECT_EQ(0, pthread_create(&t, 0, DeallocNewThreadWorker, params));
+ EXPECT_EQ(0, pthread_join(t, 0));
+}
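+// Illustrative usage sketch (not part of this test): because the cache is
+// POD, a zero-initialized thread-local instance can be used with no explicit
+// Init() call, assuming the sanitizer THREADLOCAL macro:
+//   static THREADLOCAL AllocatorCache cache;  // zero-initialized in BSS
+//   cache.Deallocate(allocator, class_id, ptr);  // self-initializes on use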
#endif
TEST(Allocator, Basic) {
@@ -507,4 +625,122 @@ TEST(Allocator, ScopedBuffer) {
}
}
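+// Collects every chunk reported by ForEachChunk() into a set so the tests
+// below can check membership.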
+class IterationTestCallback {
+ public:
+ explicit IterationTestCallback(std::set<void *> *chunks)
+ : chunks_(chunks) {}
+ void operator()(void *chunk) const {
+ chunks_->insert(chunk);
+ }
+ private:
+ std::set<void *> *chunks_;
+};
+
+template <class Allocator>
+void TestSizeClassAllocatorIteration() {
+ Allocator *a = new Allocator;
+ a->Init();
+ SizeClassAllocatorLocalCache<Allocator> cache;
+ memset(&cache, 0, sizeof(cache));
+ cache.Init(0);
+
+ static const uptr sizes[] = {1, 16, 30, 40, 100, 1000, 10000,
+ 50000, 60000, 100000, 120000, 300000, 500000, 1000000, 2000000};
+
+ std::vector<void *> allocated;
+
+ // Allocate a bunch of chunks.
+ for (uptr s = 0; s < ARRAY_SIZE(sizes); s++) {
+ uptr size = sizes[s];
+ if (!a->CanAllocate(size, 1)) continue;
+ // printf("s = %ld\n", size);
+ uptr n_iter = std::max((uptr)6, 80000 / size);
+ // fprintf(stderr, "size: %ld iter: %ld\n", size, n_iter);
+ for (uptr j = 0; j < n_iter; j++) {
+ uptr class_id0 = Allocator::SizeClassMapT::ClassID(size);
+ void *x = cache.Allocate(a, class_id0);
+ allocated.push_back(x);
+ }
+ }
+
+ std::set<void *> reported_chunks;
+ IterationTestCallback callback(&reported_chunks);
+ a->ForceLock();
+ a->ForEachChunk(callback);
+ a->ForceUnlock();
+
+ for (uptr i = 0; i < allocated.size(); i++) {
+ // Don't use EXPECT_NE. Reporting the first mismatch is enough.
+ ASSERT_NE(reported_chunks.find(allocated[i]), reported_chunks.end());
+ }
+
+ a->TestOnlyUnmap();
+ delete a;
+}
+
+#if SANITIZER_WORDSIZE == 64
+TEST(SanitizerCommon, SizeClassAllocator64Iteration) {
+ TestSizeClassAllocatorIteration<Allocator64>();
+}
+#endif
+
+TEST(SanitizerCommon, SizeClassAllocator32Iteration) {
+ TestSizeClassAllocatorIteration<Allocator32Compact>();
+}
+
+TEST(SanitizerCommon, LargeMmapAllocatorIteration) {
+ LargeMmapAllocator<> a;
+ a.Init();
+ AllocatorStats stats;
+ stats.Init();
+
+ static const uptr kNumAllocs = 1000;
+ char *allocated[kNumAllocs];
+ static const uptr size = 40;
+ // Allocate some.
+ for (uptr i = 0; i < kNumAllocs; i++) {
+ allocated[i] = (char *)a.Allocate(&stats, size, 1);
+ }
+
+ std::set<void *> reported_chunks;
+ IterationTestCallback callback(&reported_chunks);
+ a.ForceLock();
+ a.ForEachChunk(callback);
+ a.ForceUnlock();
+
+ for (uptr i = 0; i < kNumAllocs; i++) {
+ // Don't use EXPECT_NE. Reporting the first mismatch is enough.
+ ASSERT_NE(reported_chunks.find(allocated[i]), reported_chunks.end());
+ }
+}
+
+#if SANITIZER_WORDSIZE == 64
+// Regression test for out-of-memory condition in PopulateFreeList().
+TEST(SanitizerCommon, SizeClassAllocator64PopulateFreeListOOM) {
+ // In a world where regions are small and chunks are huge...
+ typedef SizeClassMap<63, 128, 16> SpecialSizeClassMap;
+ typedef SizeClassAllocator64<kAllocatorSpace, kAllocatorSize, 0,
+ SpecialSizeClassMap> SpecialAllocator64;
+ const uptr kRegionSize =
+ kAllocatorSize / SpecialSizeClassMap::kNumClassesRounded;
+ SpecialAllocator64 *a = new SpecialAllocator64;
+ a->Init();
+ SizeClassAllocatorLocalCache<SpecialAllocator64> cache;
+ memset(&cache, 0, sizeof(cache));
+ cache.Init(0);
+
+ // ...one man is on a mission to overflow a region with a series of
+ // successive allocations.
+ const uptr kClassID = 107;
+ const uptr kAllocationSize = DefaultSizeClassMap::Size(kClassID);
+ ASSERT_LT(2 * kAllocationSize, kRegionSize);
+ ASSERT_GT(3 * kAllocationSize, kRegionSize);
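+ // The region fits two such chunks but not three, so repeated allocation
+ // must exhaust it inside PopulateFreeList().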
+ cache.Allocate(a, kClassID);
+ EXPECT_DEATH(cache.Allocate(a, kClassID) && cache.Allocate(a, kClassID),
+ "The process has exhausted");
+ a->TestOnlyUnmap();
+ delete a;
+}
+#endif
+
#endif // #if TSAN_DEBUG==0