Diffstat (limited to 'lib/tsan/tests')
-rw-r--r--  lib/tsan/tests/CMakeLists.txt                   22
-rw-r--r--  lib/tsan/tests/rtl/tsan_test.cc                  3
-rw-r--r--  lib/tsan/tests/rtl/tsan_test_util.h              2
-rw-r--r--  lib/tsan/tests/rtl/tsan_test_util_linux.cc      11
-rw-r--r--  lib/tsan/tests/unit/tsan_clock_test.cc         453
-rw-r--r--  lib/tsan/tests/unit/tsan_dense_alloc_test.cc    55
-rw-r--r--  lib/tsan/tests/unit/tsan_flags_test.cc         146
-rw-r--r--  lib/tsan/tests/unit/tsan_mman_test.cc           78
-rw-r--r--  lib/tsan/tests/unit/tsan_stack_test.cc         104
-rw-r--r--  lib/tsan/tests/unit/tsan_sync_test.cc          140
-rw-r--r--  lib/tsan/tests/unit/tsan_vector_test.cc          2
11 files changed, 794 insertions(+), 222 deletions(-)
diff --git a/lib/tsan/tests/CMakeLists.txt b/lib/tsan/tests/CMakeLists.txt
index f73a892428593..2e830c3be7143 100644
--- a/lib/tsan/tests/CMakeLists.txt
+++ b/lib/tsan/tests/CMakeLists.txt
@@ -6,19 +6,28 @@ set_target_properties(TsanUnitTests PROPERTIES
set(TSAN_UNITTEST_CFLAGS
${TSAN_CFLAGS}
- ${COMPILER_RT_GTEST_INCLUDE_CFLAGS}
+ ${COMPILER_RT_GTEST_CFLAGS}
-I${COMPILER_RT_SOURCE_DIR}/lib
-I${COMPILER_RT_SOURCE_DIR}/lib/tsan/rtl
-DGTEST_HAS_RTTI=0)
+set(TSAN_RTL_HEADERS)
+foreach (header ${TSAN_HEADERS})
+ list(APPEND TSAN_RTL_HEADERS ${CMAKE_CURRENT_SOURCE_DIR}/../${header})
+endforeach()
+
# tsan_compile(obj_list, source, arch, {headers})
macro(tsan_compile obj_list source arch)
get_filename_component(basename ${source} NAME)
set(output_obj "${basename}.${arch}.o")
get_target_flags_for_arch(${arch} TARGET_CFLAGS)
+ set(COMPILE_DEPS ${TSAN_RTL_HEADERS} ${ARGN})
+ if(NOT COMPILER_RT_STANDALONE_BUILD)
+ list(APPEND COMPILE_DEPS gtest tsan)
+ endif()
clang_compile(${output_obj} ${source}
CFLAGS ${TSAN_UNITTEST_CFLAGS} ${TARGET_CFLAGS}
- DEPS gtest ${TSAN_RUNTIME_LIBRARIES} ${ARGN})
+ DEPS ${COMPILE_DEPS})
list(APPEND ${obj_list} ${output_obj})
endmacro()
@@ -31,9 +40,16 @@ macro(add_tsan_unittest testname)
tsan_compile(TEST_OBJECTS ${SOURCE} x86_64 ${TEST_HEADERS})
endforeach()
get_target_flags_for_arch(${arch} TARGET_LINK_FLAGS)
+ set(TEST_DEPS ${TEST_OBJECTS})
+ if(NOT COMPILER_RT_STANDALONE_BUILD)
+ list(APPEND TEST_DEPS tsan)
+ endif()
+ # FIXME: Looks like we should link TSan with just-built runtime,
+ # and not rely on -fsanitize=thread, as these tests are essentially
+ # unit tests.
add_compiler_rt_test(TsanUnitTests ${testname}
OBJECTS ${TEST_OBJECTS}
- DEPS ${TSAN_RUNTIME_LIBRARIES} ${TEST_OBJECTS}
+ DEPS ${TEST_DEPS}
LINK_FLAGS ${TARGET_LINK_FLAGS}
-fsanitize=thread
-lstdc++ -lm)
diff --git a/lib/tsan/tests/rtl/tsan_test.cc b/lib/tsan/tests/rtl/tsan_test.cc
index 2184284d39cec..b8b9555c2bff1 100644
--- a/lib/tsan/tests/rtl/tsan_test.cc
+++ b/lib/tsan/tests/rtl/tsan_test.cc
@@ -45,6 +45,9 @@ int run_tests(int argc, char **argv) {
return res;
}
+const char *argv0;
+
int main(int argc, char **argv) {
+ argv0 = argv[0];
return run_tests(argc, argv);
}
diff --git a/lib/tsan/tests/rtl/tsan_test_util.h b/lib/tsan/tests/rtl/tsan_test_util.h
index 483a564c84750..84d277b137f07 100644
--- a/lib/tsan/tests/rtl/tsan_test_util.h
+++ b/lib/tsan/tests/rtl/tsan_test_util.h
@@ -37,7 +37,7 @@ class Mutex {
~Mutex();
void Init();
- void StaticInit(); // Emulates static initalization (tsan invisible).
+ void StaticInit(); // Emulates static initialization (tsan invisible).
void Destroy();
void Lock();
bool TryLock();
diff --git a/lib/tsan/tests/rtl/tsan_test_util_linux.cc b/lib/tsan/tests/rtl/tsan_test_util_linux.cc
index a2601486a2e1f..9298bf051af2c 100644
--- a/lib/tsan/tests/rtl/tsan_test_util_linux.cc
+++ b/lib/tsan/tests/rtl/tsan_test_util_linux.cc
@@ -10,7 +10,7 @@
//
// This file is a part of ThreadSanitizer (TSan), a race detector.
//
-// Test utils, linux implementation.
+// Test utils, Linux and FreeBSD implementation.
//===----------------------------------------------------------------------===//
#include "sanitizer_common/sanitizer_atomic.h"
@@ -263,9 +263,14 @@ void ScopedThread::Impl::HandleEvent(Event *ev) {
}
}
CHECK_NE(tsan_mop, 0);
- errno = ECHRNG;
+#if defined(__FreeBSD__)
+ const int ErrCode = ESOCKTNOSUPPORT;
+#else
+ const int ErrCode = ECHRNG;
+#endif
+ errno = ErrCode;
tsan_mop(ev->ptr);
- CHECK_EQ(errno, ECHRNG); // In no case must errno be changed.
+ CHECK_EQ(ErrCode, errno); // In no case must errno be changed.
break;
}
case Event::VPTR_UPDATE:
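The hunk above tightens the errno check around intercepted memory accesses: the harness seeds errno with a value no libc call on the platform should produce, runs the access through the TSan interceptor, and verifies errno afterwards. ECHRNG does not exist on FreeBSD, so ESOCKTNOSUPPORT stands in there. A minimal sketch of the pattern outside the harness (the helper name is illustrative, not part of the patch):

  #include <cassert>
  #include <cerrno>

  #if defined(__FreeBSD__)
  const int kErrnoSentinel = ESOCKTNOSUPPORT;  // ECHRNG is absent on FreeBSD
  #else
  const int kErrnoSentinel = ECHRNG;
  #endif

  template <typename Fn>
  void CheckPreservesErrno(Fn intercepted_call) {
    errno = kErrnoSentinel;           // seed with a value nothing should set
    intercepted_call();               // runs through the sanitizer interceptor
    assert(errno == kErrnoSentinel);  // in no case must errno be changed
  }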
diff --git a/lib/tsan/tests/unit/tsan_clock_test.cc b/lib/tsan/tests/unit/tsan_clock_test.cc
index fe886e10bc57b..a1fd2b7f6e993 100644
--- a/lib/tsan/tests/unit/tsan_clock_test.cc
+++ b/lib/tsan/tests/unit/tsan_clock_test.cc
@@ -13,110 +13,419 @@
#include "tsan_clock.h"
#include "tsan_rtl.h"
#include "gtest/gtest.h"
+#include <time.h>
namespace __tsan {
+ClockCache cache;
+
TEST(Clock, VectorBasic) {
- ScopedInRtl in_rtl;
- ThreadClock clk;
- CHECK_EQ(clk.size(), 0);
- clk.tick(0);
- CHECK_EQ(clk.size(), 1);
- CHECK_EQ(clk.get(0), 1);
- clk.tick(3);
- CHECK_EQ(clk.size(), 4);
- CHECK_EQ(clk.get(0), 1);
- CHECK_EQ(clk.get(1), 0);
- CHECK_EQ(clk.get(2), 0);
- CHECK_EQ(clk.get(3), 1);
- clk.tick(3);
- CHECK_EQ(clk.get(3), 2);
+ ThreadClock clk(0);
+ ASSERT_EQ(clk.size(), 1U);
+ clk.tick();
+ ASSERT_EQ(clk.size(), 1U);
+ ASSERT_EQ(clk.get(0), 1U);
+ clk.set(3, clk.get(3) + 1);
+ ASSERT_EQ(clk.size(), 4U);
+ ASSERT_EQ(clk.get(0), 1U);
+ ASSERT_EQ(clk.get(1), 0U);
+ ASSERT_EQ(clk.get(2), 0U);
+ ASSERT_EQ(clk.get(3), 1U);
+ clk.set(3, clk.get(3) + 1);
+ ASSERT_EQ(clk.get(3), 2U);
}
TEST(Clock, ChunkedBasic) {
- ScopedInRtl in_rtl;
- ThreadClock vector;
+ ThreadClock vector(0);
SyncClock chunked;
- CHECK_EQ(vector.size(), 0);
- CHECK_EQ(chunked.size(), 0);
- vector.acquire(&chunked);
- CHECK_EQ(vector.size(), 0);
- CHECK_EQ(chunked.size(), 0);
- vector.release(&chunked);
- CHECK_EQ(vector.size(), 0);
- CHECK_EQ(chunked.size(), 0);
- vector.acq_rel(&chunked);
- CHECK_EQ(vector.size(), 0);
- CHECK_EQ(chunked.size(), 0);
+ ASSERT_EQ(vector.size(), 1U);
+ ASSERT_EQ(chunked.size(), 0U);
+ vector.acquire(&cache, &chunked);
+ ASSERT_EQ(vector.size(), 1U);
+ ASSERT_EQ(chunked.size(), 0U);
+ vector.release(&cache, &chunked);
+ ASSERT_EQ(vector.size(), 1U);
+ ASSERT_EQ(chunked.size(), 1U);
+ vector.acq_rel(&cache, &chunked);
+ ASSERT_EQ(vector.size(), 1U);
+ ASSERT_EQ(chunked.size(), 1U);
+ chunked.Reset(&cache);
}
TEST(Clock, AcquireRelease) {
- ScopedInRtl in_rtl;
- ThreadClock vector1;
- vector1.tick(100);
+ ThreadClock vector1(100);
+ vector1.tick();
SyncClock chunked;
- vector1.release(&chunked);
- CHECK_EQ(chunked.size(), 101);
- ThreadClock vector2;
- vector2.acquire(&chunked);
- CHECK_EQ(vector2.size(), 101);
- CHECK_EQ(vector2.get(0), 0);
- CHECK_EQ(vector2.get(1), 0);
- CHECK_EQ(vector2.get(99), 0);
- CHECK_EQ(vector2.get(100), 1);
+ vector1.release(&cache, &chunked);
+ ASSERT_EQ(chunked.size(), 101U);
+ ThreadClock vector2(0);
+ vector2.acquire(&cache, &chunked);
+ ASSERT_EQ(vector2.size(), 101U);
+ ASSERT_EQ(vector2.get(0), 0U);
+ ASSERT_EQ(vector2.get(1), 0U);
+ ASSERT_EQ(vector2.get(99), 0U);
+ ASSERT_EQ(vector2.get(100), 1U);
+ chunked.Reset(&cache);
+}
+
+TEST(Clock, RepeatedAcquire) {
+ ThreadClock thr1(1);
+ thr1.tick();
+ ThreadClock thr2(2);
+ thr2.tick();
+
+ SyncClock sync;
+ thr1.ReleaseStore(&cache, &sync);
+
+ thr2.acquire(&cache, &sync);
+ thr2.acquire(&cache, &sync);
+
+ sync.Reset(&cache);
}
TEST(Clock, ManyThreads) {
- ScopedInRtl in_rtl;
SyncClock chunked;
- for (int i = 0; i < 100; i++) {
- ThreadClock vector;
- vector.tick(i);
- vector.release(&chunked);
- CHECK_EQ(chunked.size(), i + 1);
- vector.acquire(&chunked);
- CHECK_EQ(vector.size(), i + 1);
- }
- ThreadClock vector;
- vector.acquire(&chunked);
- CHECK_EQ(vector.size(), 100);
- for (int i = 0; i < 100; i++)
- CHECK_EQ(vector.get(i), 1);
+ for (unsigned i = 0; i < 100; i++) {
+ ThreadClock vector(0);
+ vector.tick();
+ vector.set(i, 1);
+ vector.release(&cache, &chunked);
+ ASSERT_EQ(i + 1, chunked.size());
+ vector.acquire(&cache, &chunked);
+ ASSERT_EQ(i + 1, vector.size());
+ }
+
+ for (unsigned i = 0; i < 100; i++)
+ ASSERT_EQ(1U, chunked.get(i));
+
+ ThreadClock vector(1);
+ vector.acquire(&cache, &chunked);
+ ASSERT_EQ(100U, vector.size());
+ for (unsigned i = 0; i < 100; i++)
+ ASSERT_EQ(1U, vector.get(i));
+
+ chunked.Reset(&cache);
}
TEST(Clock, DifferentSizes) {
- ScopedInRtl in_rtl;
{
- ThreadClock vector1;
- vector1.tick(10);
- ThreadClock vector2;
- vector2.tick(20);
+ ThreadClock vector1(10);
+ vector1.tick();
+ ThreadClock vector2(20);
+ vector2.tick();
{
SyncClock chunked;
- vector1.release(&chunked);
- CHECK_EQ(chunked.size(), 11);
- vector2.release(&chunked);
- CHECK_EQ(chunked.size(), 21);
+ vector1.release(&cache, &chunked);
+ ASSERT_EQ(chunked.size(), 11U);
+ vector2.release(&cache, &chunked);
+ ASSERT_EQ(chunked.size(), 21U);
+ chunked.Reset(&cache);
}
{
SyncClock chunked;
- vector2.release(&chunked);
- CHECK_EQ(chunked.size(), 21);
- vector1.release(&chunked);
- CHECK_EQ(chunked.size(), 21);
+ vector2.release(&cache, &chunked);
+ ASSERT_EQ(chunked.size(), 21U);
+ vector1.release(&cache, &chunked);
+ ASSERT_EQ(chunked.size(), 21U);
+ chunked.Reset(&cache);
}
{
SyncClock chunked;
- vector1.release(&chunked);
- vector2.acquire(&chunked);
- CHECK_EQ(vector2.size(), 21);
+ vector1.release(&cache, &chunked);
+ vector2.acquire(&cache, &chunked);
+ ASSERT_EQ(vector2.size(), 21U);
+ chunked.Reset(&cache);
}
{
SyncClock chunked;
- vector2.release(&chunked);
- vector1.acquire(&chunked);
- CHECK_EQ(vector1.size(), 21);
+ vector2.release(&cache, &chunked);
+ vector1.acquire(&cache, &chunked);
+ ASSERT_EQ(vector1.size(), 21U);
+ chunked.Reset(&cache);
+ }
+ }
+}
+
+TEST(Clock, Growth) {
+ {
+ ThreadClock vector(10);
+ vector.tick();
+ vector.set(5, 42);
+ SyncClock sync;
+ vector.release(&cache, &sync);
+ ASSERT_EQ(sync.size(), 11U);
+ ASSERT_EQ(sync.get(0), 0ULL);
+ ASSERT_EQ(sync.get(1), 0ULL);
+ ASSERT_EQ(sync.get(5), 42ULL);
+ ASSERT_EQ(sync.get(9), 0ULL);
+ ASSERT_EQ(sync.get(10), 1ULL);
+ sync.Reset(&cache);
+ }
+ {
+ ThreadClock vector1(10);
+ vector1.tick();
+ ThreadClock vector2(20);
+ vector2.tick();
+ SyncClock sync;
+ vector1.release(&cache, &sync);
+ vector2.release(&cache, &sync);
+ ASSERT_EQ(sync.size(), 21U);
+ ASSERT_EQ(sync.get(0), 0ULL);
+ ASSERT_EQ(sync.get(10), 1ULL);
+ ASSERT_EQ(sync.get(19), 0ULL);
+ ASSERT_EQ(sync.get(20), 1ULL);
+ sync.Reset(&cache);
+ }
+ {
+ ThreadClock vector(100);
+ vector.tick();
+ vector.set(5, 42);
+ vector.set(90, 84);
+ SyncClock sync;
+ vector.release(&cache, &sync);
+ ASSERT_EQ(sync.size(), 101U);
+ ASSERT_EQ(sync.get(0), 0ULL);
+ ASSERT_EQ(sync.get(1), 0ULL);
+ ASSERT_EQ(sync.get(5), 42ULL);
+ ASSERT_EQ(sync.get(60), 0ULL);
+ ASSERT_EQ(sync.get(70), 0ULL);
+ ASSERT_EQ(sync.get(90), 84ULL);
+ ASSERT_EQ(sync.get(99), 0ULL);
+ ASSERT_EQ(sync.get(100), 1ULL);
+ sync.Reset(&cache);
+ }
+ {
+ ThreadClock vector1(10);
+ vector1.tick();
+ ThreadClock vector2(100);
+ vector2.tick();
+ SyncClock sync;
+ vector1.release(&cache, &sync);
+ vector2.release(&cache, &sync);
+ ASSERT_EQ(sync.size(), 101U);
+ ASSERT_EQ(sync.get(0), 0ULL);
+ ASSERT_EQ(sync.get(10), 1ULL);
+ ASSERT_EQ(sync.get(99), 0ULL);
+ ASSERT_EQ(sync.get(100), 1ULL);
+ sync.Reset(&cache);
+ }
+}
+
+const int kThreads = 4;
+const int kClocks = 4;
+
+// SimpleSyncClock and SimpleThreadClock implement the same thing as
+// SyncClock and ThreadClock, but in a very simple way.
+struct SimpleSyncClock {
+ u64 clock[kThreads];
+ uptr size;
+
+ SimpleSyncClock() {
+ Reset();
+ }
+
+ void Reset() {
+ size = 0;
+ for (uptr i = 0; i < kThreads; i++)
+ clock[i] = 0;
+ }
+
+ bool verify(const SyncClock *other) const {
+ for (uptr i = 0; i < min(size, other->size()); i++) {
+ if (clock[i] != other->get(i))
+ return false;
+ }
+ for (uptr i = min(size, other->size()); i < max(size, other->size()); i++) {
+ if (i < size && clock[i] != 0)
+ return false;
+ if (i < other->size() && other->get(i) != 0)
+ return false;
+ }
+ return true;
+ }
+};
+
+struct SimpleThreadClock {
+ u64 clock[kThreads];
+ uptr size;
+ unsigned tid;
+
+ explicit SimpleThreadClock(unsigned tid) {
+ this->tid = tid;
+ size = tid + 1;
+ for (uptr i = 0; i < kThreads; i++)
+ clock[i] = 0;
+ }
+
+ void tick() {
+ clock[tid]++;
+ }
+
+ void acquire(const SimpleSyncClock *src) {
+ if (size < src->size)
+ size = src->size;
+ for (uptr i = 0; i < kThreads; i++)
+ clock[i] = max(clock[i], src->clock[i]);
+ }
+
+ void release(SimpleSyncClock *dst) const {
+ if (dst->size < size)
+ dst->size = size;
+ for (uptr i = 0; i < kThreads; i++)
+ dst->clock[i] = max(dst->clock[i], clock[i]);
+ }
+
+ void acq_rel(SimpleSyncClock *dst) {
+ acquire(dst);
+ release(dst);
+ }
+
+ void ReleaseStore(SimpleSyncClock *dst) const {
+ if (dst->size < size)
+ dst->size = size;
+ for (uptr i = 0; i < kThreads; i++)
+ dst->clock[i] = clock[i];
+ }
+
+ bool verify(const ThreadClock *other) const {
+ for (uptr i = 0; i < min(size, other->size()); i++) {
+ if (clock[i] != other->get(i))
+ return false;
+ }
+ for (uptr i = min(size, other->size()); i < max(size, other->size()); i++) {
+ if (i < size && clock[i] != 0)
+ return false;
+ if (i < other->size() && other->get(i) != 0)
+ return false;
}
+ return true;
+ }
+};
+
+static bool ClockFuzzer(bool printing) {
+ // Create kThreads thread clocks.
+ SimpleThreadClock *thr0[kThreads];
+ ThreadClock *thr1[kThreads];
+ unsigned reused[kThreads];
+ for (unsigned i = 0; i < kThreads; i++) {
+ reused[i] = 0;
+ thr0[i] = new SimpleThreadClock(i);
+ thr1[i] = new ThreadClock(i, reused[i]);
+ }
+
+ // Create kClocks sync clocks.
+ SimpleSyncClock *sync0[kClocks];
+ SyncClock *sync1[kClocks];
+ for (unsigned i = 0; i < kClocks; i++) {
+ sync0[i] = new SimpleSyncClock();
+ sync1[i] = new SyncClock();
+ }
+
+ // Do N random operations (acquire, release, etc) and compare results
+ // for SimpleThread/SyncClock and real Thread/SyncClock.
+ for (int i = 0; i < 10000; i++) {
+ unsigned tid = rand() % kThreads;
+ unsigned cid = rand() % kClocks;
+ thr0[tid]->tick();
+ thr1[tid]->tick();
+
+ switch (rand() % 6) {
+ case 0:
+ if (printing)
+ printf("acquire thr%d <- clk%d\n", tid, cid);
+ thr0[tid]->acquire(sync0[cid]);
+ thr1[tid]->acquire(&cache, sync1[cid]);
+ break;
+ case 1:
+ if (printing)
+ printf("release thr%d -> clk%d\n", tid, cid);
+ thr0[tid]->release(sync0[cid]);
+ thr1[tid]->release(&cache, sync1[cid]);
+ break;
+ case 2:
+ if (printing)
+ printf("acq_rel thr%d <> clk%d\n", tid, cid);
+ thr0[tid]->acq_rel(sync0[cid]);
+ thr1[tid]->acq_rel(&cache, sync1[cid]);
+ break;
+ case 3:
+ if (printing)
+ printf("rel_str thr%d >> clk%d\n", tid, cid);
+ thr0[tid]->ReleaseStore(sync0[cid]);
+ thr1[tid]->ReleaseStore(&cache, sync1[cid]);
+ break;
+ case 4:
+ if (printing)
+ printf("reset clk%d\n", cid);
+ sync0[cid]->Reset();
+ sync1[cid]->Reset(&cache);
+ break;
+ case 5:
+ if (printing)
+ printf("reset thr%d\n", tid);
+ u64 epoch = thr0[tid]->clock[tid] + 1;
+ reused[tid]++;
+ delete thr0[tid];
+ thr0[tid] = new SimpleThreadClock(tid);
+ thr0[tid]->clock[tid] = epoch;
+ delete thr1[tid];
+ thr1[tid] = new ThreadClock(tid, reused[tid]);
+ thr1[tid]->set(epoch);
+ break;
+ }
+
+ if (printing) {
+ for (unsigned i = 0; i < kThreads; i++) {
+ printf("thr%d: ", i);
+ thr1[i]->DebugDump(printf);
+ printf("\n");
+ }
+ for (unsigned i = 0; i < kClocks; i++) {
+ printf("clk%d: ", i);
+ sync1[i]->DebugDump(printf);
+ printf("\n");
+ }
+
+ printf("\n");
+ }
+
+ if (!thr0[tid]->verify(thr1[tid]) || !sync0[cid]->verify(sync1[cid])) {
+ if (!printing)
+ return false;
+ printf("differs with model:\n");
+ for (unsigned i = 0; i < kThreads; i++) {
+ printf("thr%d: clock=[", i);
+ for (uptr j = 0; j < thr0[i]->size; j++)
+ printf("%s%llu", j == 0 ? "" : ",", thr0[i]->clock[j]);
+ printf("]\n");
+ }
+ for (unsigned i = 0; i < kClocks; i++) {
+ printf("clk%d: clock=[", i);
+ for (uptr j = 0; j < sync0[i]->size; j++)
+ printf("%s%llu", j == 0 ? "" : ",", sync0[i]->clock[j]);
+ printf("]\n");
+ }
+ return false;
+ }
+ }
+
+ for (unsigned i = 0; i < kClocks; i++) {
+ sync1[i]->Reset(&cache);
+ }
+ return true;
+}
+
+TEST(Clock, Fuzzer) {
+ timespec ts;
+ clock_gettime(CLOCK_MONOTONIC, &ts);
+ int seed = ts.tv_sec + ts.tv_nsec;
+ printf("seed=%d\n", seed);
+ srand(seed);
+ if (!ClockFuzzer(false)) {
+ // Redo the test with the same seed, but logging operations.
+ srand(seed);
+ ClockFuzzer(true);
+ ASSERT_TRUE(false);
}
}
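The rewritten clock tests pair each runtime clock with a deliberately simple model (SimpleThreadClock/SimpleSyncClock) and fuzz random operation sequences against both, asserting the two stay in agreement. The semantics both sides implement are the standard vector-clock acquire/release rules; a self-contained sketch of those rules, with illustrative names rather than the runtime's API:

  #include <algorithm>
  #include <cassert>
  #include <vector>

  typedef std::vector<unsigned long long> VectorClock;

  // release: the sync clock becomes the element-wise max, i.e. the thread
  // publishes everything it has observed so far.
  void Release(const VectorClock &thr, VectorClock &sync) {
    sync.resize(std::max(sync.size(), thr.size()), 0);
    for (size_t i = 0; i < thr.size(); i++)
      sync[i] = std::max(sync[i], thr[i]);
  }

  // acquire: the thread clock becomes the element-wise max, i.e. the thread
  // observes everything published into the sync object.
  void Acquire(VectorClock &thr, const VectorClock &sync) {
    thr.resize(std::max(sync.size(), thr.size()), 0);
    for (size_t i = 0; i < sync.size(); i++)
      thr[i] = std::max(thr[i], sync[i]);
  }

  int main() {
    VectorClock t1(2, 0), t2(2, 0), mutex;
    t1[0] = 1;           // thread 1 ticks, then releases (e.g. mutex unlock)
    Release(t1, mutex);
    Acquire(t2, mutex);  // thread 2 acquires (e.g. mutex lock)
    assert(t2[0] == 1);  // t2 now happens-after t1's tick
    return 0;
  }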
diff --git a/lib/tsan/tests/unit/tsan_dense_alloc_test.cc b/lib/tsan/tests/unit/tsan_dense_alloc_test.cc
new file mode 100644
index 0000000000000..e848e48c6641d
--- /dev/null
+++ b/lib/tsan/tests/unit/tsan_dense_alloc_test.cc
@@ -0,0 +1,55 @@
+//===-- tsan_dense_alloc_test.cc ------------------------------------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is a part of ThreadSanitizer (TSan), a race detector.
+//
+//===----------------------------------------------------------------------===//
+#include "tsan_dense_alloc.h"
+#include "tsan_rtl.h"
+#include "tsan_mman.h"
+#include "gtest/gtest.h"
+
+#include <stdlib.h>
+#include <stdint.h>
+#include <map>
+
+namespace __tsan {
+
+TEST(DenseSlabAlloc, Basic) {
+ typedef DenseSlabAlloc<int, 128, 128> Alloc;
+ typedef Alloc::Cache Cache;
+ typedef Alloc::IndexT IndexT;
+ const int N = 1000;
+
+ Alloc alloc;
+ Cache cache;
+ alloc.InitCache(&cache);
+
+ IndexT blocks[N];
+ for (int ntry = 0; ntry < 3; ntry++) {
+ for (int i = 0; i < N; i++) {
+ IndexT idx = alloc.Alloc(&cache);
+ blocks[i] = idx;
+ EXPECT_NE(idx, 0U);
+ int *v = alloc.Map(idx);
+ *v = i;
+ }
+
+ for (int i = 0; i < N; i++) {
+ IndexT idx = blocks[i];
+ int *v = alloc.Map(idx);
+ EXPECT_EQ(*v, i);
+ alloc.Free(&cache, idx);
+ }
+
+ alloc.FlushCache(&cache);
+ }
+}
+
+} // namespace __tsan
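The new DenseSlabAlloc test exercises an allocator that hands out small integer indices instead of pointers: Alloc returns an index, Map translates it back to the object, and a per-thread Cache batches refills. A toy illustration of the handle-based idea (illustrative only; the real allocator carves storage into fixed slabs, so its Map results are stable):

  #include <vector>

  template <typename T>
  class ToyIndexPool {
    std::vector<T> storage_;      // index 0 is reserved as the null handle
    std::vector<unsigned> free_;  // freelist of recycled indices
   public:
    ToyIndexPool() : storage_(1) { storage_.reserve(1024); }
    unsigned Alloc() {
      if (!free_.empty()) {
        unsigned idx = free_.back();
        free_.pop_back();
        return idx;
      }
      storage_.push_back(T());
      return (unsigned)(storage_.size() - 1);
    }
    T *Map(unsigned idx) { return &storage_[idx]; }  // handle -> object
    void Free(unsigned idx) { free_.push_back(idx); }
  };

  // Usage mirrors the test above: unsigned h = pool.Alloc(); *pool.Map(h) = 1;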
diff --git a/lib/tsan/tests/unit/tsan_flags_test.cc b/lib/tsan/tests/unit/tsan_flags_test.cc
index ffb9c55b605fe..22610c0dc42f5 100644
--- a/lib/tsan/tests/unit/tsan_flags_test.cc
+++ b/lib/tsan/tests/unit/tsan_flags_test.cc
@@ -13,11 +13,11 @@
#include "tsan_flags.h"
#include "tsan_rtl.h"
#include "gtest/gtest.h"
+#include <string>
namespace __tsan {
TEST(Flags, Basic) {
- ScopedInRtl in_rtl;
// At least should not crash.
Flags f;
InitializeFlags(&f, 0);
@@ -25,7 +25,6 @@ TEST(Flags, Basic) {
}
TEST(Flags, DefaultValues) {
- ScopedInRtl in_rtl;
Flags f;
f.enable_annotations = false;
@@ -35,4 +34,147 @@ TEST(Flags, DefaultValues) {
EXPECT_EQ(true, f.enable_annotations);
}
+static const char *options1 =
+ " enable_annotations=0"
+ " suppress_equal_stacks=0"
+ " suppress_equal_addresses=0"
+ " report_bugs=0"
+ " report_thread_leaks=0"
+ " report_destroy_locked=0"
+ " report_mutex_bugs=0"
+ " report_signal_unsafe=0"
+ " report_atomic_races=0"
+ " force_seq_cst_atomics=0"
+ " print_benign=0"
+ " exitcode=111"
+ " halt_on_error=0"
+ " atexit_sleep_ms=222"
+ " profile_memory=qqq"
+ " flush_memory_ms=444"
+ " flush_symbolizer_ms=555"
+ " memory_limit_mb=666"
+ " stop_on_start=0"
+ " running_on_valgrind=0"
+ " history_size=5"
+ " io_sync=1"
+ " die_after_fork=true"
+ "";
+
+static const char *options2 =
+ " enable_annotations=true"
+ " suppress_equal_stacks=true"
+ " suppress_equal_addresses=true"
+ " report_bugs=true"
+ " report_thread_leaks=true"
+ " report_destroy_locked=true"
+ " report_mutex_bugs=true"
+ " report_signal_unsafe=true"
+ " report_atomic_races=true"
+ " force_seq_cst_atomics=true"
+ " print_benign=true"
+ " exitcode=222"
+ " halt_on_error=true"
+ " atexit_sleep_ms=123"
+ " profile_memory=bbbbb"
+ " flush_memory_ms=234"
+ " flush_symbolizer_ms=345"
+ " memory_limit_mb=456"
+ " stop_on_start=true"
+ " running_on_valgrind=true"
+ " history_size=6"
+ " io_sync=2"
+ " die_after_fork=false"
+ "";
+
+void VerifyOptions1(Flags *f) {
+ EXPECT_EQ(f->enable_annotations, 0);
+ EXPECT_EQ(f->suppress_equal_stacks, 0);
+ EXPECT_EQ(f->suppress_equal_addresses, 0);
+ EXPECT_EQ(f->report_bugs, 0);
+ EXPECT_EQ(f->report_thread_leaks, 0);
+ EXPECT_EQ(f->report_destroy_locked, 0);
+ EXPECT_EQ(f->report_mutex_bugs, 0);
+ EXPECT_EQ(f->report_signal_unsafe, 0);
+ EXPECT_EQ(f->report_atomic_races, 0);
+ EXPECT_EQ(f->force_seq_cst_atomics, 0);
+ EXPECT_EQ(f->print_benign, 0);
+ EXPECT_EQ(f->exitcode, 111);
+ EXPECT_EQ(f->halt_on_error, 0);
+ EXPECT_EQ(f->atexit_sleep_ms, 222);
+ EXPECT_EQ(f->profile_memory, std::string("qqq"));
+ EXPECT_EQ(f->flush_memory_ms, 444);
+ EXPECT_EQ(f->flush_symbolizer_ms, 555);
+ EXPECT_EQ(f->memory_limit_mb, 666);
+ EXPECT_EQ(f->stop_on_start, 0);
+ EXPECT_EQ(f->running_on_valgrind, 0);
+ EXPECT_EQ(f->history_size, 5);
+ EXPECT_EQ(f->io_sync, 1);
+ EXPECT_EQ(f->die_after_fork, true);
+}
+
+void VerifyOptions2(Flags *f) {
+ EXPECT_EQ(f->enable_annotations, true);
+ EXPECT_EQ(f->suppress_equal_stacks, true);
+ EXPECT_EQ(f->suppress_equal_addresses, true);
+ EXPECT_EQ(f->report_bugs, true);
+ EXPECT_EQ(f->report_thread_leaks, true);
+ EXPECT_EQ(f->report_destroy_locked, true);
+ EXPECT_EQ(f->report_mutex_bugs, true);
+ EXPECT_EQ(f->report_signal_unsafe, true);
+ EXPECT_EQ(f->report_atomic_races, true);
+ EXPECT_EQ(f->force_seq_cst_atomics, true);
+ EXPECT_EQ(f->print_benign, true);
+ EXPECT_EQ(f->exitcode, 222);
+ EXPECT_EQ(f->halt_on_error, true);
+ EXPECT_EQ(f->atexit_sleep_ms, 123);
+ EXPECT_EQ(f->profile_memory, std::string("bbbbb"));
+ EXPECT_EQ(f->flush_memory_ms, 234);
+ EXPECT_EQ(f->flush_symbolizer_ms, 345);
+ EXPECT_EQ(f->memory_limit_mb, 456);
+ EXPECT_EQ(f->stop_on_start, true);
+ EXPECT_EQ(f->running_on_valgrind, true);
+ EXPECT_EQ(f->history_size, 6);
+ EXPECT_EQ(f->io_sync, 2);
+ EXPECT_EQ(f->die_after_fork, false);
+}
+
+static const char *test_default_options;
+extern "C" const char *__tsan_default_options() {
+ return test_default_options;
+}
+
+TEST(Flags, ParseDefaultOptions) {
+ Flags f;
+
+ test_default_options = options1;
+ InitializeFlags(&f, "");
+ VerifyOptions1(&f);
+
+ test_default_options = options2;
+ InitializeFlags(&f, "");
+ VerifyOptions2(&f);
+}
+
+TEST(Flags, ParseEnvOptions) {
+ Flags f;
+
+ InitializeFlags(&f, options1);
+ VerifyOptions1(&f);
+
+ InitializeFlags(&f, options2);
+ VerifyOptions2(&f);
+}
+
+TEST(Flags, ParsePriority) {
+ Flags f;
+
+ test_default_options = options2;
+ InitializeFlags(&f, options1);
+ VerifyOptions1(&f);
+
+ test_default_options = options1;
+ InitializeFlags(&f, options2);
+ VerifyOptions2(&f);
+}
+
} // namespace __tsan
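These tests drive flag parsing through the __tsan_default_options hook, which a program may define to bake in its own defaults; options supplied at runtime (e.g. via TSAN_OPTIONS) override them, which is what ParsePriority verifies. In application code the hook looks like this (flag values are illustrative):

  // Compiled into the instrumented program itself.
  extern "C" const char *__tsan_default_options() {
    return "halt_on_error=1 history_size=7";
  }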
diff --git a/lib/tsan/tests/unit/tsan_mman_test.cc b/lib/tsan/tests/unit/tsan_mman_test.cc
index e1ad7ac51ad67..d969989df7685 100644
--- a/lib/tsan/tests/unit/tsan_mman_test.cc
+++ b/lib/tsan/tests/unit/tsan_mman_test.cc
@@ -11,24 +11,14 @@
//
//===----------------------------------------------------------------------===//
#include <limits>
+#include <sanitizer/allocator_interface.h>
#include "tsan_mman.h"
#include "tsan_rtl.h"
#include "gtest/gtest.h"
-extern "C" {
-uptr __tsan_get_current_allocated_bytes();
-uptr __tsan_get_heap_size();
-uptr __tsan_get_free_bytes();
-uptr __tsan_get_unmapped_bytes();
-uptr __tsan_get_estimated_allocated_size(uptr size);
-bool __tsan_get_ownership(void *p);
-uptr __tsan_get_allocated_size(void *p);
-}
-
namespace __tsan {
TEST(Mman, Internal) {
- ScopedInRtl in_rtl;
char *p = (char*)internal_alloc(MBlockScopedBuf, 10);
EXPECT_NE(p, (char*)0);
char *p2 = (char*)internal_alloc(MBlockScopedBuf, 20);
@@ -45,7 +35,6 @@ TEST(Mman, Internal) {
}
TEST(Mman, User) {
- ScopedInRtl in_rtl;
ThreadState *thr = cur_thread();
uptr pc = 0;
char *p = (char*)user_alloc(thr, pc, 10);
@@ -53,26 +42,13 @@ TEST(Mman, User) {
char *p2 = (char*)user_alloc(thr, pc, 20);
EXPECT_NE(p2, (char*)0);
EXPECT_NE(p2, p);
- MBlock *b = user_mblock(thr, p);
- EXPECT_NE(b, (MBlock*)0);
- EXPECT_EQ(b->Size(), (uptr)10);
- MBlock *b2 = user_mblock(thr, p2);
- EXPECT_NE(b2, (MBlock*)0);
- EXPECT_EQ(b2->Size(), (uptr)20);
- for (int i = 0; i < 10; i++) {
- p[i] = 42;
- EXPECT_EQ(b, user_mblock(thr, p + i));
- }
- for (int i = 0; i < 20; i++) {
- ((char*)p2)[i] = 42;
- EXPECT_EQ(b2, user_mblock(thr, p2 + i));
- }
+ EXPECT_EQ(10U, user_alloc_usable_size(p));
+ EXPECT_EQ(20U, user_alloc_usable_size(p2));
user_free(thr, pc, p);
user_free(thr, pc, p2);
}
TEST(Mman, UserRealloc) {
- ScopedInRtl in_rtl;
ThreadState *thr = cur_thread();
uptr pc = 0;
{
@@ -118,49 +94,53 @@ TEST(Mman, UserRealloc) {
}
TEST(Mman, UsableSize) {
- ScopedInRtl in_rtl;
ThreadState *thr = cur_thread();
uptr pc = 0;
char *p = (char*)user_alloc(thr, pc, 10);
char *p2 = (char*)user_alloc(thr, pc, 20);
- EXPECT_EQ(0U, user_alloc_usable_size(thr, pc, NULL));
- EXPECT_EQ(10U, user_alloc_usable_size(thr, pc, p));
- EXPECT_EQ(20U, user_alloc_usable_size(thr, pc, p2));
+ EXPECT_EQ(0U, user_alloc_usable_size(NULL));
+ EXPECT_EQ(10U, user_alloc_usable_size(p));
+ EXPECT_EQ(20U, user_alloc_usable_size(p2));
user_free(thr, pc, p);
user_free(thr, pc, p2);
+ EXPECT_EQ(0U, user_alloc_usable_size((void*)0x4123));
}
TEST(Mman, Stats) {
- ScopedInRtl in_rtl;
ThreadState *thr = cur_thread();
- uptr alloc0 = __tsan_get_current_allocated_bytes();
- uptr heap0 = __tsan_get_heap_size();
- uptr free0 = __tsan_get_free_bytes();
- uptr unmapped0 = __tsan_get_unmapped_bytes();
+ uptr alloc0 = __sanitizer_get_current_allocated_bytes();
+ uptr heap0 = __sanitizer_get_heap_size();
+ uptr free0 = __sanitizer_get_free_bytes();
+ uptr unmapped0 = __sanitizer_get_unmapped_bytes();
- EXPECT_EQ(__tsan_get_estimated_allocated_size(10), (uptr)10);
- EXPECT_EQ(__tsan_get_estimated_allocated_size(20), (uptr)20);
- EXPECT_EQ(__tsan_get_estimated_allocated_size(100), (uptr)100);
+ EXPECT_EQ(10U, __sanitizer_get_estimated_allocated_size(10));
+ EXPECT_EQ(20U, __sanitizer_get_estimated_allocated_size(20));
+ EXPECT_EQ(100U, __sanitizer_get_estimated_allocated_size(100));
char *p = (char*)user_alloc(thr, 0, 10);
- EXPECT_EQ(__tsan_get_ownership(p), true);
- EXPECT_EQ(__tsan_get_allocated_size(p), (uptr)10);
+ EXPECT_TRUE(__sanitizer_get_ownership(p));
+ EXPECT_EQ(10U, __sanitizer_get_allocated_size(p));
- EXPECT_EQ(__tsan_get_current_allocated_bytes(), alloc0 + 16);
- EXPECT_GE(__tsan_get_heap_size(), heap0);
- EXPECT_EQ(__tsan_get_free_bytes(), free0);
- EXPECT_EQ(__tsan_get_unmapped_bytes(), unmapped0);
+ EXPECT_EQ(alloc0 + 16, __sanitizer_get_current_allocated_bytes());
+ EXPECT_GE(__sanitizer_get_heap_size(), heap0);
+ EXPECT_EQ(free0, __sanitizer_get_free_bytes());
+ EXPECT_EQ(unmapped0, __sanitizer_get_unmapped_bytes());
user_free(thr, 0, p);
- EXPECT_EQ(__tsan_get_current_allocated_bytes(), alloc0);
- EXPECT_GE(__tsan_get_heap_size(), heap0);
- EXPECT_EQ(__tsan_get_free_bytes(), free0);
- EXPECT_EQ(__tsan_get_unmapped_bytes(), unmapped0);
+ EXPECT_EQ(alloc0, __sanitizer_get_current_allocated_bytes());
+ EXPECT_GE(__sanitizer_get_heap_size(), heap0);
+ EXPECT_EQ(free0, __sanitizer_get_free_bytes());
+ EXPECT_EQ(unmapped0, __sanitizer_get_unmapped_bytes());
}
TEST(Mman, CallocOverflow) {
+#if TSAN_DEBUG
+ // EXPECT_DEATH clones a thread with 4K stack,
+ // which is overflown by tsan memory accesses functions in debug mode.
+ return;
+#endif
size_t kArraySize = 4096;
volatile size_t kMaxSizeT = std::numeric_limits<size_t>::max();
volatile size_t kArraySize2 = kMaxSizeT / kArraySize + 10;
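The mman tests now use the tool-agnostic allocator introspection API from <sanitizer/allocator_interface.h> in place of the removed __tsan_get_* entry points. The same queries are available to instrumented programs; a small standalone sketch (assuming a build with -fsanitize=thread):

  #include <sanitizer/allocator_interface.h>
  #include <stdio.h>
  #include <stdlib.h>

  int main() {
    void *p = malloc(10);
    printf("owned=%d size=%zu\n",
           __sanitizer_get_ownership(p),
           __sanitizer_get_allocated_size(p));
    printf("heap=%zu live=%zu\n",
           __sanitizer_get_heap_size(),
           __sanitizer_get_current_allocated_bytes());
    free(p);
    return 0;
  }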
diff --git a/lib/tsan/tests/unit/tsan_stack_test.cc b/lib/tsan/tests/unit/tsan_stack_test.cc
index 9aa2967628cf9..92e035d8d0004 100644
--- a/lib/tsan/tests/unit/tsan_stack_test.cc
+++ b/lib/tsan/tests/unit/tsan_stack_test.cc
@@ -17,73 +17,79 @@
namespace __tsan {
-static void TestStackTrace(StackTrace *trace) {
- ThreadState thr(0, 0, 0, 0, 0, 0, 0, 0);
+template <typename StackTraceTy>
+static void TestStackTrace(StackTraceTy *trace) {
+ ThreadState thr(0, 0, 0, 0, 0, 0, 0, 0, 0);
uptr stack[128];
thr.shadow_stack = &stack[0];
thr.shadow_stack_pos = &stack[0];
thr.shadow_stack_end = &stack[128];
- trace->ObtainCurrent(&thr, 0);
- EXPECT_EQ(trace->Size(), (uptr)0);
+ ObtainCurrentStack(&thr, 0, trace);
+ EXPECT_EQ(0U, trace->size);
- trace->ObtainCurrent(&thr, 42);
- EXPECT_EQ(trace->Size(), (uptr)1);
- EXPECT_EQ(trace->Get(0), (uptr)42);
+ ObtainCurrentStack(&thr, 42, trace);
+ EXPECT_EQ(1U, trace->size);
+ EXPECT_EQ(42U, trace->trace[0]);
*thr.shadow_stack_pos++ = 100;
*thr.shadow_stack_pos++ = 101;
- trace->ObtainCurrent(&thr, 0);
- EXPECT_EQ(trace->Size(), (uptr)2);
- EXPECT_EQ(trace->Get(0), (uptr)100);
- EXPECT_EQ(trace->Get(1), (uptr)101);
+ ObtainCurrentStack(&thr, 0, trace);
+ EXPECT_EQ(2U, trace->size);
+ EXPECT_EQ(100U, trace->trace[0]);
+ EXPECT_EQ(101U, trace->trace[1]);
- trace->ObtainCurrent(&thr, 42);
- EXPECT_EQ(trace->Size(), (uptr)3);
- EXPECT_EQ(trace->Get(0), (uptr)100);
- EXPECT_EQ(trace->Get(1), (uptr)101);
- EXPECT_EQ(trace->Get(2), (uptr)42);
+ ObtainCurrentStack(&thr, 42, trace);
+ EXPECT_EQ(3U, trace->size);
+ EXPECT_EQ(100U, trace->trace[0]);
+ EXPECT_EQ(101U, trace->trace[1]);
+ EXPECT_EQ(42U, trace->trace[2]);
}
-TEST(StackTrace, Basic) {
- ScopedInRtl in_rtl;
- StackTrace trace;
- TestStackTrace(&trace);
-}
+template<typename StackTraceTy>
+static void TestTrim(StackTraceTy *trace) {
+ ThreadState thr(0, 0, 0, 0, 0, 0, 0, 0, 0);
+ const uptr kShadowStackSize = 2 * kStackTraceMax;
+ uptr stack[kShadowStackSize];
+ thr.shadow_stack = &stack[0];
+ thr.shadow_stack_pos = &stack[0];
+ thr.shadow_stack_end = &stack[kShadowStackSize];
-TEST(StackTrace, StaticBasic) {
- ScopedInRtl in_rtl;
- uptr buf[10];
- StackTrace trace1(buf, 10);
- TestStackTrace(&trace1);
- StackTrace trace2(buf, 3);
- TestStackTrace(&trace2);
-}
+ for (uptr i = 0; i < kShadowStackSize; ++i)
+ *thr.shadow_stack_pos++ = 100 + i;
-TEST(StackTrace, StaticTrim) {
- ScopedInRtl in_rtl;
- uptr buf[2];
- StackTrace trace(buf, 2);
+ ObtainCurrentStack(&thr, 0, trace);
+ EXPECT_EQ(kStackTraceMax, trace->size);
+ for (uptr i = 0; i < kStackTraceMax; i++) {
+ EXPECT_EQ(100 + kStackTraceMax + i, trace->trace[i]);
+ }
- ThreadState thr(0, 0, 0, 0, 0, 0, 0, 0);
- uptr stack[128];
- thr.shadow_stack = &stack[0];
- thr.shadow_stack_pos = &stack[0];
- thr.shadow_stack_end = &stack[128];
+ ObtainCurrentStack(&thr, 42, trace);
+ EXPECT_EQ(kStackTraceMax, trace->size);
+ for (uptr i = 0; i < kStackTraceMax - 1; i++) {
+ EXPECT_EQ(101 + kStackTraceMax + i, trace->trace[i]);
+ }
+ EXPECT_EQ(42U, trace->trace[kStackTraceMax - 1]);
+}
- *thr.shadow_stack_pos++ = 100;
- *thr.shadow_stack_pos++ = 101;
- *thr.shadow_stack_pos++ = 102;
- trace.ObtainCurrent(&thr, 0);
- EXPECT_EQ(trace.Size(), (uptr)2);
- EXPECT_EQ(trace.Get(0), (uptr)101);
- EXPECT_EQ(trace.Get(1), (uptr)102);
+TEST(StackTrace, BasicVarSize) {
+ VarSizeStackTrace trace;
+ TestStackTrace(&trace);
+}
- trace.ObtainCurrent(&thr, 42);
- EXPECT_EQ(trace.Size(), (uptr)2);
- EXPECT_EQ(trace.Get(0), (uptr)102);
- EXPECT_EQ(trace.Get(1), (uptr)42);
+TEST(StackTrace, BasicBuffered) {
+ BufferedStackTrace trace;
+ TestStackTrace(&trace);
+}
+
+TEST(StackTrace, TrimVarSize) {
+ VarSizeStackTrace trace;
+ TestTrim(&trace);
}
+TEST(StackTrace, TrimBuffered) {
+ BufferedStackTrace trace;
+ TestTrim(&trace);
+}
} // namespace __tsan
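The new TestTrim covers the overflow case: when the shadow stack holds more than kStackTraceMax frames, ObtainCurrentStack keeps only the innermost kStackTraceMax, with the current pc (when nonzero) occupying the last slot. A sketch of that trimming rule as the tests above observe it (names illustrative, not the runtime's):

  #include <vector>

  typedef unsigned long uptr;

  std::vector<uptr> ObtainTrimmed(const std::vector<uptr> &shadow, uptr pc,
                                  size_t kMax) {
    std::vector<uptr> out(shadow);
    if (pc != 0)
      out.push_back(pc);  // the current pc goes on top of the stack
    if (out.size() > kMax)
      out.erase(out.begin(), out.end() - kMax);  // drop the outermost frames
    return out;
  }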
diff --git a/lib/tsan/tests/unit/tsan_sync_test.cc b/lib/tsan/tests/unit/tsan_sync_test.cc
index dddf0b2908836..d3616a1a4b811 100644
--- a/lib/tsan/tests/unit/tsan_sync_test.cc
+++ b/lib/tsan/tests/unit/tsan_sync_test.cc
@@ -12,54 +12,112 @@
//===----------------------------------------------------------------------===//
#include "tsan_sync.h"
#include "tsan_rtl.h"
-#include "tsan_mman.h"
#include "gtest/gtest.h"
-#include <stdlib.h>
-#include <stdint.h>
-#include <map>
-
namespace __tsan {
-TEST(Sync, Table) {
- const uintptr_t kIters = 512*1024;
- const uintptr_t kRange = 10000;
+TEST(MetaMap, Basic) {
+ ThreadState *thr = cur_thread();
+ MetaMap *m = &ctx->metamap;
+ u64 block[1] = {}; // fake malloc block
+ m->AllocBlock(thr, 0, (uptr)&block[0], 1 * sizeof(u64));
+ MBlock *mb = m->GetBlock((uptr)&block[0]);
+ EXPECT_NE(mb, (MBlock*)0);
+ EXPECT_EQ(mb->siz, 1 * sizeof(u64));
+ EXPECT_EQ(mb->tid, thr->tid);
+ uptr sz = m->FreeBlock(thr, 0, (uptr)&block[0]);
+ EXPECT_EQ(sz, 1 * sizeof(u64));
+ mb = m->GetBlock((uptr)&block[0]);
+ EXPECT_EQ(mb, (MBlock*)0);
+}
- ScopedInRtl in_rtl;
+TEST(MetaMap, FreeRange) {
ThreadState *thr = cur_thread();
- uptr pc = 0;
+ MetaMap *m = &ctx->metamap;
+ u64 block[4] = {}; // fake malloc block
+ m->AllocBlock(thr, 0, (uptr)&block[0], 1 * sizeof(u64));
+ m->AllocBlock(thr, 0, (uptr)&block[1], 3 * sizeof(u64));
+ MBlock *mb1 = m->GetBlock((uptr)&block[0]);
+ EXPECT_EQ(mb1->siz, 1 * sizeof(u64));
+ MBlock *mb2 = m->GetBlock((uptr)&block[1]);
+ EXPECT_EQ(mb2->siz, 3 * sizeof(u64));
+ m->FreeRange(thr, 0, (uptr)&block[0], 4 * sizeof(u64));
+ mb1 = m->GetBlock((uptr)&block[0]);
+ EXPECT_EQ(mb1, (MBlock*)0);
+ mb2 = m->GetBlock((uptr)&block[1]);
+ EXPECT_EQ(mb2, (MBlock*)0);
+}
- SyncTab tab;
- SyncVar *golden[kRange] = {};
- unsigned seed = 0;
- for (uintptr_t i = 0; i < kIters; i++) {
- uintptr_t addr = rand_r(&seed) % (kRange - 1) + 1;
- if (rand_r(&seed) % 2) {
- // Get or add.
- SyncVar *v = tab.GetOrCreateAndLock(thr, pc, addr, true);
- EXPECT_TRUE(golden[addr] == 0 || golden[addr] == v);
- EXPECT_EQ(v->addr, addr);
- golden[addr] = v;
- v->mtx.Unlock();
- } else {
- // Remove.
- SyncVar *v = tab.GetAndRemove(thr, pc, addr);
- EXPECT_EQ(golden[addr], v);
- if (v) {
- EXPECT_EQ(v->addr, addr);
- golden[addr] = 0;
- DestroyAndFree(v);
- }
- }
- }
- for (uintptr_t addr = 0; addr < kRange; addr++) {
- if (golden[addr] == 0)
- continue;
- SyncVar *v = tab.GetAndRemove(thr, pc, addr);
- EXPECT_EQ(v, golden[addr]);
- EXPECT_EQ(v->addr, addr);
- DestroyAndFree(v);
- }
+TEST(MetaMap, Sync) {
+ ThreadState *thr = cur_thread();
+ MetaMap *m = &ctx->metamap;
+ u64 block[4] = {}; // fake malloc block
+ m->AllocBlock(thr, 0, (uptr)&block[0], 4 * sizeof(u64));
+ SyncVar *s1 = m->GetIfExistsAndLock((uptr)&block[0]);
+ EXPECT_EQ(s1, (SyncVar*)0);
+ s1 = m->GetOrCreateAndLock(thr, 0, (uptr)&block[0], true);
+ EXPECT_NE(s1, (SyncVar*)0);
+ EXPECT_EQ(s1->addr, (uptr)&block[0]);
+ s1->mtx.Unlock();
+ SyncVar *s2 = m->GetOrCreateAndLock(thr, 0, (uptr)&block[1], false);
+ EXPECT_NE(s2, (SyncVar*)0);
+ EXPECT_EQ(s2->addr, (uptr)&block[1]);
+ s2->mtx.ReadUnlock();
+ m->FreeBlock(thr, 0, (uptr)&block[0]);
+ s1 = m->GetIfExistsAndLock((uptr)&block[0]);
+ EXPECT_EQ(s1, (SyncVar*)0);
+ s2 = m->GetIfExistsAndLock((uptr)&block[1]);
+ EXPECT_EQ(s2, (SyncVar*)0);
+ m->OnThreadIdle(thr);
+}
+
+TEST(MetaMap, MoveMemory) {
+ ThreadState *thr = cur_thread();
+ MetaMap *m = &ctx->metamap;
+ u64 block1[4] = {}; // fake malloc block
+ u64 block2[4] = {}; // fake malloc block
+ m->AllocBlock(thr, 0, (uptr)&block1[0], 3 * sizeof(u64));
+ m->AllocBlock(thr, 0, (uptr)&block1[3], 1 * sizeof(u64));
+ SyncVar *s1 = m->GetOrCreateAndLock(thr, 0, (uptr)&block1[0], true);
+ s1->mtx.Unlock();
+ SyncVar *s2 = m->GetOrCreateAndLock(thr, 0, (uptr)&block1[1], true);
+ s2->mtx.Unlock();
+ m->MoveMemory((uptr)&block1[0], (uptr)&block2[0], 4 * sizeof(u64));
+ MBlock *mb1 = m->GetBlock((uptr)&block1[0]);
+ EXPECT_EQ(mb1, (MBlock*)0);
+ MBlock *mb2 = m->GetBlock((uptr)&block1[3]);
+ EXPECT_EQ(mb2, (MBlock*)0);
+ mb1 = m->GetBlock((uptr)&block2[0]);
+ EXPECT_NE(mb1, (MBlock*)0);
+ EXPECT_EQ(mb1->siz, 3 * sizeof(u64));
+ mb2 = m->GetBlock((uptr)&block2[3]);
+ EXPECT_NE(mb2, (MBlock*)0);
+ EXPECT_EQ(mb2->siz, 1 * sizeof(u64));
+ s1 = m->GetIfExistsAndLock((uptr)&block1[0]);
+ EXPECT_EQ(s1, (SyncVar*)0);
+ s2 = m->GetIfExistsAndLock((uptr)&block1[1]);
+ EXPECT_EQ(s2, (SyncVar*)0);
+ s1 = m->GetIfExistsAndLock((uptr)&block2[0]);
+ EXPECT_NE(s1, (SyncVar*)0);
+ EXPECT_EQ(s1->addr, (uptr)&block2[0]);
+ s1->mtx.Unlock();
+ s2 = m->GetIfExistsAndLock((uptr)&block2[1]);
+ EXPECT_NE(s2, (SyncVar*)0);
+ EXPECT_EQ(s2->addr, (uptr)&block2[1]);
+ s2->mtx.Unlock();
+ m->FreeRange(thr, 0, (uptr)&block2[0], 4 * sizeof(u64));
+}
+
+TEST(MetaMap, ResetSync) {
+ ThreadState *thr = cur_thread();
+ MetaMap *m = &ctx->metamap;
+ u64 block[1] = {}; // fake malloc block
+ m->AllocBlock(thr, 0, (uptr)&block[0], 1 * sizeof(u64));
+ SyncVar *s = m->GetOrCreateAndLock(thr, 0, (uptr)&block[0], true);
+ s->Reset(thr);
+ s->mtx.Unlock();
+ uptr sz = m->FreeBlock(thr, 0, (uptr)&block[0]);
+ EXPECT_EQ(sz, 1 * sizeof(u64));
}
} // namespace __tsan
diff --git a/lib/tsan/tests/unit/tsan_vector_test.cc b/lib/tsan/tests/unit/tsan_vector_test.cc
index cfef6e528ff20..c54ac1ee6de94 100644
--- a/lib/tsan/tests/unit/tsan_vector_test.cc
+++ b/lib/tsan/tests/unit/tsan_vector_test.cc
@@ -17,7 +17,6 @@
namespace __tsan {
TEST(Vector, Basic) {
- ScopedInRtl in_rtl;
Vector<int> v(MBlockScopedBuf);
EXPECT_EQ(v.Size(), (uptr)0);
v.PushBack(42);
@@ -30,7 +29,6 @@ TEST(Vector, Basic) {
}
TEST(Vector, Stride) {
- ScopedInRtl in_rtl;
Vector<int> v(MBlockScopedBuf);
for (int i = 0; i < 1000; i++) {
v.PushBack(i);