Diffstat (limited to 'lib/tsan/rtl/tsan_rtl_thread.cc')
 lib/tsan/rtl/tsan_rtl_thread.cc | 92
 1 file changed, 68 insertions(+), 24 deletions(-)
diff --git a/lib/tsan/rtl/tsan_rtl_thread.cc b/lib/tsan/rtl/tsan_rtl_thread.cc
index f7d5f13dca78c..3597759278345 100644
--- a/lib/tsan/rtl/tsan_rtl_thread.cc
+++ b/lib/tsan/rtl/tsan_rtl_thread.cc
@@ -35,7 +35,7 @@ static void MaybeReportThreadLeak(ThreadContext *tctx) {
return;
ScopedReport rep(ReportTypeThreadLeak);
rep.AddThread(tctx);
- OutputReport(rep);
+ OutputReport(CTX(), rep);
}
void ThreadFinalize(ThreadState *thr) {
@@ -52,6 +52,23 @@ void ThreadFinalize(ThreadState *thr) {
}
}
+int ThreadCount(ThreadState *thr) {
+ CHECK_GT(thr->in_rtl, 0);
+ Context *ctx = CTX();
+ Lock l(&ctx->thread_mtx);
+ int cnt = 0;
+ for (unsigned i = 0; i < kMaxTid; i++) {
+ ThreadContext *tctx = ctx->threads[i];
+ if (tctx == 0)
+ continue;
+ if (tctx->status != ThreadStatusCreated
+ && tctx->status != ThreadStatusRunning)
+ continue;
+ cnt++;
+ }
+ return cnt;
+}
+
static void ThreadDead(ThreadState *thr, ThreadContext *tctx) {
Context *ctx = CTX();
CHECK_GT(thr->in_rtl, 0);
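[Annotation] The new ThreadCount() takes ctx->thread_mtx and counts thread contexts whose status is Created or Running, i.e. threads that exist but have not yet finished. A minimal standalone sketch of the same pattern, using simplified stand-in types rather than the real tsan declarations:

    // Simplified analogue of ThreadCount(); std::mutex stands in for
    // ctx->thread_mtx and Slot for ThreadContext.
    #include <mutex>

    enum ThreadStatus { Invalid, Created, Running, Finished, Dead };
    struct Slot { ThreadStatus status; };

    static const unsigned kMaxTid = 1 << 13;  // assumed limit, for illustration
    static Slot *g_threads[kMaxTid];          // null until the tid is allocated
    static std::mutex g_thread_mtx;

    int LiveThreadCount() {
      std::lock_guard<std::mutex> l(g_thread_mtx);
      int cnt = 0;
      for (unsigned i = 0; i < kMaxTid; i++) {
        Slot *t = g_threads[i];
        if (t && (t->status == Created || t->status == Running))
          cnt++;  // created-but-not-yet-started threads count too
      }
      return cnt;
    }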
@@ -81,8 +98,9 @@ int ThreadCreate(ThreadState *thr, uptr pc, uptr uid, bool detached) {
ThreadContext *tctx = 0;
if (ctx->dead_list_size > kThreadQuarantineSize
|| ctx->thread_seq >= kMaxTid) {
+ // Reusing old thread descriptor and tid.
if (ctx->dead_list_size == 0) {
- TsanPrintf("ThreadSanitizer: %d thread limit exceeded. Dying.\n",
+ Printf("ThreadSanitizer: %d thread limit exceeded. Dying.\n",
kMaxTid);
Die();
}
@@ -100,12 +118,18 @@ int ThreadCreate(ThreadState *thr, uptr pc, uptr uid, bool detached) {
tctx->sync.Reset();
tid = tctx->tid;
DestroyAndFree(tctx->dead_info);
+ if (tctx->name) {
+ internal_free(tctx->name);
+ tctx->name = 0;
+ }
} else {
+ // Allocating new thread descriptor and tid.
StatInc(thr, StatThreadMaxTid);
tid = ctx->thread_seq++;
void *mem = internal_alloc(MBlockThreadContex, sizeof(ThreadContext));
tctx = new(mem) ThreadContext(tid);
ctx->threads[tid] = tctx;
+ MapThreadTrace(GetThreadTrace(tid), TraceSize() * sizeof(Event));
}
CHECK_NE(tctx, 0);
CHECK_GE(tid, 0);
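[Annotation] Two things happen in this hunk: a reused descriptor also frees any leftover tctx->name, and a freshly allocated tid gets its trace region mapped once via MapThreadTrace. Mapping once per tid works because each tid owns a fixed trace slot; a sketch of that placement idea, with made-up constants (the real layout lives in the platform headers):

    // Illustrative per-tid trace placement; kTraceBase/kTraceEvents are
    // assumptions for this sketch, not tsan's real values.
    typedef unsigned long uptr;

    static const uptr kTraceBase   = 0x600000000000UL;  // assumed base address
    static const uptr kTraceEvents = 1 << 20;           // assumed events per thread
    static const uptr kEventSize   = 8;                 // one u64 per event

    uptr ThreadTraceAddr(int tid) {
      // Disjoint fixed slots: a reused tid finds its trace already mapped,
      // so only brand-new tids pay for MapThreadTrace.
      return kTraceBase + (uptr)tid * kTraceEvents * kEventSize;
    }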
@@ -126,18 +150,18 @@ int ThreadCreate(ThreadState *thr, uptr pc, uptr uid, bool detached) {
if (tid) {
thr->fast_state.IncrementEpoch();
// Can't increment epoch w/o writing to the trace as well.
- TraceAddEvent(thr, thr->fast_state.epoch(), EventTypeMop, 0);
+ TraceAddEvent(thr, thr->fast_state, EventTypeMop, 0);
thr->clock.set(thr->tid, thr->fast_state.epoch());
thr->fast_synch_epoch = thr->fast_state.epoch();
thr->clock.release(&tctx->sync);
StatInc(thr, StatSyncRelease);
-
tctx->creation_stack.ObtainCurrent(thr, pc);
+ tctx->creation_tid = thr->tid;
}
return tid;
}
-void ThreadStart(ThreadState *thr, int tid) {
+void ThreadStart(ThreadState *thr, int tid, uptr os_id) {
CHECK_GT(thr->in_rtl, 0);
uptr stk_addr = 0;
uptr stk_size = 0;
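[Annotation] Call sites now pass the whole fast_state to TraceAddEvent instead of just fast_state.epoch(), and the comment states the invariant: an epoch increment must always be paired with a trace write. A self-contained sketch of that pairing, with stand-in types:

    // Stand-in types; in tsan the event packs type/pc into a u64 and the
    // trace is a mapped ring rather than a vector.
    #include <cstdint>
    #include <vector>

    struct Event { uint64_t epoch; uintptr_t pc; };

    struct Thread {
      uint64_t epoch = 0;
      std::vector<Event> trace;

      // "Can't increment epoch w/o writing to the trace as well": keeping
      // the two together means every epoch maps to exactly one recorded
      // event, which is what report reconstruction relies on.
      void IncrementEpochAndTrace(uintptr_t pc) {
        epoch++;
        trace.push_back(Event{epoch, pc});
      }
    };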
@@ -169,10 +193,14 @@ void ThreadStart(ThreadState *thr, int tid) {
CHECK_NE(tctx, 0);
CHECK_EQ(tctx->status, ThreadStatusCreated);
tctx->status = ThreadStatusRunning;
- tctx->epoch0 = tctx->epoch1 + 1;
+ tctx->os_id = os_id;
+ // RoundUp so that one trace part does not contain events
+ // from different threads.
+ tctx->epoch0 = RoundUp(tctx->epoch1 + 1, kTracePartSize);
tctx->epoch1 = (u64)-1;
- new(thr) ThreadState(CTX(), tid, tctx->epoch0, stk_addr, stk_size,
- tls_addr, tls_size);
+ new(thr) ThreadState(CTX(), tid, tctx->unique_id,
+ tctx->epoch0, stk_addr, stk_size,
+ tls_addr, tls_size);
#ifdef TSAN_GO
// Setup dynamic shadow stack.
const int kInitStackSize = 8;
@@ -185,6 +213,9 @@ void ThreadStart(ThreadState *thr, int tid) {
thr->fast_synch_epoch = tctx->epoch0;
thr->clock.set(tid, tctx->epoch0);
thr->clock.acquire(&tctx->sync);
+ thr->fast_state.SetHistorySize(flags()->history_size);
+ const uptr trace = (tctx->epoch0 / kTracePartSize) % TraceParts();
+ thr->trace.headers[trace].epoch0 = tctx->epoch0;
StatInc(thr, StatSyncAcquire);
DPrintf("#%d: ThreadStart epoch=%zu stk_addr=%zx stk_size=%zx "
"tls_addr=%zx tls_size=%zx\n",
@@ -219,7 +250,7 @@ void ThreadFinish(ThreadState *thr) {
} else {
thr->fast_state.IncrementEpoch();
// Can't increment epoch w/o writing to the trace as well.
- TraceAddEvent(thr, thr->fast_state.epoch(), EventTypeMop, 0);
+ TraceAddEvent(thr, thr->fast_state, EventTypeMop, 0);
thr->clock.set(thr->tid, thr->fast_state.epoch());
thr->fast_synch_epoch = thr->fast_state.epoch();
thr->clock.release(&tctx->sync);
@@ -230,14 +261,16 @@ void ThreadFinish(ThreadState *thr) {
// Save info about the thread.
tctx->dead_info = new(internal_alloc(MBlockDeadInfo, sizeof(ThreadDeadInfo)))
ThreadDeadInfo();
- internal_memcpy(&tctx->dead_info->trace.events[0],
- &thr->trace.events[0], sizeof(thr->trace.events));
- for (int i = 0; i < kTraceParts; i++) {
+ for (uptr i = 0; i < TraceParts(); i++) {
+ tctx->dead_info->trace.headers[i].epoch0 = thr->trace.headers[i].epoch0;
tctx->dead_info->trace.headers[i].stack0.CopyFrom(
thr->trace.headers[i].stack0);
}
tctx->epoch1 = thr->fast_state.epoch();
+#ifndef TSAN_GO
+ AlloctorThreadFinish(thr);
+#endif
thr->~ThreadState();
StatAggregate(ctx->stat, thr->stat);
tctx->thr = 0;
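[Annotation] With per-tid traces living in fixed mappings, the dead-thread snapshot no longer memcpy's the whole event buffer; only the per-part metadata (epoch0 plus the stack at the part's start) is preserved. A sketch of that header-only copy with simplified types:

    #include <cstdint>
    #include <vector>

    struct StackTrace { std::vector<uintptr_t> pcs; };
    struct TraceHeader { uint64_t epoch0 = 0; StackTrace stack0; };

    // Header-only snapshot: events stay in the (still mapped) trace region,
    // so saving epoch0 and stack0 per part is enough to rebuild reports.
    void SnapshotHeaders(const std::vector<TraceHeader> &live,
                         std::vector<TraceHeader> *dead) {
      dead->resize(live.size());
      for (size_t i = 0; i < live.size(); i++) {
        (*dead)[i].epoch0 = live[i].epoch0;
        (*dead)[i].stack0 = live[i].stack0;
      }
    }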
@@ -270,9 +303,10 @@ void ThreadJoin(ThreadState *thr, uptr pc, int tid) {
Lock l(&ctx->thread_mtx);
ThreadContext *tctx = ctx->threads[tid];
if (tctx->status == ThreadStatusInvalid) {
- TsanPrintf("ThreadSanitizer: join of non-existent thread\n");
+ Printf("ThreadSanitizer: join of non-existent thread\n");
return;
}
+ // FIXME(dvyukov): print message and continue (it's user error).
CHECK_EQ(tctx->detached, false);
CHECK_EQ(tctx->status, ThreadStatusFinished);
thr->clock.acquire(&tctx->sync);
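[Annotation] ThreadJoin pairs with the release in ThreadFinish: the child releases its vector clock into tctx->sync, and the joiner acquires it, which is the happens-before edge that makes everything the child did visible to the joiner. A self-contained sketch of that release/acquire pairing (simplified clocks, not tsan's real representation):

    #include <algorithm>
    #include <cstdint>
    #include <vector>

    struct VectorClock {
      std::vector<uint64_t> c;
      explicit VectorClock(size_t nthreads) : c(nthreads, 0) {}
      // release: fold this thread's knowledge into a sync object.
      void release(VectorClock *sync) const {
        for (size_t i = 0; i < c.size(); i++)
          sync->c[i] = std::max(sync->c[i], c[i]);
      }
      // acquire: fold the sync object's knowledge into this thread.
      void acquire(const VectorClock &sync) {
        for (size_t i = 0; i < c.size(); i++)
          c[i] = std::max(c[i], sync.c[i]);
      }
    };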
@@ -288,7 +322,7 @@ void ThreadDetach(ThreadState *thr, uptr pc, int tid) {
Lock l(&ctx->thread_mtx);
ThreadContext *tctx = ctx->threads[tid];
if (tctx->status == ThreadStatusInvalid) {
- TsanPrintf("ThreadSanitizer: detach of non-existent thread\n");
+ Printf("ThreadSanitizer: detach of non-existent thread\n");
return;
}
if (tctx->status == ThreadStatusFinished) {
@@ -298,8 +332,18 @@ void ThreadDetach(ThreadState *thr, uptr pc, int tid) {
}
}
-void ThreadFinalizerGoroutine(ThreadState *thr) {
- thr->clock.Disable(thr->tid);
+void ThreadSetName(ThreadState *thr, const char *name) {
+ Context *ctx = CTX();
+ Lock l(&ctx->thread_mtx);
+ ThreadContext *tctx = ctx->threads[thr->tid];
+ CHECK_NE(tctx, 0);
+ CHECK_EQ(tctx->status, ThreadStatusRunning);
+ if (tctx->name) {
+ internal_free(tctx->name);
+ tctx->name = 0;
+ }
+ if (name)
+ tctx->name = internal_strdup(name);
}
void MemoryAccessRange(ThreadState *thr, uptr pc, uptr addr,
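[Annotation] In this hunk ThreadFinalizerGoroutine() is removed and ThreadSetName() added in its place. The new function follows a simple ownership rule: the context always owns a private heap copy of the name. The same rule with libc stand-ins for internal_free/internal_strdup:

    #include <cstdlib>
    #include <cstring>

    struct ThreadCtx { char *name; };

    // free/strdup stand in for internal_free/internal_strdup; the context
    // keeps an owned copy, so callers may pass transient buffers.
    void SetName(ThreadCtx *t, const char *name) {
      free(t->name);  // no-op when name is already null
      t->name = name ? strdup(name) : nullptr;
    }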
@@ -314,19 +358,19 @@ void MemoryAccessRange(ThreadState *thr, uptr pc, uptr addr,
#if TSAN_DEBUG
if (!IsAppMem(addr)) {
- TsanPrintf("Access to non app mem %zx\n", addr);
+ Printf("Access to non app mem %zx\n", addr);
DCHECK(IsAppMem(addr));
}
if (!IsAppMem(addr + size - 1)) {
- TsanPrintf("Access to non app mem %zx\n", addr + size - 1);
+ Printf("Access to non app mem %zx\n", addr + size - 1);
DCHECK(IsAppMem(addr + size - 1));
}
if (!IsShadowMem((uptr)shadow_mem)) {
- TsanPrintf("Bad shadow addr %p (%zx)\n", shadow_mem, addr);
+ Printf("Bad shadow addr %p (%zx)\n", shadow_mem, addr);
DCHECK(IsShadowMem((uptr)shadow_mem));
}
if (!IsShadowMem((uptr)(shadow_mem + size * kShadowCnt / 8 - 1))) {
- TsanPrintf("Bad shadow addr %p (%zx)\n",
+ Printf("Bad shadow addr %p (%zx)\n",
shadow_mem + size * kShadowCnt / 8 - 1, addr + size - 1);
DCHECK(IsShadowMem((uptr)(shadow_mem + size * kShadowCnt / 8 - 1)));
}
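[Annotation] The debug checks validate both endpoints of the application range and of the corresponding shadow range. The pointer arithmetic follows from the layout: each 8-byte application cell owns kShadowCnt shadow slots, so a range of size bytes spans size * kShadowCnt / 8 slots. A tiny sketch (constants are tsan's defaults, but treat them as assumptions here):

    #include <cstdint>

    static const uintptr_t kShadowCnt  = 4;  // shadow slots per cell (assumed default)
    static const uintptr_t kShadowCell = 8;  // application bytes per cell

    // Mirrors the bound in the check: shadow_mem + size * kShadowCnt / 8 - 1
    // is the last shadow slot touched by the range.
    uintptr_t ShadowSlotsForRange(uintptr_t size) {
      return size * kShadowCnt / kShadowCell;
    }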
@@ -340,7 +384,7 @@ void MemoryAccessRange(ThreadState *thr, uptr pc, uptr addr,
fast_state.IncrementEpoch();
thr->fast_state = fast_state;
- TraceAddEvent(thr, fast_state.epoch(), EventTypeMop, pc);
+ TraceAddEvent(thr, fast_state, EventTypeMop, pc);
bool unaligned = (addr % kShadowCell) != 0;
@@ -350,7 +394,7 @@ void MemoryAccessRange(ThreadState *thr, uptr pc, uptr addr,
Shadow cur(fast_state);
cur.SetWrite(is_write);
cur.SetAddr0AndSizeLog(addr & (kShadowCell - 1), kAccessSizeLog);
- MemoryAccessImpl(thr, addr, kAccessSizeLog, is_write, fast_state,
+ MemoryAccessImpl(thr, addr, kAccessSizeLog, is_write,
shadow_mem, cur);
}
if (unaligned)
@@ -361,7 +405,7 @@ void MemoryAccessRange(ThreadState *thr, uptr pc, uptr addr,
Shadow cur(fast_state);
cur.SetWrite(is_write);
cur.SetAddr0AndSizeLog(0, kAccessSizeLog);
- MemoryAccessImpl(thr, addr, kAccessSizeLog, is_write, fast_state,
+ MemoryAccessImpl(thr, addr, kAccessSizeLog, is_write,
shadow_mem, cur);
shadow_mem += kShadowCnt;
}
@@ -371,7 +415,7 @@ void MemoryAccessRange(ThreadState *thr, uptr pc, uptr addr,
Shadow cur(fast_state);
cur.SetWrite(is_write);
cur.SetAddr0AndSizeLog(addr & (kShadowCell - 1), kAccessSizeLog);
- MemoryAccessImpl(thr, addr, kAccessSizeLog, is_write, fast_state,
+ MemoryAccessImpl(thr, addr, kAccessSizeLog, is_write,
shadow_mem, cur);
}
}
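[Annotation] Across these last hunks MemoryAccessImpl() drops its fast_state parameter, since the Shadow value cur is constructed from fast_state and already carries everything needed. The surrounding loop splits the range into a possibly partial head cell, whole 8-byte cells, and a partial tail; a self-contained sketch of that decomposition (ProcessCell stands in for the MemoryAccessImpl call):

    #include <cstdint>
    #include <cstdio>

    static const uintptr_t kShadowCell = 8;  // bytes per shadow cell

    static void ProcessCell(uintptr_t cell, uintptr_t offset, uintptr_t len) {
      std::printf("cell %#lx offset %lu len %lu\n",
                  (unsigned long)cell, (unsigned long)offset, (unsigned long)len);
    }

    void AccessRange(uintptr_t addr, uintptr_t size) {
      if (addr % kShadowCell != 0) {   // partial head cell
        uintptr_t off = addr % kShadowCell;
        uintptr_t len = kShadowCell - off;
        if (len > size) len = size;
        ProcessCell(addr - off, off, len);
        addr += len;
        size -= len;
      }
      while (size >= kShadowCell) {    // whole aligned cells
        ProcessCell(addr, 0, kShadowCell);
        addr += kShadowCell;
        size -= kShadowCell;
      }
      if (size != 0)                   // partial tail cell
        ProcessCell(addr, 0, size);
    }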