Diffstat (limited to 'lib/tsan/rtl/tsan_rtl_mutex.cc')
-rw-r--r-- | lib/tsan/rtl/tsan_rtl_mutex.cc | 297
1 file changed, 220 insertions, 77 deletions
diff --git a/lib/tsan/rtl/tsan_rtl_mutex.cc b/lib/tsan/rtl/tsan_rtl_mutex.cc
index 409391678f013..ddf2b69bdbbcc 100644
--- a/lib/tsan/rtl/tsan_rtl_mutex.cc
+++ b/lib/tsan/rtl/tsan_rtl_mutex.cc
@@ -11,6 +11,9 @@
 //
 //===----------------------------------------------------------------------===//
 
+#include <sanitizer_common/sanitizer_deadlock_detector_interface.h>
+#include <sanitizer_common/sanitizer_stackdepot.h>
+
 #include "tsan_rtl.h"
 #include "tsan_flags.h"
 #include "tsan_sync.h"
@@ -20,10 +23,51 @@
 
 namespace __tsan {
 
+void ReportDeadlock(ThreadState *thr, uptr pc, DDReport *r);
+
+struct Callback : DDCallback {
+  ThreadState *thr;
+  uptr pc;
+
+  Callback(ThreadState *thr, uptr pc)
+      : thr(thr)
+      , pc(pc) {
+    DDCallback::pt = thr->dd_pt;
+    DDCallback::lt = thr->dd_lt;
+  }
+
+  virtual u32 Unwind() {
+    return CurrentStackId(thr, pc);
+  }
+  virtual int UniqueTid() {
+    return thr->unique_id;
+  }
+};
+
+void DDMutexInit(ThreadState *thr, uptr pc, SyncVar *s) {
+  Callback cb(thr, pc);
+  ctx->dd->MutexInit(&cb, &s->dd);
+  s->dd.ctx = s->GetId();
+}
+
+static void ReportMutexMisuse(ThreadState *thr, uptr pc, ReportType typ,
+                              uptr addr, u64 mid) {
+  // In Go, these misuses are either impossible, or detected by std lib,
+  // or false positives (e.g. unlock in a different thread).
+  if (kGoMode)
+    return;
+  ThreadRegistryLock l(ctx->thread_registry);
+  ScopedReport rep(typ);
+  rep.AddMutex(mid);
+  VarSizeStackTrace trace;
+  ObtainCurrentStack(thr, pc, &trace);
+  rep.AddStack(trace, true);
+  rep.AddLocation(addr, 1);
+  OutputReport(thr, rep);
+}
+
 void MutexCreate(ThreadState *thr, uptr pc, uptr addr,
                  bool rw, bool recursive, bool linker_init) {
-  Context *ctx = CTX();
-  CHECK_GT(thr->in_rtl, 0);
   DPrintf("#%d: MutexCreate %zx\n", thr->tid, addr);
   StatInc(thr, StatMutexCreate);
   if (!linker_init && IsAppMem(addr)) {
@@ -32,71 +76,92 @@ void MutexCreate(ThreadState *thr, uptr pc, uptr addr,
     MemoryWrite(thr, pc, addr, kSizeLog1);
     thr->is_freeing = false;
   }
-  SyncVar *s = ctx->synctab.GetOrCreateAndLock(thr, pc, addr, true);
+  SyncVar *s = ctx->metamap.GetOrCreateAndLock(thr, pc, addr, true);
   s->is_rw = rw;
   s->is_recursive = recursive;
   s->is_linker_init = linker_init;
+  if (kCppMode && s->creation_stack_id == 0)
+    s->creation_stack_id = CurrentStackId(thr, pc);
   s->mtx.Unlock();
 }
 
 void MutexDestroy(ThreadState *thr, uptr pc, uptr addr) {
-  Context *ctx = CTX();
-  CHECK_GT(thr->in_rtl, 0);
   DPrintf("#%d: MutexDestroy %zx\n", thr->tid, addr);
   StatInc(thr, StatMutexDestroy);
-#ifndef TSAN_GO
+#ifndef SANITIZER_GO
   // Global mutexes not marked as LINKER_INITIALIZED
   // cause tons of not interesting reports, so just ignore it.
   if (IsGlobalVar(addr))
     return;
 #endif
-  SyncVar *s = ctx->synctab.GetAndRemove(thr, pc, addr);
-  if (s == 0)
-    return;
   if (IsAppMem(addr)) {
     CHECK(!thr->is_freeing);
     thr->is_freeing = true;
     MemoryWrite(thr, pc, addr, kSizeLog1);
     thr->is_freeing = false;
   }
+  SyncVar *s = ctx->metamap.GetIfExistsAndLock(addr);
+  if (s == 0)
+    return;
+  if (common_flags()->detect_deadlocks) {
+    Callback cb(thr, pc);
+    ctx->dd->MutexDestroy(&cb, &s->dd);
+    ctx->dd->MutexInit(&cb, &s->dd);
+  }
+  bool unlock_locked = false;
   if (flags()->report_destroy_locked
       && s->owner_tid != SyncVar::kInvalidTid
      && !s->is_broken) {
     s->is_broken = true;
+    unlock_locked = true;
+  }
+  u64 mid = s->GetId();
+  u32 last_lock = s->last_lock;
+  if (!unlock_locked)
+    s->Reset(thr);  // must not reset it before the report is printed
+  s->mtx.Unlock();
+  if (unlock_locked) {
     ThreadRegistryLock l(ctx->thread_registry);
     ScopedReport rep(ReportTypeMutexDestroyLocked);
-    rep.AddMutex(s);
-    StackTrace trace;
-    trace.ObtainCurrent(thr, pc);
-    rep.AddStack(&trace);
-    FastState last(s->last_lock);
+    rep.AddMutex(mid);
+    VarSizeStackTrace trace;
+    ObtainCurrentStack(thr, pc, &trace);
+    rep.AddStack(trace);
+    FastState last(last_lock);
     RestoreStack(last.tid(), last.epoch(), &trace, 0);
-    rep.AddStack(&trace);
-    rep.AddLocation(s->addr, 1);
-    OutputReport(ctx, rep);
+    rep.AddStack(trace, true);
+    rep.AddLocation(addr, 1);
+    OutputReport(thr, rep);
+  }
+  if (unlock_locked) {
+    SyncVar *s = ctx->metamap.GetIfExistsAndLock(addr);
+    if (s != 0) {
+      s->Reset(thr);
+      s->mtx.Unlock();
+    }
   }
-  thr->mset.Remove(s->GetId());
-  DestroyAndFree(s);
+  thr->mset.Remove(mid);
+  // s will be destroyed and freed in MetaMap::FreeBlock.
 }
 
-void MutexLock(ThreadState *thr, uptr pc, uptr addr, int rec) {
-  CHECK_GT(thr->in_rtl, 0);
+void MutexLock(ThreadState *thr, uptr pc, uptr addr, int rec, bool try_lock) {
   DPrintf("#%d: MutexLock %zx rec=%d\n", thr->tid, addr, rec);
   CHECK_GT(rec, 0);
   if (IsAppMem(addr))
     MemoryReadAtomic(thr, pc, addr, kSizeLog1);
-  SyncVar *s = CTX()->synctab.GetOrCreateAndLock(thr, pc, addr, true);
+  SyncVar *s = ctx->metamap.GetOrCreateAndLock(thr, pc, addr, true);
   thr->fast_state.IncrementEpoch();
   TraceAddEvent(thr, thr->fast_state, EventTypeLock, s->GetId());
+  bool report_double_lock = false;
   if (s->owner_tid == SyncVar::kInvalidTid) {
     CHECK_EQ(s->recursion, 0);
     s->owner_tid = thr->tid;
     s->last_lock = thr->fast_state.raw();
   } else if (s->owner_tid == thr->tid) {
     CHECK_GT(s->recursion, 0);
-  } else {
-    Printf("ThreadSanitizer WARNING: double lock of mutex %p\n", addr);
-    PrintCurrentStack(thr, pc);
+  } else if (flags()->report_mutex_bugs && !s->is_broken) {
+    s->is_broken = true;
+    report_double_lock = true;
   }
   if (s->recursion == 0) {
     StatInc(thr, StatMutexLock);
@@ -107,30 +172,36 @@ void MutexLock(ThreadState *thr, uptr pc, uptr addr, int rec) {
   }
   s->recursion += rec;
   thr->mset.Add(s->GetId(), true, thr->fast_state.epoch());
+  if (common_flags()->detect_deadlocks && (s->recursion - rec) == 0) {
+    Callback cb(thr, pc);
+    if (!try_lock)
+      ctx->dd->MutexBeforeLock(&cb, &s->dd, true);
+    ctx->dd->MutexAfterLock(&cb, &s->dd, true, try_lock);
+  }
+  u64 mid = s->GetId();
   s->mtx.Unlock();
+  // Can't touch s after this point.
+  if (report_double_lock)
+    ReportMutexMisuse(thr, pc, ReportTypeMutexDoubleLock, addr, mid);
+  if (common_flags()->detect_deadlocks) {
+    Callback cb(thr, pc);
+    ReportDeadlock(thr, pc, ctx->dd->GetReport(&cb));
+  }
 }
 
 int MutexUnlock(ThreadState *thr, uptr pc, uptr addr, bool all) {
-  CHECK_GT(thr->in_rtl, 0);
   DPrintf("#%d: MutexUnlock %zx all=%d\n", thr->tid, addr, all);
   if (IsAppMem(addr))
     MemoryReadAtomic(thr, pc, addr, kSizeLog1);
-  SyncVar *s = CTX()->synctab.GetOrCreateAndLock(thr, pc, addr, true);
+  SyncVar *s = ctx->metamap.GetOrCreateAndLock(thr, pc, addr, true);
   thr->fast_state.IncrementEpoch();
   TraceAddEvent(thr, thr->fast_state, EventTypeUnlock, s->GetId());
   int rec = 0;
-  if (s->recursion == 0) {
-    if (!s->is_broken) {
+  bool report_bad_unlock = false;
+  if (kCppMode && (s->recursion == 0 || s->owner_tid != thr->tid)) {
+    if (flags()->report_mutex_bugs && !s->is_broken) {
       s->is_broken = true;
-      Printf("ThreadSanitizer WARNING: unlock of unlocked mutex %p\n", addr);
-      PrintCurrentStack(thr, pc);
-    }
-  } else if (s->owner_tid != thr->tid) {
-    if (!s->is_broken) {
-      s->is_broken = true;
-      Printf("ThreadSanitizer WARNING: mutex %p is unlocked by wrong thread\n",
-             addr);
-      PrintCurrentStack(thr, pc);
+      report_bad_unlock = true;
     }
   } else {
     rec = all ? s->recursion : 1;
@@ -144,56 +215,97 @@ int MutexUnlock(ThreadState *thr, uptr pc, uptr addr, bool all) {
     }
   }
   thr->mset.Del(s->GetId(), true);
+  if (common_flags()->detect_deadlocks && s->recursion == 0 &&
+      !report_bad_unlock) {
+    Callback cb(thr, pc);
+    ctx->dd->MutexBeforeUnlock(&cb, &s->dd, true);
+  }
+  u64 mid = s->GetId();
   s->mtx.Unlock();
+  // Can't touch s after this point.
+  if (report_bad_unlock)
+    ReportMutexMisuse(thr, pc, ReportTypeMutexBadUnlock, addr, mid);
+  if (common_flags()->detect_deadlocks && !report_bad_unlock) {
+    Callback cb(thr, pc);
+    ReportDeadlock(thr, pc, ctx->dd->GetReport(&cb));
+  }
   return rec;
 }
 
-void MutexReadLock(ThreadState *thr, uptr pc, uptr addr) {
-  CHECK_GT(thr->in_rtl, 0);
+void MutexReadLock(ThreadState *thr, uptr pc, uptr addr, bool trylock) {
   DPrintf("#%d: MutexReadLock %zx\n", thr->tid, addr);
   StatInc(thr, StatMutexReadLock);
   if (IsAppMem(addr))
     MemoryReadAtomic(thr, pc, addr, kSizeLog1);
-  SyncVar *s = CTX()->synctab.GetOrCreateAndLock(thr, pc, addr, false);
+  SyncVar *s = ctx->metamap.GetOrCreateAndLock(thr, pc, addr, false);
   thr->fast_state.IncrementEpoch();
   TraceAddEvent(thr, thr->fast_state, EventTypeRLock, s->GetId());
+  bool report_bad_lock = false;
   if (s->owner_tid != SyncVar::kInvalidTid) {
-    Printf("ThreadSanitizer WARNING: read lock of a write locked mutex %p\n",
-           addr);
-    PrintCurrentStack(thr, pc);
+    if (flags()->report_mutex_bugs && !s->is_broken) {
+      s->is_broken = true;
+      report_bad_lock = true;
+    }
   }
   AcquireImpl(thr, pc, &s->clock);
   s->last_lock = thr->fast_state.raw();
   thr->mset.Add(s->GetId(), false, thr->fast_state.epoch());
+  if (common_flags()->detect_deadlocks && s->recursion == 0) {
+    Callback cb(thr, pc);
+    if (!trylock)
+      ctx->dd->MutexBeforeLock(&cb, &s->dd, false);
+    ctx->dd->MutexAfterLock(&cb, &s->dd, false, trylock);
+  }
+  u64 mid = s->GetId();
   s->mtx.ReadUnlock();
+  // Can't touch s after this point.
+  if (report_bad_lock)
+    ReportMutexMisuse(thr, pc, ReportTypeMutexBadReadLock, addr, mid);
+  if (common_flags()->detect_deadlocks) {
+    Callback cb(thr, pc);
+    ReportDeadlock(thr, pc, ctx->dd->GetReport(&cb));
+  }
 }
 
 void MutexReadUnlock(ThreadState *thr, uptr pc, uptr addr) {
-  CHECK_GT(thr->in_rtl, 0);
   DPrintf("#%d: MutexReadUnlock %zx\n", thr->tid, addr);
   StatInc(thr, StatMutexReadUnlock);
   if (IsAppMem(addr))
     MemoryReadAtomic(thr, pc, addr, kSizeLog1);
-  SyncVar *s = CTX()->synctab.GetOrCreateAndLock(thr, pc, addr, true);
+  SyncVar *s = ctx->metamap.GetOrCreateAndLock(thr, pc, addr, true);
   thr->fast_state.IncrementEpoch();
   TraceAddEvent(thr, thr->fast_state, EventTypeRUnlock, s->GetId());
+  bool report_bad_unlock = false;
   if (s->owner_tid != SyncVar::kInvalidTid) {
-    Printf("ThreadSanitizer WARNING: read unlock of a write locked mutex %p\n",
-           addr);
-    PrintCurrentStack(thr, pc);
+    if (flags()->report_mutex_bugs && !s->is_broken) {
+      s->is_broken = true;
+      report_bad_unlock = true;
+    }
   }
   ReleaseImpl(thr, pc, &s->read_clock);
+  if (common_flags()->detect_deadlocks && s->recursion == 0) {
+    Callback cb(thr, pc);
+    ctx->dd->MutexBeforeUnlock(&cb, &s->dd, false);
+  }
+  u64 mid = s->GetId();
   s->mtx.Unlock();
-  thr->mset.Del(s->GetId(), false);
+  // Can't touch s after this point.
+  thr->mset.Del(mid, false);
+  if (report_bad_unlock)
+    ReportMutexMisuse(thr, pc, ReportTypeMutexBadReadUnlock, addr, mid);
+  if (common_flags()->detect_deadlocks) {
+    Callback cb(thr, pc);
+    ReportDeadlock(thr, pc, ctx->dd->GetReport(&cb));
+  }
 }
 
 void MutexReadOrWriteUnlock(ThreadState *thr, uptr pc, uptr addr) {
-  CHECK_GT(thr->in_rtl, 0);
   DPrintf("#%d: MutexReadOrWriteUnlock %zx\n", thr->tid, addr);
   if (IsAppMem(addr))
     MemoryReadAtomic(thr, pc, addr, kSizeLog1);
-  SyncVar *s = CTX()->synctab.GetOrCreateAndLock(thr, pc, addr, true);
+  SyncVar *s = ctx->metamap.GetOrCreateAndLock(thr, pc, addr, true);
   bool write = true;
+  bool report_bad_unlock = false;
   if (s->owner_tid == SyncVar::kInvalidTid) {
     // Seems to be read unlock.
     write = false;
@@ -216,30 +328,37 @@ void MutexReadOrWriteUnlock(ThreadState *thr, uptr pc, uptr addr) {
     }
   } else if (!s->is_broken) {
     s->is_broken = true;
-    Printf("ThreadSanitizer WARNING: mutex %p is unlock by wrong thread\n",
-           addr);
-    PrintCurrentStack(thr, pc);
+    report_bad_unlock = true;
   }
   thr->mset.Del(s->GetId(), write);
+  if (common_flags()->detect_deadlocks && s->recursion == 0) {
+    Callback cb(thr, pc);
+    ctx->dd->MutexBeforeUnlock(&cb, &s->dd, write);
+  }
+  u64 mid = s->GetId();
   s->mtx.Unlock();
+  // Can't touch s after this point.
+  if (report_bad_unlock)
+    ReportMutexMisuse(thr, pc, ReportTypeMutexBadUnlock, addr, mid);
+  if (common_flags()->detect_deadlocks) {
+    Callback cb(thr, pc);
+    ReportDeadlock(thr, pc, ctx->dd->GetReport(&cb));
+  }
 }
 
 void MutexRepair(ThreadState *thr, uptr pc, uptr addr) {
-  Context *ctx = CTX();
-  CHECK_GT(thr->in_rtl, 0);
   DPrintf("#%d: MutexRepair %zx\n", thr->tid, addr);
-  SyncVar *s = ctx->synctab.GetOrCreateAndLock(thr, pc, addr, true);
+  SyncVar *s = ctx->metamap.GetOrCreateAndLock(thr, pc, addr, true);
   s->owner_tid = SyncVar::kInvalidTid;
   s->recursion = 0;
   s->mtx.Unlock();
 }
 
 void Acquire(ThreadState *thr, uptr pc, uptr addr) {
-  CHECK_GT(thr->in_rtl, 0);
   DPrintf("#%d: Acquire %zx\n", thr->tid, addr);
   if (thr->ignore_sync)
     return;
-  SyncVar *s = CTX()->synctab.GetOrCreateAndLock(thr, pc, addr, false);
+  SyncVar *s = ctx->metamap.GetOrCreateAndLock(thr, pc, addr, false);
   AcquireImpl(thr, pc, &s->clock);
   s->mtx.ReadUnlock();
 }
@@ -257,17 +376,16 @@ void AcquireGlobal(ThreadState *thr, uptr pc) {
   DPrintf("#%d: AcquireGlobal\n", thr->tid);
   if (thr->ignore_sync)
     return;
-  ThreadRegistryLock l(CTX()->thread_registry);
-  CTX()->thread_registry->RunCallbackForEachThreadLocked(
+  ThreadRegistryLock l(ctx->thread_registry);
+  ctx->thread_registry->RunCallbackForEachThreadLocked(
       UpdateClockCallback, thr);
 }
 
 void Release(ThreadState *thr, uptr pc, uptr addr) {
-  CHECK_GT(thr->in_rtl, 0);
   DPrintf("#%d: Release %zx\n", thr->tid, addr);
   if (thr->ignore_sync)
     return;
-  SyncVar *s = CTX()->synctab.GetOrCreateAndLock(thr, pc, addr, true);
+  SyncVar *s = ctx->metamap.GetOrCreateAndLock(thr, pc, addr, true);
   thr->fast_state.IncrementEpoch();
   // Can't increment epoch w/o writing to the trace as well.
   TraceAddEvent(thr, thr->fast_state, EventTypeMop, 0);
@@ -276,11 +394,10 @@ void Release(ThreadState *thr, uptr pc, uptr addr) {
 }
 
 void ReleaseStore(ThreadState *thr, uptr pc, uptr addr) {
-  CHECK_GT(thr->in_rtl, 0);
   DPrintf("#%d: ReleaseStore %zx\n", thr->tid, addr);
   if (thr->ignore_sync)
     return;
-  SyncVar *s = CTX()->synctab.GetOrCreateAndLock(thr, pc, addr, true);
+  SyncVar *s = ctx->metamap.GetOrCreateAndLock(thr, pc, addr, true);
   thr->fast_state.IncrementEpoch();
   // Can't increment epoch w/o writing to the trace as well.
   TraceAddEvent(thr, thr->fast_state, EventTypeMop, 0);
@@ -288,7 +405,7 @@ void ReleaseStore(ThreadState *thr, uptr pc, uptr addr) {
   s->mtx.Unlock();
 }
 
-#ifndef TSAN_GO
+#ifndef SANITIZER_GO
 static void UpdateSleepClockCallback(ThreadContextBase *tctx_base, void *arg) {
   ThreadState *thr = reinterpret_cast<ThreadState*>(arg);
   ThreadContext *tctx = static_cast<ThreadContext*>(tctx_base);
@@ -303,8 +420,8 @@ void AfterSleep(ThreadState *thr, uptr pc) {
   if (thr->ignore_sync)
     return;
   thr->last_sleep_stack_id = CurrentStackId(thr, pc);
-  ThreadRegistryLock l(CTX()->thread_registry);
-  CTX()->thread_registry->RunCallbackForEachThreadLocked(
+  ThreadRegistryLock l(ctx->thread_registry);
+  ctx->thread_registry->RunCallbackForEachThreadLocked(
      UpdateSleepClockCallback, thr);
 }
 #endif
@@ -312,37 +429,63 @@ void AfterSleep(ThreadState *thr, uptr pc) {
 void AcquireImpl(ThreadState *thr, uptr pc, SyncClock *c) {
   if (thr->ignore_sync)
     return;
-  thr->clock.set(thr->tid, thr->fast_state.epoch());
-  thr->clock.acquire(c);
+  thr->clock.set(thr->fast_state.epoch());
+  thr->clock.acquire(&thr->clock_cache, c);
   StatInc(thr, StatSyncAcquire);
 }
 
 void ReleaseImpl(ThreadState *thr, uptr pc, SyncClock *c) {
   if (thr->ignore_sync)
     return;
-  thr->clock.set(thr->tid, thr->fast_state.epoch());
+  thr->clock.set(thr->fast_state.epoch());
   thr->fast_synch_epoch = thr->fast_state.epoch();
-  thr->clock.release(c);
+  thr->clock.release(&thr->clock_cache, c);
   StatInc(thr, StatSyncRelease);
 }
 
 void ReleaseStoreImpl(ThreadState *thr, uptr pc, SyncClock *c) {
   if (thr->ignore_sync)
     return;
-  thr->clock.set(thr->tid, thr->fast_state.epoch());
+  thr->clock.set(thr->fast_state.epoch());
   thr->fast_synch_epoch = thr->fast_state.epoch();
-  thr->clock.ReleaseStore(c);
+  thr->clock.ReleaseStore(&thr->clock_cache, c);
   StatInc(thr, StatSyncRelease);
 }
 
 void AcquireReleaseImpl(ThreadState *thr, uptr pc, SyncClock *c) {
   if (thr->ignore_sync)
     return;
-  thr->clock.set(thr->tid, thr->fast_state.epoch());
+  thr->clock.set(thr->fast_state.epoch());
   thr->fast_synch_epoch = thr->fast_state.epoch();
-  thr->clock.acq_rel(c);
+  thr->clock.acq_rel(&thr->clock_cache, c);
   StatInc(thr, StatSyncAcquire);
   StatInc(thr, StatSyncRelease);
 }
 
+void ReportDeadlock(ThreadState *thr, uptr pc, DDReport *r) {
+  if (r == 0)
+    return;
+  ThreadRegistryLock l(ctx->thread_registry);
+  ScopedReport rep(ReportTypeDeadlock);
+  for (int i = 0; i < r->n; i++) {
+    rep.AddMutex(r->loop[i].mtx_ctx0);
+    rep.AddUniqueTid((int)r->loop[i].thr_ctx);
+    rep.AddThread((int)r->loop[i].thr_ctx);
+  }
+  uptr dummy_pc = 0x42;
+  for (int i = 0; i < r->n; i++) {
+    for (int j = 0; j < (flags()->second_deadlock_stack ? 2 : 1); j++) {
+      u32 stk = r->loop[i].stk[j];
+      if (stk) {
+        rep.AddStack(StackDepotGet(stk), true);
+      } else {
+        // Sometimes we fail to extract the stack trace (FIXME: investigate),
+        // but we should still produce some stack trace in the report.
+        rep.AddStack(StackTrace(&dummy_pc, 1), true);
+      }
+    }
+  }
+  OutputReport(thr, rep);
+}
+
 } // namespace __tsan
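The two sketches below are editorial additions, not part of the diff above; the file names, build command, and TSAN_OPTIONS values are illustrative assumptions. The first shows the kind of misuse that now goes through ReportMutexMisuse(): unlocking a mutex that is not held takes the report_bad_unlock branch in MutexUnlock() and is reported as ReportTypeMutexBadUnlock (gated by the report_mutex_bugs flag) instead of the old plain Printf warning.

// bad_unlock.cc (hypothetical), e.g.: clang++ -fsanitize=thread -g bad_unlock.cc
#include <pthread.h>

int main() {
  pthread_mutex_t m;
  pthread_mutex_init(&m, 0);
  pthread_mutex_unlock(&m);   // unlock without a matching lock: bad unlock
  pthread_mutex_destroy(&m);
  return 0;
}

The second sketch targets the new deadlock-detector hooks (the MutexBeforeLock/MutexAfterLock calls feeding ctx->dd, with ReportDeadlock() turning the resulting DDReport into a ReportTypeDeadlock). The detector works on the lock-acquisition graph, so a single thread taking two mutexes in both orders is enough to close a cycle; no actual runtime deadlock is needed. This assumes a run where common_flags()->detect_deadlocks is enabled (e.g. TSAN_OPTIONS=detect_deadlocks=1, optionally second_deadlock_stack=1 for a second stack per edge).

// lock_inversion.cc (hypothetical)
#include <pthread.h>

int main() {
  pthread_mutex_t mu1 = PTHREAD_MUTEX_INITIALIZER;
  pthread_mutex_t mu2 = PTHREAD_MUTEX_INITIALIZER;

  // First ordering records the edge mu1 -> mu2 in the lock graph.
  pthread_mutex_lock(&mu1);
  pthread_mutex_lock(&mu2);
  pthread_mutex_unlock(&mu2);
  pthread_mutex_unlock(&mu1);

  // Reversed ordering adds mu2 -> mu1 and closes the cycle; MutexBeforeLock()
  // hands the edge to the detector and ReportDeadlock() emits the
  // lock-order-inversion report.
  pthread_mutex_lock(&mu2);
  pthread_mutex_lock(&mu1);
  pthread_mutex_unlock(&mu1);
  pthread_mutex_unlock(&mu2);
  return 0;
}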