| author | John Baldwin <jhb@FreeBSD.org> | 2002-01-05 08:29:54 +0000 |
|---|---|---|
| committer | John Baldwin <jhb@FreeBSD.org> | 2002-01-05 08:29:54 +0000 |
| commit | 422f61655f0b6793c00d38cd362c76acf4462877 (patch) | |
| tree | cd9e019894449acdf2a62111486edb33e84d0956 | |
| parent | e0c58e64c0a26e54feca70f522797c131b7c8554 (diff) | |
| -rw-r--r-- | sys/kern/subr_witness.c | 19 |
1 files changed, 3 insertions, 16 deletions
diff --git a/sys/kern/subr_witness.c b/sys/kern/subr_witness.c
index 1c70d8685b68..b7985790125a 100644
--- a/sys/kern/subr_witness.c
+++ b/sys/kern/subr_witness.c
@@ -475,26 +475,13 @@ witness_lock(struct lock_object *lock, int flags, const char *file, int line)
 	class = lock->lo_class;
 	td = curthread;
 
-	/*
-	 * We have to hold a spinlock to keep lock_list valid across the check
-	 * in the LC_SLEEPLOCK case. In the LC_SPINLOCK case, it is already
-	 * protected by the spinlock we are currently performing the witness
-	 * checks on, so it is ok to release the lock after performing this
-	 * check. All we have to protect is the LC_SLEEPLOCK case when no
-	 * spinlocks are held as we may get preempted during this check and
-	 * lock_list could end up pointing to some other CPU's spinlock list.
-	 */
-	mtx_lock_spin(&w_mtx);
-	lock_list = PCPU_PTR(spinlocks);
 	if (class->lc_flags & LC_SLEEPLOCK) {
-		if (*lock_list != NULL && (flags & LOP_TRYLOCK) == 0) {
-			mtx_unlock_spin(&w_mtx);
+		if (td->td_critnest != 0 && (flags & LOP_TRYLOCK) == 0)
 			panic("blockable sleep lock (%s) %s @ %s:%d",
 			    class->lc_name, lock->lo_name, file, line);
-		}
 		lock_list = &td->td_sleeplocks;
-	}
-	mtx_unlock_spin(&w_mtx);
+	} else
+		lock_list = PCPU_PTR(spinlocks);
 
 	/*
 	 * Try locks do not block if they fail to acquire the lock, thus
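For context, the following is a minimal userland sketch of the check this commit introduces, not the kernel code itself: instead of taking w_mtx and inspecting the per-CPU spinlock list, witness relies on the fact that holding a spin lock keeps the thread inside a critical section, so td_critnest != 0 is enough to flag a blockable sleep-lock acquisition. The type and helper names below (thread_sketch, witness_check_sketch) and the flag values are illustrative stand-ins; only td_critnest, LC_SLEEPLOCK, and LOP_TRYLOCK mirror the real identifiers.

```c
/*
 * Userland sketch of the simplified sleep-lock check.  Stand-in
 * definitions only; the real code lives in sys/kern/subr_witness.c.
 */
#include <stdio.h>
#include <stdlib.h>

#define LC_SLEEPLOCK	0x01	/* lock class may sleep (placeholder value) */
#define LOP_TRYLOCK	0x01	/* acquisition will not block (placeholder value) */

struct thread_sketch {
	int td_critnest;	/* critical-section nesting level */
};

/*
 * A thread holding any spin lock has td_critnest != 0, so this single
 * per-thread counter detects a blockable sleep lock acquired while spin
 * locks are held, without touching w_mtx or per-CPU state.
 */
static void
witness_check_sketch(struct thread_sketch *td, int class_flags, int flags)
{
	if ((class_flags & LC_SLEEPLOCK) &&
	    td->td_critnest != 0 && (flags & LOP_TRYLOCK) == 0) {
		fprintf(stderr,
		    "blockable sleep lock acquired in a critical section\n");
		exit(1);
	}
}

int
main(void)
{
	struct thread_sketch td = { .td_critnest = 0 };

	/* No spin locks held: a blocking sleep lock is fine. */
	witness_check_sketch(&td, LC_SLEEPLOCK, 0);

	/* Inside a critical section: only a try-lock acquisition passes. */
	td.td_critnest = 1;
	witness_check_sketch(&td, LC_SLEEPLOCK, LOP_TRYLOCK);

	printf("checks passed\n");
	return (0);
}
```

The appeal of the td_critnest test, given the concern spelled out in the removed comment, is that it is a per-thread counter: there is no window in which preemption can leave lock_list pointing at another CPU's spinlock list, so w_mtx no longer needs to be taken on this path at all.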
