diff options
| field     | value                                            | date                      |
|-----------|--------------------------------------------------|---------------------------|
| author    | John Baldwin <jhb@FreeBSD.org>                   | 2002-08-01 13:35:38 +0000 |
| committer | John Baldwin <jhb@FreeBSD.org>                   | 2002-08-01 13:35:38 +0000 |
| commit    | fbd140c78603207852b35721b6ef3520e29b7bd7 (patch) |                           |
| tree      | 26f84d226370d3aaa0b90ab4f5e6465a0baf47ba         |                           |
| parent    | 8f3e3652e43d8747c0ccfe70d9cae96b444c1490 (diff)  |                           |
Notes
| mode       | file                      | changes |
|------------|---------------------------|---------|
| -rw-r--r-- | sys/kern/kern_exit.c      | 2       |
| -rw-r--r-- | sys/kern/kern_ktrace.c    | 8       |
| -rw-r--r-- | sys/kern/kern_shutdown.c  | 20      |
| -rw-r--r-- | sys/kern/subr_taskqueue.c | 1       |

4 files changed, 18 insertions, 13 deletions
diff --git a/sys/kern/kern_exit.c b/sys/kern/kern_exit.c index fea5438f3f22..7d08e5660157 100644 --- a/sys/kern/kern_exit.c +++ b/sys/kern/kern_exit.c @@ -261,7 +261,7 @@ exit1(td, rv) * Close open files and release open-file table. * This may block! */ - fdfree(td); /* XXXKSE *//* may not be the one in proc */ + fdfree(td); /* * Remove ourself from our leader's peer list and wake our leader. diff --git a/sys/kern/kern_ktrace.c b/sys/kern/kern_ktrace.c index d38a0ceabb5f..5a40e09660ff 100644 --- a/sys/kern/kern_ktrace.c +++ b/sys/kern/kern_ktrace.c @@ -271,9 +271,11 @@ ktr_freerequest(struct ktr_request *req) { crfree(req->ktr_cred); - mtx_lock(&Giant); - vrele(req->ktr_vp); - mtx_unlock(&Giant); + if (req->ktr_vp != NULL) { + mtx_lock(&Giant); + vrele(req->ktr_vp); + mtx_unlock(&Giant); + } mtx_lock(&ktrace_mtx); STAILQ_INSERT_HEAD(&ktr_free, req, ktr_list); mtx_unlock(&ktrace_mtx); diff --git a/sys/kern/kern_shutdown.c b/sys/kern/kern_shutdown.c index 13a9c7aa26a5..ec740ec4ee5b 100644 --- a/sys/kern/kern_shutdown.c +++ b/sys/kern/kern_shutdown.c @@ -420,7 +420,7 @@ shutdown_reset(void *junk, int howto) } #ifdef SMP -static u_int panic_cpu = NOCPU; +static uintptr_t panic_thread = NULL; #endif /* @@ -441,15 +441,17 @@ panic(const char *fmt, ...) #ifdef SMP /* * We don't want multiple CPU's to panic at the same time, so we - * use panic_cpu as a simple spinlock. We have to keep checking - * panic_cpu if we are spinning in case the panic on the first + * use panic_thread as a simple spinlock. We have to keep checking + * panic_thread if we are spinning in case the panic on the first * CPU is canceled. 
*/ - if (panic_cpu != PCPU_GET(cpuid)) - while (atomic_cmpset_int(&panic_cpu, NOCPU, - PCPU_GET(cpuid)) == 0) - while (panic_cpu != NOCPU) - ; /* nothing */ + if (panic_thread != curthread) + while (atomic_cmpset_ptr(&panic_thread, NULL, curthread) == 0) + while (panic_thread != NULL) { +#ifdef __i386__ + ia32_pause(); +#endif + } #endif bootopt = RB_AUTOBOOT | RB_DUMP; @@ -481,7 +483,7 @@ panic(const char *fmt, ...) /* See if the user aborted the panic, in which case we continue. */ if (panicstr == NULL) { #ifdef SMP - atomic_store_rel_int(&panic_cpu, NOCPU); + atomic_store_rel_ptr(&panic_thread, NULL); #endif return; } diff --git a/sys/kern/subr_taskqueue.c b/sys/kern/subr_taskqueue.c index 19a93ad6c694..28fbe3873564 100644 --- a/sys/kern/subr_taskqueue.c +++ b/sys/kern/subr_taskqueue.c @@ -94,6 +94,7 @@ taskqueue_free(struct taskqueue *queue) { mtx_lock(&queue->tq_mutex); + KASSERT(queue->tq_draining == 0, ("free'ing a draining taskqueue")); queue->tq_draining = 1; mtx_unlock(&queue->tq_mutex); |
