author    Mateusz Guzik <mjg@FreeBSD.org>    2020-11-11 18:43:51 +0000
committer Mateusz Guzik <mjg@FreeBSD.org>    2020-11-11 18:43:51 +0000
commit    c5315f51960d853151756a1f4ffc8858665b2934
tree      7e900a7eb0b36b2ea9fca1e2e6ca87e4b6fb77bd
parent    54bf96fb4f167fdbc66232f5ec4a7f3f15d2db8f
Diffstat (limited to 'sys')
-rw-r--r--  sys/kern/kern_thread.c | 48
-rw-r--r--  sys/sys/proc.h         |  5
2 files changed, 29 insertions, 24 deletions
diff --git a/sys/kern/kern_thread.c b/sys/kern/kern_thread.c
index da5eaaaf2e34..535b5289133f 100644
--- a/sys/kern/kern_thread.c
+++ b/sys/kern/kern_thread.c
@@ -128,9 +128,7 @@ SDT_PROBE_DEFINE(proc, , , lwp__exit);
*/
static uma_zone_t thread_zone;
-TAILQ_HEAD(, thread) zombie_threads = TAILQ_HEAD_INITIALIZER(zombie_threads);
-static struct mtx zombie_lock;
-MTX_SYSINIT(zombie_lock, &zombie_lock, "zombie lock", MTX_SPIN);
+static __exclusive_cache_line struct thread *thread_zombies;
static void thread_zombie(struct thread *);
static int thread_unsuspend_one(struct thread *td, struct proc *p,
@@ -409,14 +407,20 @@ threadinit(void)
/*
* Place an unused thread on the zombie list.
- * Use the slpq as that must be unused by now.
*/
void
thread_zombie(struct thread *td)
{
- mtx_lock_spin(&zombie_lock);
- TAILQ_INSERT_HEAD(&zombie_threads, td, td_slpq);
- mtx_unlock_spin(&zombie_lock);
+ struct thread *ztd;
+
+ ztd = atomic_load_ptr(&thread_zombies);
+ for (;;) {
+ td->td_zombie = ztd;
+ if (atomic_fcmpset_rel_ptr((uintptr_t *)&thread_zombies,
+ (uintptr_t *)&ztd, (uintptr_t)td))
+ break;
+ continue;
+ }
}
/*
@@ -430,29 +434,27 @@ thread_stash(struct thread *td)
}
/*
- * Reap zombie resources.
+ * Reap zombie threads.
*/
void
thread_reap(void)
{
- struct thread *td_first, *td_next;
+ struct thread *itd, *ntd;
/*
- * Don't even bother to lock if none at this instant,
- * we really don't care about the next instant.
+ * Reading upfront is pessimal if followed by concurrent atomic_swap,
+ * but most of the time the list is empty.
*/
- if (!TAILQ_EMPTY(&zombie_threads)) {
- mtx_lock_spin(&zombie_lock);
- td_first = TAILQ_FIRST(&zombie_threads);
- if (td_first)
- TAILQ_INIT(&zombie_threads);
- mtx_unlock_spin(&zombie_lock);
- while (td_first) {
- td_next = TAILQ_NEXT(td_first, td_slpq);
- thread_cow_free(td_first);
- thread_free(td_first);
- td_first = td_next;
- }
+ if (thread_zombies == NULL)
+ return;
+
+ itd = (struct thread *)atomic_swap_ptr((uintptr_t *)&thread_zombies,
+ (uintptr_t)NULL);
+ while (itd != NULL) {
+ ntd = itd->td_zombie;
+ thread_cow_free(itd);
+ thread_free(itd);
+ itd = ntd;
}
}
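
The hunks above replace the spinlock-protected TAILQ with a lock-free LIFO: producers push a thread onto the list with a release compare-and-swap, and the reaper detaches the entire list with a single atomic swap, so no lock is ever taken on either side. Below is a minimal userspace sketch of the same pattern, assuming C11 <stdatomic.h> in place of the kernel's atomic_fcmpset_rel_ptr()/atomic_swap_ptr(); struct node, push(), and drain() are hypothetical stand-ins, not kernel API.

#include <stdatomic.h>
#include <stdio.h>
#include <stdlib.h>

/* Hypothetical stand-in for struct thread; only the linkage matters. */
struct node {
        struct node *next;
        int id;
};

static _Atomic(struct node *) zombies;  /* starts out NULL */

/* Producer: push one node, analogous to thread_zombie(). */
static void
push(struct node *n)
{
        struct node *head;

        head = atomic_load_explicit(&zombies, memory_order_relaxed);
        do {
                /* Link to the current head; on CAS failure, head is
                 * refreshed and the link is redone. */
                n->next = head;
        } while (!atomic_compare_exchange_weak_explicit(&zombies, &head, n,
            memory_order_release, memory_order_relaxed));
}

/* Consumer: detach the whole list in one swap, analogous to thread_reap(). */
static void
drain(void)
{
        struct node *n, *next;

        /* Cheap early return; a stale NULL only delays the reap. */
        if (atomic_load_explicit(&zombies, memory_order_relaxed) == NULL)
                return;

        n = atomic_exchange_explicit(&zombies, NULL, memory_order_acquire);
        while (n != NULL) {
                next = n->next;
                printf("reaping node %d\n", n->id);
                free(n);
                n = next;
        }
}

int
main(void)
{
        struct node *n;
        int i;

        for (i = 0; i < 3; i++) {
                n = malloc(sizeof(*n));
                n->id = i;
                push(n);
        }
        drain();
        return (0);
}

Two properties make this safe: the release/acquire pairing guarantees the reaper observes each node's next pointer, and because the consumer only ever takes the entire list with an unconditional swap (it never pops a single element), the classic ABA hazard of lock-free stacks does not apply here.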
diff --git a/sys/sys/proc.h b/sys/sys/proc.h
index 474dc0f2c5b7..21b0a2f0a41c 100644
--- a/sys/sys/proc.h
+++ b/sys/sys/proc.h
@@ -229,7 +229,10 @@ struct thread {
struct proc *td_proc; /* (*) Associated process. */
TAILQ_ENTRY(thread) td_plist; /* (*) All threads in this proc. */
TAILQ_ENTRY(thread) td_runq; /* (t) Run queue. */
- TAILQ_ENTRY(thread) td_slpq; /* (t) Sleep queue. */
+ union {
+ TAILQ_ENTRY(thread) td_slpq; /* (t) Sleep queue. */
+ struct thread *td_zombie; /* Zombie list linkage */
+ };
TAILQ_ENTRY(thread) td_lockq; /* (t) Lock queue. */
LIST_ENTRY(thread) td_hash; /* (d) Hash chain. */
struct cpuset *td_cpuset; /* (t) CPU affinity mask. */
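
The proc.h hunk overlays the new linkage on storage that is dead at that point in the thread's lifetime: a zombie thread can no longer sit on a sleep queue, so td_zombie shares td_slpq's bytes through an anonymous union and struct thread does not grow. A minimal sketch of the same space-reuse trick, with hypothetical names (struct obj, live_link, dead_link):

#include <stdio.h>
#include <sys/queue.h>

struct obj {
        union {
                TAILQ_ENTRY(obj) live_link;     /* valid while in use */
                struct obj *dead_link;          /* reused once retired */
        };
};

int
main(void)
{
        /*
         * The union costs nothing: it is as large as its biggest
         * member, so the single-pointer dead_link rides along for
         * free inside the two-pointer TAILQ_ENTRY.
         */
        printf("sizeof(struct obj) = %zu\n", sizeof(struct obj));
        return (0);
}

The kernel relies on the same lifetime argument: threads reach thread_zombie() only after leaving the scheduler, at which point td_slpq is unused, exactly as the comment removed in the first hunk ("Use the slpq as that must be unused by now") stated.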