about summary refs log tree commit diff
path: root/sys/kern
diff options
context:
space:
mode:
authorAlex Richardson <arichardson@FreeBSD.org>2021-02-18 10:25:10 +0000
committerAlex Richardson <arichardson@FreeBSD.org>2021-02-18 14:02:48 +0000
commitfa2528ac643519072c498b483d0dcc1fa5d99bc1 (patch)
tree255d2d54811e94f63b72fe40c4a88b4f11978e9f /sys/kern
parentdf093aa9463b2121d8307fb91c4ba7cf17f4ea64 (diff)
downloadsrc-fa2528ac643519072c498b483d0dcc1fa5d99bc1.tar.gz
src-fa2528ac643519072c498b483d0dcc1fa5d99bc1.zip
Use atomic loads/stores when updating td->td_state
KCSAN complains about racy accesses in the locking code. Those races are
fine since they are inside a TD_SET_RUNNING() loop that expects the value
to be changed by another CPU.

Use relaxed atomic stores/loads to indicate that this variable can be
written/read by multiple CPUs at the same time. This will also prevent
the compiler from doing unexpected re-ordering.

Reported by:	GENERIC-KCSAN
Test Plan:	KCSAN no longer complains, kernel still runs fine.
Reviewed By:	markj, mjg (earlier version)
Differential Revision:	https://reviews.freebsd.org/D28569
Diffstat (limited to 'sys/kern')
-rw-r--r--sys/kern/init_main.c2
-rw-r--r--sys/kern/kern_intr.c2
-rw-r--r--sys/kern/kern_prot.c2
-rw-r--r--sys/kern/kern_racct.c2
-rw-r--r--sys/kern/kern_synch.c4
-rw-r--r--sys/kern/kern_thread.c8
-rw-r--r--sys/kern/sched_4bsd.c2
-rw-r--r--sys/kern/subr_turnstile.c6
8 files changed, 14 insertions, 14 deletions
diff --git a/sys/kern/init_main.c b/sys/kern/init_main.c
index 5eb8186c23ca..2d6a4f636240 100644
--- a/sys/kern/init_main.c
+++ b/sys/kern/init_main.c
@@ -499,7 +499,7 @@ proc0_init(void *dummy __unused)
p->p_nice = NZERO;
td->td_tid = THREAD0_TID;
tidhash_add(td);
- td->td_state = TDS_RUNNING;
+ TD_SET_STATE(td, TDS_RUNNING);
td->td_pri_class = PRI_TIMESHARE;
td->td_user_pri = PUSER;
td->td_base_user_pri = PUSER;
diff --git a/sys/kern/kern_intr.c b/sys/kern/kern_intr.c
index 0e11af2123e2..af7c52c6b176 100644
--- a/sys/kern/kern_intr.c
+++ b/sys/kern/kern_intr.c
@@ -992,7 +992,7 @@ intr_event_schedule_thread(struct intr_event *ie)
sched_add(td, SRQ_INTR);
} else {
CTR5(KTR_INTR, "%s: pid %d (%s): it_need %d, state %d",
- __func__, td->td_proc->p_pid, td->td_name, it->it_need, td->td_state);
+ __func__, td->td_proc->p_pid, td->td_name, it->it_need, TD_GET_STATE(td));
thread_unlock(td);
}
diff --git a/sys/kern/kern_prot.c b/sys/kern/kern_prot.c
index 170e9598835e..a107c7cced95 100644
--- a/sys/kern/kern_prot.c
+++ b/sys/kern/kern_prot.c
@@ -1955,7 +1955,7 @@ credbatch_add(struct credbatch *crb, struct thread *td)
MPASS(td->td_realucred != NULL);
MPASS(td->td_realucred == td->td_ucred);
- MPASS(td->td_state == TDS_INACTIVE);
+ MPASS(TD_GET_STATE(td) == TDS_INACTIVE);
cr = td->td_realucred;
KASSERT(cr->cr_users > 0, ("%s: users %d not > 0 on cred %p",
__func__, cr->cr_users, cr));
diff --git a/sys/kern/kern_racct.c b/sys/kern/kern_racct.c
index 4df1c72d50f7..7d179fe69844 100644
--- a/sys/kern/kern_racct.c
+++ b/sys/kern/kern_racct.c
@@ -1147,7 +1147,7 @@ racct_proc_throttle(struct proc *p, int timeout)
thread_lock(td);
td->td_flags |= TDF_ASTPENDING;
- switch (td->td_state) {
+ switch (TD_GET_STATE(td)) {
case TDS_RUNQ:
/*
* If the thread is on the scheduler run-queue, we can
diff --git a/sys/kern/kern_synch.c b/sys/kern/kern_synch.c
index 4c0491ab6e85..dcca67326264 100644
--- a/sys/kern/kern_synch.c
+++ b/sys/kern/kern_synch.c
@@ -570,7 +570,7 @@ setrunnable(struct thread *td, int srqflags)
("setrunnable: pid %d is a zombie", td->td_proc->p_pid));
swapin = 0;
- switch (td->td_state) {
+ switch (TD_GET_STATE(td)) {
case TDS_RUNNING:
case TDS_RUNQ:
break;
@@ -593,7 +593,7 @@ setrunnable(struct thread *td, int srqflags)
}
break;
default:
- panic("setrunnable: state 0x%x", td->td_state);
+ panic("setrunnable: state 0x%x", TD_GET_STATE(td));
}
if ((srqflags & (SRQ_HOLD | SRQ_HOLDTD)) == 0)
thread_unlock(td);
diff --git a/sys/kern/kern_thread.c b/sys/kern/kern_thread.c
index 3561895d9fff..ea569576e7c9 100644
--- a/sys/kern/kern_thread.c
+++ b/sys/kern/kern_thread.c
@@ -346,7 +346,7 @@ thread_ctor(void *mem, int size, void *arg, int flags)
struct thread *td;
td = (struct thread *)mem;
- td->td_state = TDS_INACTIVE;
+ TD_SET_STATE(td, TDS_INACTIVE);
td->td_lastcpu = td->td_oncpu = NOCPU;
/*
@@ -379,7 +379,7 @@ thread_dtor(void *mem, int size, void *arg)
#ifdef INVARIANTS
/* Verify that this thread is in a safe state to free. */
- switch (td->td_state) {
+ switch (TD_GET_STATE(td)) {
case TDS_INHIBITED:
case TDS_RUNNING:
case TDS_CAN_RUN:
@@ -944,7 +944,7 @@ thread_exit(void)
rucollect(&p->p_ru, &td->td_ru);
PROC_STATUNLOCK(p);
- td->td_state = TDS_INACTIVE;
+ TD_SET_STATE(td, TDS_INACTIVE);
#ifdef WITNESS
witness_thread_exit(td);
#endif
@@ -993,7 +993,7 @@ thread_link(struct thread *td, struct proc *p)
* its lock has been created.
* PROC_LOCK_ASSERT(p, MA_OWNED);
*/
- td->td_state = TDS_INACTIVE;
+ TD_SET_STATE(td, TDS_INACTIVE);
td->td_proc = p;
td->td_flags = TDF_INMEM;
diff --git a/sys/kern/sched_4bsd.c b/sys/kern/sched_4bsd.c
index 7b44e12ef330..7ce9bd81704f 100644
--- a/sys/kern/sched_4bsd.c
+++ b/sys/kern/sched_4bsd.c
@@ -1764,7 +1764,7 @@ sched_affinity(struct thread *td)
if (td->td_pinned != 0 || td->td_flags & TDF_BOUND)
return;
- switch (td->td_state) {
+ switch (TD_GET_STATE(td)) {
case TDS_RUNQ:
/*
* If we are on a per-CPU runqueue that is in the set,
diff --git a/sys/kern/subr_turnstile.c b/sys/kern/subr_turnstile.c
index 2c8f2909f2b3..d966a796c867 100644
--- a/sys/kern/subr_turnstile.c
+++ b/sys/kern/subr_turnstile.c
@@ -288,7 +288,7 @@ propagate_priority(struct thread *td)
*/
KASSERT(TD_ON_LOCK(td), (
"thread %d(%s):%d holds %s but isn't blocked on a lock\n",
- td->td_tid, td->td_name, td->td_state,
+ td->td_tid, td->td_name, TD_GET_STATE(td),
ts->ts_lockobj->lo_name));
/*
@@ -1185,7 +1185,7 @@ print_lockchain(struct thread *td, const char *prefix)
}
db_printf("%sthread %d (pid %d, %s) is ", prefix, td->td_tid,
td->td_proc->p_pid, td->td_name);
- switch (td->td_state) {
+ switch (TD_GET_STATE(td)) {
case TDS_INACTIVE:
db_printf("inactive\n");
return;
@@ -1224,7 +1224,7 @@ print_lockchain(struct thread *td, const char *prefix)
db_printf("inhibited: %s\n", KTDSTATE(td));
return;
default:
- db_printf("??? (%#x)\n", td->td_state);
+ db_printf("??? (%#x)\n", TD_GET_STATE(td));
return;
}
}