author     Mateusz Guzik <mjg@FreeBSD.org>  2020-11-07 16:57:53 +0000
committer  Mateusz Guzik <mjg@FreeBSD.org>  2020-11-07 16:57:53 +0000
commit     42e7abd5db3ee5c9d9c37c0645fb3a880a701314 (patch)
tree       d14544e61a486196f0f64333fd8a22b21dd8e80c /sys/kern/kern_rmlock.c
parent     ecb4fdf943ca2ffa78b2564c40d27a4eea5da0da (diff)
Diffstat (limited to 'sys/kern/kern_rmlock.c')
-rw-r--r--  sys/kern/kern_rmlock.c | 199
1 file changed, 162 insertions(+), 37 deletions(-)
diff --git a/sys/kern/kern_rmlock.c b/sys/kern/kern_rmlock.c
index 7fb0025225ed..401445d3d03f 100644
--- a/sys/kern/kern_rmlock.c
+++ b/sys/kern/kern_rmlock.c
@@ -878,10 +878,105 @@ db_show_rm(const struct lock_object *lock)
* problem at some point. The easiest way to lessen it is to provide a bitmap.
*/
+#define rms_int_membar() __compiler_membar()
+
#define RMS_NOOWNER ((void *)0x1)
#define RMS_TRANSIENT ((void *)0x2)
#define RMS_FLAGMASK 0xf
+struct rmslock_pcpu {
+ int influx;
+ int readers;
+};
+
+_Static_assert(sizeof(struct rmslock_pcpu) == 8, "bad size");
+
+/*
+ * Internal routines
+ */
+static struct rmslock_pcpu *
+rms_int_pcpu(struct rmslock *rms)
+{
+
+ CRITICAL_ASSERT(curthread);
+ return (zpcpu_get(rms->pcpu));
+}
+
+static struct rmslock_pcpu *
+rms_int_remote_pcpu(struct rmslock *rms, int cpu)
+{
+
+ return (zpcpu_get_cpu(rms->pcpu, cpu));
+}
+
+static void
+rms_int_influx_enter(struct rmslock *rms, struct rmslock_pcpu *pcpu)
+{
+
+ CRITICAL_ASSERT(curthread);
+ MPASS(pcpu->influx == 0);
+ pcpu->influx = 1;
+}
+
+static void
+rms_int_influx_exit(struct rmslock *rms, struct rmslock_pcpu *pcpu)
+{
+
+ CRITICAL_ASSERT(curthread);
+ MPASS(pcpu->influx == 1);
+ pcpu->influx = 0;
+}
+
+#ifdef INVARIANTS
+static void
+rms_int_debug_readers_inc(struct rmslock *rms)
+{
+ int old;
+
+ old = atomic_fetchadd_int(&rms->debug_readers, 1);
+ KASSERT(old >= 0, ("%s: bad readers count %d\n", __func__, old));
+}
+
+static void
+rms_int_debug_readers_dec(struct rmslock *rms)
+{
+ int old;
+
+ old = atomic_fetchadd_int(&rms->debug_readers, -1);
+ KASSERT(old > 0, ("%s: bad readers count %d\n", __func__, old));
+}
+#else
+static void
+rms_int_debug_readers_inc(struct rmslock *rms)
+{
+}
+
+static void
+rms_int_debug_readers_dec(struct rmslock *rms)
+{
+}
+#endif
+
+static void
+rms_int_readers_inc(struct rmslock *rms, struct rmslock_pcpu *pcpu)
+{
+
+ CRITICAL_ASSERT(curthread);
+ rms_int_debug_readers_inc(rms);
+ pcpu->readers++;
+}
+
+static void
+rms_int_readers_dec(struct rmslock *rms, struct rmslock_pcpu *pcpu)
+{
+
+ CRITICAL_ASSERT(curthread);
+ rms_int_debug_readers_dec(rms);
+ pcpu->readers--;
+}
+
+/*
+ * Public API
+ */
void
rms_init(struct rmslock *rms, const char *name)
{
@@ -889,9 +984,9 @@ rms_init(struct rmslock *rms, const char *name)
rms->owner = RMS_NOOWNER;
rms->writers = 0;
rms->readers = 0;
+ rms->debug_readers = 0;
mtx_init(&rms->mtx, name, NULL, MTX_DEF | MTX_NEW);
- rms->readers_pcpu = uma_zalloc_pcpu(pcpu_zone_4, M_WAITOK | M_ZERO);
- rms->readers_influx = uma_zalloc_pcpu(pcpu_zone_4, M_WAITOK | M_ZERO);
+ rms->pcpu = uma_zalloc_pcpu(pcpu_zone_8, M_WAITOK | M_ZERO);
}
void
@@ -901,23 +996,21 @@ rms_destroy(struct rmslock *rms)
MPASS(rms->writers == 0);
MPASS(rms->readers == 0);
mtx_destroy(&rms->mtx);
- uma_zfree_pcpu(pcpu_zone_4, rms->readers_pcpu);
- uma_zfree_pcpu(pcpu_zone_4, rms->readers_influx);
+ uma_zfree_pcpu(pcpu_zone_8, rms->pcpu);
}
static void __noinline
rms_rlock_fallback(struct rmslock *rms)
{
- zpcpu_set_protected(rms->readers_influx, 0);
+ rms_int_influx_exit(rms, rms_int_pcpu(rms));
critical_exit();
mtx_lock(&rms->mtx);
- MPASS(*zpcpu_get(rms->readers_pcpu) == 0);
while (rms->writers > 0)
msleep(&rms->readers, &rms->mtx, PUSER - 1, mtx_name(&rms->mtx), 0);
critical_enter();
- zpcpu_add_protected(rms->readers_pcpu, 1);
+ rms_int_readers_inc(rms, rms_int_pcpu(rms));
mtx_unlock(&rms->mtx);
critical_exit();
}
@@ -925,43 +1018,46 @@ rms_rlock_fallback(struct rmslock *rms)
void
rms_rlock(struct rmslock *rms)
{
+ struct rmslock_pcpu *pcpu;
WITNESS_WARN(WARN_GIANTOK | WARN_SLEEPOK, NULL, __func__);
MPASS(atomic_load_ptr(&rms->owner) != curthread);
critical_enter();
- zpcpu_set_protected(rms->readers_influx, 1);
- __compiler_membar();
+ pcpu = rms_int_pcpu(rms);
+ rms_int_influx_enter(rms, pcpu);
+ rms_int_membar();
if (__predict_false(rms->writers > 0)) {
rms_rlock_fallback(rms);
return;
}
- __compiler_membar();
- zpcpu_add_protected(rms->readers_pcpu, 1);
- __compiler_membar();
- zpcpu_set_protected(rms->readers_influx, 0);
+ rms_int_membar();
+ rms_int_readers_inc(rms, pcpu);
+ rms_int_membar();
+ rms_int_influx_exit(rms, pcpu);
critical_exit();
}
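
The read-lock fast path above is the heart of the change, so here is a
condensed restatement with the ordering rationale spelled out as
comments. The rationale is inferred from rms_action_func and
rms_wait_func further down, not from the commit itself:

void
rms_rlock(struct rmslock *rms)
{
	struct rmslock_pcpu *pcpu;

	critical_enter();			/* pin the thread; the pcpu slot stays ours */
	pcpu = rms_int_pcpu(rms);
	rms_int_influx_enter(rms, pcpu);	/* IPI handler backs off while set */
	rms_int_membar();			/* compiler barrier only: the remote
						 * observer either runs as an IPI on
						 * this very CPU (program order is
						 * enough) or spins until influx
						 * clears, so no hardware fence */
	if (__predict_false(rms->writers > 0)) {
		rms_rlock_fallback(rms);	/* writer active: sleep on the mutex */
		return;
	}
	rms_int_membar();
	rms_int_readers_inc(rms, pcpu);		/* plain per-CPU store, no atomics */
	rms_int_membar();
	rms_int_influx_exit(rms, pcpu);		/* per-CPU state consistent again */
	critical_exit();
}
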
int
rms_try_rlock(struct rmslock *rms)
{
+ struct rmslock_pcpu *pcpu;
MPASS(atomic_load_ptr(&rms->owner) != curthread);
critical_enter();
- zpcpu_set_protected(rms->readers_influx, 1);
- __compiler_membar();
+ pcpu = rms_int_pcpu(rms);
+ rms_int_influx_enter(rms, pcpu);
+ rms_int_membar();
if (__predict_false(rms->writers > 0)) {
- __compiler_membar();
- zpcpu_set_protected(rms->readers_influx, 0);
+ rms_int_influx_exit(rms, pcpu);
critical_exit();
return (0);
}
- __compiler_membar();
- zpcpu_add_protected(rms->readers_pcpu, 1);
- __compiler_membar();
- zpcpu_set_protected(rms->readers_influx, 0);
+ rms_int_membar();
+ rms_int_readers_inc(rms, pcpu);
+ rms_int_membar();
+ rms_int_influx_exit(rms, pcpu);
critical_exit();
return (1);
}
@@ -970,13 +1066,14 @@ static void __noinline
rms_runlock_fallback(struct rmslock *rms)
{
- zpcpu_set_protected(rms->readers_influx, 0);
+ rms_int_influx_exit(rms, rms_int_pcpu(rms));
critical_exit();
mtx_lock(&rms->mtx);
- MPASS(*zpcpu_get(rms->readers_pcpu) == 0);
MPASS(rms->writers > 0);
MPASS(rms->readers > 0);
+ MPASS(rms->debug_readers == rms->readers);
+ rms_int_debug_readers_dec(rms);
rms->readers--;
if (rms->readers == 0)
wakeup_one(&rms->writers);
@@ -986,18 +1083,20 @@ rms_runlock_fallback(struct rmslock *rms)
void
rms_runlock(struct rmslock *rms)
{
+ struct rmslock_pcpu *pcpu;
critical_enter();
- zpcpu_set_protected(rms->readers_influx, 1);
- __compiler_membar();
+ pcpu = rms_int_pcpu(rms);
+ rms_int_influx_enter(rms, pcpu);
+ rms_int_membar();
if (__predict_false(rms->writers > 0)) {
rms_runlock_fallback(rms);
return;
}
- __compiler_membar();
- zpcpu_sub_protected(rms->readers_pcpu, 1);
- __compiler_membar();
- zpcpu_set_protected(rms->readers_influx, 0);
+ rms_int_membar();
+ rms_int_readers_dec(rms, pcpu);
+ rms_int_membar();
+ rms_int_influx_exit(rms, pcpu);
critical_exit();
}
@@ -1010,17 +1109,19 @@ static void
rms_action_func(void *arg)
{
struct rmslock_ipi *rmsipi;
+ struct rmslock_pcpu *pcpu;
struct rmslock *rms;
- int readers;
rmsipi = __containerof(arg, struct rmslock_ipi, srcra);
rms = rmsipi->rms;
+ pcpu = rms_int_pcpu(rms);
- if (*zpcpu_get(rms->readers_influx))
+ if (pcpu->influx)
return;
- readers = zpcpu_replace(rms->readers_pcpu, 0);
- if (readers != 0)
- atomic_add_int(&rms->readers, readers);
+ if (pcpu->readers != 0) {
+ atomic_add_int(&rms->readers, pcpu->readers);
+ pcpu->readers = 0;
+ }
smp_rendezvous_cpus_done(arg);
}
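
rms_action_func and rms_wait_func are the two halves of the write-side
switch: the action handler, running on each CPU in turn, folds that
CPU's pcpu->readers into the central rms->readers, unless the CPU is
mid-update (influx), in which case the wait handler spins and the CPU
gets retried. rms_wlock_switch itself is unchanged and not shown in
this diff; judging by the srcra member it is presumably wired up
through smp_rendezvous_cpus_retry() roughly like this (a sketch, not
the actual body):

static void
rms_wlock_switch(struct rmslock *rms)
{
	struct rmslock_ipi rmsipi;

	rmsipi.rms = rms;

	/*
	 * Interrupt every CPU; rms_wait_func makes the rendezvous
	 * retry any CPU caught with influx set.
	 */
	smp_rendezvous_cpus_retry(all_cpus,
	    smp_no_rendezvous_barrier, rms_action_func,
	    smp_no_rendezvous_barrier, rms_wait_func,
	    &rmsipi.srcra);
}
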
@@ -1028,17 +1129,39 @@ static void
rms_wait_func(void *arg, int cpu)
{
struct rmslock_ipi *rmsipi;
+ struct rmslock_pcpu *pcpu;
struct rmslock *rms;
- int *in_op;
rmsipi = __containerof(arg, struct rmslock_ipi, srcra);
rms = rmsipi->rms;
+ pcpu = rms_int_remote_pcpu(rms, cpu);
- in_op = zpcpu_get_cpu(rms->readers_influx, cpu);
- while (atomic_load_int(in_op))
+ while (atomic_load_int(&pcpu->influx))
cpu_spinwait();
}
+#ifdef INVARIANTS
+static void
+rms_assert_no_pcpu_readers(struct rmslock *rms)
+{
+ struct rmslock_pcpu *pcpu;
+ int cpu;
+
+ CPU_FOREACH(cpu) {
+ pcpu = rms_int_remote_pcpu(rms, cpu);
+ if (pcpu->readers != 0) {
+ panic("%s: got %d readers on cpu %d\n", __func__,
+ pcpu->readers, cpu);
+ }
+ }
+}
+#else
+static void
+rms_assert_no_pcpu_readers(struct rmslock *rms)
+{
+}
+#endif
+
static void
rms_wlock_switch(struct rmslock *rms)
{
@@ -1080,6 +1203,7 @@ rms_wlock(struct rmslock *rms)
("%s: unexpected owner value %p\n", __func__, rms->owner));
rms_wlock_switch(rms);
+ rms_assert_no_pcpu_readers(rms);
if (rms->readers > 0) {
msleep(&rms->writers, &rms->mtx, (PUSER - 1),
@@ -1088,6 +1212,7 @@ rms_wlock(struct rmslock *rms)
out_grab:
rms->owner = curthread;
+ rms_assert_no_pcpu_readers(rms);
mtx_unlock(&rms->mtx);
MPASS(rms->readers == 0);
}
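
To close, a hypothetical consumer showing the intended usage pattern;
counters_lock and counters are made-up names, while the rms_* calls are
the API from this file. Read locking stays on the per-CPU fast path,
whereas rms_wlock interrupts every CPU, so the primitive only pays off
for read-mostly data:

static struct rmslock counters_lock;
static uint64_t counters[8];

static void
counters_init(void)
{

	rms_init(&counters_lock, "counters");
}

static uint64_t
counters_read(int idx)
{
	uint64_t v;

	rms_rlock(&counters_lock);	/* cheap; may sleep if a writer holds it */
	v = counters[idx];
	rms_runlock(&counters_lock);
	return (v);
}

static void
counters_update(int idx, uint64_t v)
{

	rms_wlock(&counters_lock);	/* expensive: rendezvouses all CPUs */
	counters[idx] = v;
	rms_wunlock(&counters_lock);
}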