-rw-r--r--  lib/libthr/arch/amd64/Makefile.inc            2
-rw-r--r--  lib/libthr/arch/amd64/include/pthread_md.h    3
-rw-r--r--  lib/libthr/arch/i386/Makefile.inc             2
-rw-r--r--  lib/libthr/arch/i386/include/pthread_md.h     3
-rw-r--r--  lib/libthr/thread/thr_private.h               9
-rw-r--r--  lib/libthr/thread/thr_rwlock.c              325
-rw-r--r--  lib/libthr/thread/thr_umtx.c                 87
-rw-r--r--  lib/libthr/thread/thr_umtx.h                 81
8 files changed, 288 insertions(+), 224 deletions(-)
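
In short: this commit reimplements libthr's POSIX reader/writer locks on top of the kernel's struct urwlock. struct pthread_rwlock shrinks from a monitor (a mutex, two condition variables, and manual state) to a urwlock plus an owner pointer; the common paths are handled by userland atomics (the new inlines in thr_umtx.h), and only contended acquisitions enter the kernel through the new UMTX_OP_RW_RDLOCK/UMTX_OP_RW_WRLOCK/UMTX_OP_RW_UNLOCK wrappers. A raw syscall stub, _umtx_op_err, is added on amd64 and i386 so those wrappers get the error number back as the return value instead of decoding errno.
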
diff --git a/lib/libthr/arch/amd64/Makefile.inc b/lib/libthr/arch/amd64/Makefile.inc
index 439bc698d1d1..f62999f9c88e 100644
--- a/lib/libthr/arch/amd64/Makefile.inc
+++ b/lib/libthr/arch/amd64/Makefile.inc
@@ -2,4 +2,4 @@
.PATH: ${.CURDIR}/arch/${MACHINE_ARCH}/${MACHINE_ARCH}
-SRCS+= pthread_md.c
+SRCS+= pthread_md.c _umtx_op_err.S
diff --git a/lib/libthr/arch/amd64/include/pthread_md.h b/lib/libthr/arch/amd64/include/pthread_md.h
index 6814cacceee0..7c4f3ba865d5 100644
--- a/lib/libthr/arch/amd64/include/pthread_md.h
+++ b/lib/libthr/arch/amd64/include/pthread_md.h
@@ -97,4 +97,7 @@ _get_curthread(void)
{
return (TCB_GET64(tcb_thread));
}
+
+#define HAS__UMTX_OP_ERR 1
+
#endif
diff --git a/lib/libthr/arch/i386/Makefile.inc b/lib/libthr/arch/i386/Makefile.inc
index b6a4acd6f60c..1d3aa97c2b4e 100644
--- a/lib/libthr/arch/i386/Makefile.inc
+++ b/lib/libthr/arch/i386/Makefile.inc
@@ -2,4 +2,4 @@
.PATH: ${.CURDIR}/arch/${MACHINE_ARCH}/${MACHINE_ARCH}
-SRCS+= pthread_md.c
+SRCS+= pthread_md.c _umtx_op_err.S
diff --git a/lib/libthr/arch/i386/include/pthread_md.h b/lib/libthr/arch/i386/include/pthread_md.h
index 1f629e176fb1..5c00cf694d31 100644
--- a/lib/libthr/arch/i386/include/pthread_md.h
+++ b/lib/libthr/arch/i386/include/pthread_md.h
@@ -102,4 +102,7 @@ _get_curthread(void)
{
return (TCB_GET32(tcb_thread));
}
+
+#define HAS__UMTX_OP_ERR 1
+
#endif
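
Both Makefile changes pull in a new assembly source, _umtx_op_err.S, and each arch header advertises it with HAS__UMTX_OP_ERR; thr_umtx.c below carries a C fallback for architectures without the stub. The stub itself is not part of this diff. As a rough illustration only, a minimal amd64 version might look like the following, assuming the standard FreeBSD syscall convention in which the kernel leaves the error number in %rax on failure, so skipping libc's cerror/errno handling returns it directly:

#include <machine/asm.h>
#include <sys/syscall.h>

/* Hypothetical sketch; the real stub lives under
 * lib/libthr/arch/amd64/amd64/_umtx_op_err.S. */
ENTRY(_umtx_op_err)
	movq	%rcx, %r10		/* syscall ABI takes arg4 in %r10 */
	movl	$SYS__umtx_op, %eax
	syscall				/* %rax: 0 on success, errno value on error */
	ret
END(_umtx_op_err)
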
diff --git a/lib/libthr/thread/thr_private.h b/lib/libthr/thread/thr_private.h
index 266afffaefb9..2685cd204e20 100644
--- a/lib/libthr/thread/thr_private.h
+++ b/lib/libthr/thread/thr_private.h
@@ -266,11 +266,8 @@ struct pthread_rwlockattr {
};
struct pthread_rwlock {
- pthread_mutex_t lock; /* monitor lock */
- pthread_cond_t read_signal;
- pthread_cond_t write_signal;
- int state; /* 0 = idle >0 = # of readers -1 = writer */
- int blocked_writers;
+ struct urwlock lock;
+ struct pthread *owner;
};
/*
@@ -699,6 +696,8 @@ ssize_t __sys_write(int, const void *, size_t);
void __sys_exit(int);
#endif
+int _umtx_op_err(void *, int op, u_long, void *, void *) __hidden;
+
static inline int
_thr_isthreaded(void)
{
diff --git a/lib/libthr/thread/thr_rwlock.c b/lib/libthr/thread/thr_rwlock.c
index 81d145976245..641749b3b427 100644
--- a/lib/libthr/thread/thr_rwlock.c
+++ b/lib/libthr/thread/thr_rwlock.c
@@ -35,9 +35,6 @@
#include "un-namespace.h"
#include "thr_private.h"
-/* maximum number of times a read lock may be obtained */
-#define MAX_READ_LOCKS (INT_MAX - 1)
-
__weak_reference(_pthread_rwlock_destroy, pthread_rwlock_destroy);
__weak_reference(_pthread_rwlock_init, pthread_rwlock_init);
__weak_reference(_pthread_rwlock_rdlock, pthread_rwlock_rdlock);
@@ -56,42 +53,12 @@ static int
rwlock_init(pthread_rwlock_t *rwlock, const pthread_rwlockattr_t *attr __unused)
{
pthread_rwlock_t prwlock;
- int ret;
-
- /* allocate rwlock object */
- prwlock = (pthread_rwlock_t)malloc(sizeof(struct pthread_rwlock));
+ prwlock = (pthread_rwlock_t)calloc(1, sizeof(struct pthread_rwlock));
if (prwlock == NULL)
return (ENOMEM);
-
- /* initialize the lock */
- if ((ret = _pthread_mutex_init(&prwlock->lock, NULL)) != 0)
- free(prwlock);
- else {
- /* initialize the read condition signal */
- ret = _pthread_cond_init(&prwlock->read_signal, NULL);
-
- if (ret != 0) {
- _pthread_mutex_destroy(&prwlock->lock);
- free(prwlock);
- } else {
- /* initialize the write condition signal */
- ret = _pthread_cond_init(&prwlock->write_signal, NULL);
-
- if (ret != 0) {
- _pthread_cond_destroy(&prwlock->read_signal);
- _pthread_mutex_destroy(&prwlock->lock);
- free(prwlock);
- } else {
- /* success */
- prwlock->state = 0;
- prwlock->blocked_writers = 0;
- *rwlock = prwlock;
- }
- }
- }
-
- return (ret);
+ *rwlock = prwlock;
+ return (0);
}
int
@@ -105,14 +72,9 @@ _pthread_rwlock_destroy (pthread_rwlock_t *rwlock)
pthread_rwlock_t prwlock;
prwlock = *rwlock;
-
- _pthread_mutex_destroy(&prwlock->lock);
- _pthread_cond_destroy(&prwlock->read_signal);
- _pthread_cond_destroy(&prwlock->write_signal);
- free(prwlock);
-
*rwlock = NULL;
+ free(prwlock);
ret = 0;
}
return (ret);
@@ -143,36 +105,28 @@ _pthread_rwlock_init (pthread_rwlock_t *rwlock, const pthread_rwlockattr_t *attr
}
static int
-rwlock_rdlock_common (pthread_rwlock_t *rwlock, const struct timespec *abstime)
+rwlock_rdlock_common(pthread_rwlock_t *rwlock, const struct timespec *abstime)
{
struct pthread *curthread = _get_curthread();
pthread_rwlock_t prwlock;
+ struct timespec ts, ts2, *tsp;
+ int flags;
int ret;
- if (rwlock == NULL)
+ if (__predict_false(rwlock == NULL))
return (EINVAL);
prwlock = *rwlock;
/* check for static initialization */
- if (prwlock == NULL) {
+ if (__predict_false(prwlock == NULL)) {
if ((ret = init_static(curthread, rwlock)) != 0)
return (ret);
prwlock = *rwlock;
}
- /* grab the monitor lock */
- if ((ret = _pthread_mutex_lock(&prwlock->lock)) != 0)
- return (ret);
-
- /* check lock count */
- if (prwlock->state == MAX_READ_LOCKS) {
- _pthread_mutex_unlock(&prwlock->lock);
- return (EAGAIN);
- }
-
- if ((curthread->rdlock_count > 0) && (prwlock->state > 0)) {
+ if (curthread->rdlock_count) {
/*
* To avoid having to track all the rdlocks held by
* a thread or all of the threads that hold a rdlock,
@@ -185,36 +139,48 @@ rwlock_rdlock_common (pthread_rwlock_t *rwlock, const struct timespec *abstime)
* when it already has one or more rdlocks avoids the
* deadlock. I hope the reader can follow that logic ;-)
*/
- ; /* nothing needed */
+ flags = URWLOCK_PREFER_READER;
} else {
- /* give writers priority over readers */
- while (prwlock->blocked_writers || prwlock->state < 0) {
- if (abstime)
- ret = _pthread_cond_timedwait
- (&prwlock->read_signal,
- &prwlock->lock, abstime);
- else
- ret = _pthread_cond_wait(&prwlock->read_signal,
- &prwlock->lock);
- if (ret != 0) {
- /* can't do a whole lot if this fails */
- _pthread_mutex_unlock(&prwlock->lock);
- return (ret);
- }
- }
+ flags = 0;
}
- curthread->rdlock_count++;
- prwlock->state++; /* indicate we are locked for reading */
-
/*
- * Something is really wrong if this call fails. Returning
- * error won't do because we've already obtained the read
- * lock. Decrementing 'state' is no good because we probably
- * don't have the monitor lock.
+ * POSIX said the validity of the abstimeout parameter need
+ * not be checked if the lock can be immediately acquired.
*/
- _pthread_mutex_unlock(&prwlock->lock);
+ ret = _thr_rwlock_tryrdlock(&prwlock->lock, flags);
+ if (ret == 0) {
+ curthread->rdlock_count++;
+ return (ret);
+ }
+ if (__predict_false(abstime &&
+ (abstime->tv_nsec >= 1000000000 || abstime->tv_nsec < 0)))
+ return (EINVAL);
+
+ for (;;) {
+ if (abstime) {
+ clock_gettime(CLOCK_REALTIME, &ts);
+ TIMESPEC_SUB(&ts2, abstime, &ts);
+ if (ts2.tv_sec < 0 ||
+ (ts2.tv_sec == 0 && ts2.tv_nsec <= 0))
+ return (ETIMEDOUT);
+ tsp = &ts2;
+ } else
+ tsp = NULL;
+
+ /* goto kernel and lock it */
+ ret = __thr_rwlock_rdlock(&prwlock->lock, flags, tsp);
+ if (ret != EINTR)
+ break;
+
+ /* if interrupted, try to lock it in userland again. */
+ if (_thr_rwlock_tryrdlock(&prwlock->lock, flags) == 0) {
+ ret = 0;
+ curthread->rdlock_count++;
+ break;
+ }
+ }
return (ret);
}
@@ -236,43 +202,43 @@ _pthread_rwlock_tryrdlock (pthread_rwlock_t *rwlock)
{
struct pthread *curthread = _get_curthread();
pthread_rwlock_t prwlock;
+ int flags;
int ret;
- if (rwlock == NULL)
+ if (__predict_false(rwlock == NULL))
return (EINVAL);
prwlock = *rwlock;
/* check for static initialization */
- if (prwlock == NULL) {
+ if (__predict_false(prwlock == NULL)) {
if ((ret = init_static(curthread, rwlock)) != 0)
return (ret);
prwlock = *rwlock;
}
- /* grab the monitor lock */
- if ((ret = _pthread_mutex_lock(&prwlock->lock)) != 0)
- return (ret);
-
- if (prwlock->state == MAX_READ_LOCKS)
- ret = EAGAIN;
- else if ((curthread->rdlock_count > 0) && (prwlock->state > 0)) {
- /* see comment for pthread_rwlock_rdlock() */
- curthread->rdlock_count++;
- prwlock->state++;
- }
- /* give writers priority over readers */
- else if (prwlock->blocked_writers || prwlock->state < 0)
- ret = EBUSY;
- else {
- curthread->rdlock_count++;
- prwlock->state++; /* indicate we are locked for reading */
+ if (curthread->rdlock_count) {
+ /*
+ * To avoid having to track all the rdlocks held by
+ * a thread or all of the threads that hold a rdlock,
+ * we keep a simple count of all the rdlocks held by
+ * a thread. If a thread holds any rdlocks it is
+ * possible that it is attempting to take a recursive
+ * rdlock. If there are blocked writers and precedence
+ * is given to them, then that would result in the thread
+ * deadlocking. So allowing a thread to take the rdlock
+ * when it already has one or more rdlocks avoids the
+ * deadlock. I hope the reader can follow that logic ;-)
+ */
+ flags = URWLOCK_PREFER_READER;
+ } else {
+ flags = 0;
}
- /* see the comment on this in pthread_rwlock_rdlock */
- _pthread_mutex_unlock(&prwlock->lock);
-
+ ret = _thr_rwlock_tryrdlock(&prwlock->lock, flags);
+ if (ret == 0)
+ curthread->rdlock_count++;
return (ret);
}
@@ -283,72 +249,22 @@ _pthread_rwlock_trywrlock (pthread_rwlock_t *rwlock)
pthread_rwlock_t prwlock;
int ret;
- if (rwlock == NULL)
+ if (__predict_false(rwlock == NULL))
return (EINVAL);
prwlock = *rwlock;
/* check for static initialization */
- if (prwlock == NULL) {
+ if (__predict_false(prwlock == NULL)) {
if ((ret = init_static(curthread, rwlock)) != 0)
return (ret);
prwlock = *rwlock;
}
- /* grab the monitor lock */
- if ((ret = _pthread_mutex_lock(&prwlock->lock)) != 0)
- return (ret);
-
- if (prwlock->state != 0)
- ret = EBUSY;
- else
- /* indicate we are locked for writing */
- prwlock->state = -1;
-
- /* see the comment on this in pthread_rwlock_rdlock */
- _pthread_mutex_unlock(&prwlock->lock);
-
- return (ret);
-}
-
-int
-_pthread_rwlock_unlock (pthread_rwlock_t *rwlock)
-{
- struct pthread *curthread = _get_curthread();
- pthread_rwlock_t prwlock;
- int ret;
-
- if (rwlock == NULL)
- return (EINVAL);
-
- prwlock = *rwlock;
-
- if (prwlock == NULL)
- return (EINVAL);
-
- /* grab the monitor lock */
- if ((ret = _pthread_mutex_lock(&prwlock->lock)) != 0)
- return (ret);
-
- if (prwlock->state > 0) {
- curthread->rdlock_count--;
- prwlock->state--;
- if (prwlock->state == 0 && prwlock->blocked_writers)
- ret = _pthread_cond_signal(&prwlock->write_signal);
- } else if (prwlock->state < 0) {
- prwlock->state = 0;
-
- if (prwlock->blocked_writers)
- ret = _pthread_cond_signal(&prwlock->write_signal);
- else
- ret = _pthread_cond_broadcast(&prwlock->read_signal);
- } else
- ret = EINVAL;
-
- /* see the comment on this in pthread_rwlock_rdlock */
- _pthread_mutex_unlock(&prwlock->lock);
-
+ ret = _thr_rwlock_trywrlock(&prwlock->lock);
+ if (ret == 0)
+ prwlock->owner = curthread;
return (ret);
}
@@ -357,49 +273,64 @@ rwlock_wrlock_common (pthread_rwlock_t *rwlock, const struct timespec *abstime)
{
struct pthread *curthread = _get_curthread();
pthread_rwlock_t prwlock;
+ struct timespec ts, ts2, *tsp;
int ret;
- if (rwlock == NULL)
+ if (__predict_false(rwlock == NULL))
return (EINVAL);
prwlock = *rwlock;
/* check for static initialization */
- if (prwlock == NULL) {
+ if (__predict_false(prwlock == NULL)) {
if ((ret = init_static(curthread, rwlock)) != 0)
return (ret);
prwlock = *rwlock;
}
- /* grab the monitor lock */
- if ((ret = _pthread_mutex_lock(&prwlock->lock)) != 0)
+ /*
+ * POSIX said the validity of the abstimeout parameter need
+ * not be checked if the lock can be immediately acquired.
+ */
+ ret = _thr_rwlock_trywrlock(&prwlock->lock);
+ if (ret == 0) {
+ prwlock->owner = curthread;
return (ret);
-
- while (prwlock->state != 0) {
- prwlock->blocked_writers++;
-
- if (abstime != NULL)
- ret = _pthread_cond_timedwait(&prwlock->write_signal,
- &prwlock->lock, abstime);
- else
- ret = _pthread_cond_wait(&prwlock->write_signal,
- &prwlock->lock);
- if (ret != 0) {
- prwlock->blocked_writers--;
- _pthread_mutex_unlock(&prwlock->lock);
- return (ret);
- }
-
- prwlock->blocked_writers--;
}
- /* indicate we are locked for writing */
- prwlock->state = -1;
+ if (__predict_false(abstime &&
+ (abstime->tv_nsec >= 1000000000 || abstime->tv_nsec < 0)))
+ return (EINVAL);
- /* see the comment on this in pthread_rwlock_rdlock */
- _pthread_mutex_unlock(&prwlock->lock);
+ for (;;) {
+ if (abstime != NULL) {
+ clock_gettime(CLOCK_REALTIME, &ts);
+ TIMESPEC_SUB(&ts2, abstime, &ts);
+ if (ts2.tv_sec < 0 ||
+ (ts2.tv_sec == 0 && ts2.tv_nsec <= 0))
+ return (ETIMEDOUT);
+ tsp = &ts2;
+ } else
+ tsp = NULL;
+
+ /* goto kernel and lock it */
+ ret = __thr_rwlock_wrlock(&prwlock->lock, tsp);
+ if (ret == 0) {
+ prwlock->owner = curthread;
+ break;
+ }
+
+ if (ret != EINTR)
+ break;
+ /* if interrupted, try to lock it in userland again. */
+ if (_thr_rwlock_trywrlock(&prwlock->lock) == 0) {
+ ret = 0;
+ prwlock->owner = curthread;
+ break;
+ }
+ }
return (ret);
}
@@ -415,3 +346,33 @@ _pthread_rwlock_timedwrlock (pthread_rwlock_t *rwlock,
{
return (rwlock_wrlock_common (rwlock, abstime));
}
+
+int
+_pthread_rwlock_unlock (pthread_rwlock_t *rwlock)
+{
+ struct pthread *curthread = _get_curthread();
+ pthread_rwlock_t prwlock;
+ int ret;
+ int32_t state;
+
+ if (__predict_false(rwlock == NULL))
+ return (EINVAL);
+
+ prwlock = *rwlock;
+
+ if (__predict_false(prwlock == NULL))
+ return (EINVAL);
+
+ state = prwlock->lock.rw_state;
+ if (state & URWLOCK_WRITE_OWNER) {
+ if (__predict_false(prwlock->owner != curthread))
+ return (EPERM);
+ prwlock->owner = NULL;
+ }
+
+ ret = _thr_rwlock_unlock(&prwlock->lock);
+ if (ret == 0 && (state & URWLOCK_WRITE_OWNER) == 0)
+ curthread->rdlock_count--;
+
+ return (ret);
+}
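
The long comment kept in rwlock_rdlock_common() and _pthread_rwlock_tryrdlock() is the reason for the URWLOCK_PREFER_READER flag: a thread that already holds read locks must be allowed to take another one even while writers are queued, or writer preference turns a recursive read into a deadlock. A hypothetical test (not part of this commit) that exercises exactly that scenario:

#include <pthread.h>
#include <stdio.h>
#include <unistd.h>

static pthread_rwlock_t rw;

static void *
writer(void *arg)
{
	(void)arg;
	pthread_rwlock_wrlock(&rw);	/* blocks behind main's read lock */
	pthread_rwlock_unlock(&rw);
	return (NULL);
}

int
main(void)
{
	pthread_t t;

	pthread_rwlock_init(&rw, NULL);
	pthread_rwlock_rdlock(&rw);	/* curthread->rdlock_count becomes 1 */
	pthread_create(&t, NULL, writer, NULL);
	sleep(1);			/* let the writer queue as a waiter */
	pthread_rwlock_rdlock(&rw);	/* recursive read, taken with
					 * URWLOCK_PREFER_READER, so the
					 * waiting writer is bypassed */
	pthread_rwlock_unlock(&rw);
	pthread_rwlock_unlock(&rw);	/* last unlock lets the writer in */
	pthread_join(t, NULL);
	printf("no deadlock\n");
	return (0);
}

Build with cc -o rwtest rwtest.c -lpthread; under strict writer preference the second rdlock would never return.
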
diff --git a/lib/libthr/thread/thr_umtx.c b/lib/libthr/thread/thr_umtx.c
index 0a5ee6a31113..4697e2bcbf26 100644
--- a/lib/libthr/thread/thr_umtx.c
+++ b/lib/libthr/thread/thr_umtx.c
@@ -30,6 +30,15 @@
#include "thr_private.h"
#include "thr_umtx.h"
+#ifndef HAS__UMTX_OP_ERR
+int _umtx_op_err(void *obj, int op, u_long val, void *uaddr, void *uaddr2)
+{
+ if (_umtx_op(obj, op, val, uaddr, uaddr2) == -1)
+ return (errno);
+ return (0);
+}
+#endif
+
void
_thr_umutex_init(struct umutex *mtx)
{
@@ -41,9 +50,7 @@ _thr_umutex_init(struct umutex *mtx)
int
__thr_umutex_lock(struct umutex *mtx)
{
- if (_umtx_op(mtx, UMTX_OP_MUTEX_LOCK, 0, 0, 0) != -1)
- return 0;
- return (errno);
+ return _umtx_op_err(mtx, UMTX_OP_MUTEX_LOCK, 0, 0, 0);
}
int
@@ -53,35 +60,28 @@ __thr_umutex_timedlock(struct umutex *mtx,
if (timeout && (timeout->tv_sec < 0 || (timeout->tv_sec == 0 &&
timeout->tv_nsec <= 0)))
return (ETIMEDOUT);
- if (_umtx_op(mtx, UMTX_OP_MUTEX_LOCK, 0, 0,
- __DECONST(void *, timeout)) != -1)
- return (0);
- return (errno);
+
+ return _umtx_op_err(mtx, UMTX_OP_MUTEX_LOCK, 0, 0,
+ __DECONST(void *, timeout));
}
int
__thr_umutex_unlock(struct umutex *mtx)
{
- if (_umtx_op(mtx, UMTX_OP_MUTEX_UNLOCK, 0, 0, 0) != -1)
- return (0);
- return (errno);
+ return _umtx_op_err(mtx, UMTX_OP_MUTEX_UNLOCK, 0, 0, 0);
}
int
__thr_umutex_trylock(struct umutex *mtx)
{
- if (_umtx_op(mtx, UMTX_OP_MUTEX_TRYLOCK, 0, 0, 0) != -1)
- return (0);
- return (errno);
+ return _umtx_op_err(mtx, UMTX_OP_MUTEX_TRYLOCK, 0, 0, 0);
}
int
__thr_umutex_set_ceiling(struct umutex *mtx, uint32_t ceiling,
uint32_t *oldceiling)
{
- if (_umtx_op(mtx, UMTX_OP_SET_CEILING, ceiling, oldceiling, 0) != -1)
- return (0);
- return (errno);
+ return _umtx_op_err(mtx, UMTX_OP_SET_CEILING, ceiling, oldceiling, 0);
}
int
@@ -90,19 +90,25 @@ _thr_umtx_wait(volatile umtx_t *mtx, long id, const struct timespec *timeout)
if (timeout && (timeout->tv_sec < 0 || (timeout->tv_sec == 0 &&
timeout->tv_nsec <= 0)))
return (ETIMEDOUT);
- if (_umtx_op(__DEVOLATILE(void *, mtx), UMTX_OP_WAIT, id, 0,
- __DECONST(void*, timeout)) != -1)
- return (0);
- return (errno);
+ return _umtx_op_err(__DEVOLATILE(void *, mtx), UMTX_OP_WAIT, id, 0,
+ __DECONST(void*, timeout));
+}
+
+int
+_thr_umtx_wait_uint(volatile u_int *mtx, u_int id, const struct timespec *timeout)
+{
+ if (timeout && (timeout->tv_sec < 0 || (timeout->tv_sec == 0 &&
+ timeout->tv_nsec <= 0)))
+ return (ETIMEDOUT);
+ return _umtx_op_err(__DEVOLATILE(void *, mtx), UMTX_OP_WAIT_UINT, id, 0,
+ __DECONST(void*, timeout));
}
int
_thr_umtx_wake(volatile umtx_t *mtx, int nr_wakeup)
{
- if (_umtx_op(__DEVOLATILE(void *, mtx), UMTX_OP_WAKE,
- nr_wakeup, 0, 0) != -1)
- return (0);
- return (errno);
+ return _umtx_op_err(__DEVOLATILE(void *, mtx), UMTX_OP_WAKE,
+ nr_wakeup, 0, 0);
}
void
@@ -120,12 +126,9 @@ _thr_ucond_wait(struct ucond *cv, struct umutex *m,
__thr_umutex_unlock(m);
return (ETIMEDOUT);
}
- if (_umtx_op(cv, UMTX_OP_CV_WAIT,
+ return _umtx_op_err(cv, UMTX_OP_CV_WAIT,
check_unparking ? UMTX_CHECK_UNPARKING : 0,
- m, __DECONST(void*, timeout)) != -1) {
- return (0);
- }
- return (errno);
+ m, __DECONST(void*, timeout));
}
int
@@ -133,9 +136,7 @@ _thr_ucond_signal(struct ucond *cv)
{
if (!cv->c_has_waiters)
return (0);
- if (_umtx_op(cv, UMTX_OP_CV_SIGNAL, 0, NULL, NULL) != -1)
- return (0);
- return (errno);
+ return _umtx_op_err(cv, UMTX_OP_CV_SIGNAL, 0, NULL, NULL);
}
int
@@ -143,7 +144,23 @@ _thr_ucond_broadcast(struct ucond *cv)
{
if (!cv->c_has_waiters)
return (0);
- if (_umtx_op(cv, UMTX_OP_CV_BROADCAST, 0, NULL, NULL) != -1)
- return (0);
- return (errno);
+ return _umtx_op_err(cv, UMTX_OP_CV_BROADCAST, 0, NULL, NULL);
+}
+
+int
+__thr_rwlock_rdlock(struct urwlock *rwlock, int flags, struct timespec *tsp)
+{
+ return _umtx_op_err(rwlock, UMTX_OP_RW_RDLOCK, flags, NULL, tsp);
+}
+
+int
+__thr_rwlock_wrlock(struct urwlock *rwlock, struct timespec *tsp)
+{
+ return _umtx_op_err(rwlock, UMTX_OP_RW_WRLOCK, 0, NULL, tsp);
+}
+
+int
+__thr_rwlock_unlock(struct urwlock *rwlock)
+{
+ return _umtx_op_err(rwlock, UMTX_OP_RW_UNLOCK, 0, NULL, NULL);
}
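
The three new wrappers map one-to-one onto the kernel's UMTX_OP_RW_* operations. For a feel of the raw interface, here is a hypothetical FreeBSD-only snippet (not part of this commit) driving a urwlock through the plain libc _umtx_op stub, which reports failure through errno; the commit's _umtx_op_err variant returns the error number instead:

#include <sys/types.h>
#include <sys/umtx.h>
#include <stdio.h>
#include <string.h>

int
main(void)
{
	struct urwlock rw;

	memset(&rw, 0, sizeof(rw));	/* all zeroes: unlocked, default flags */
	if (_umtx_op(&rw, UMTX_OP_RW_RDLOCK, 0, NULL, NULL) == -1)
		perror("UMTX_OP_RW_RDLOCK");
	printf("after rdlock: rw_state=%#x\n", (unsigned)rw.rw_state);
	if (_umtx_op(&rw, UMTX_OP_RW_UNLOCK, 0, NULL, NULL) == -1)
		perror("UMTX_OP_RW_UNLOCK");
	return (0);
}
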
diff --git a/lib/libthr/thread/thr_umtx.h b/lib/libthr/thread/thr_umtx.h
index 1a60c1a178c2..cddd3cfb68e4 100644
--- a/lib/libthr/thread/thr_umtx.h
+++ b/lib/libthr/thread/thr_umtx.h
@@ -54,6 +54,10 @@ void _thr_ucond_init(struct ucond *cv) __hidden;
int _thr_ucond_signal(struct ucond *cv) __hidden;
int _thr_ucond_broadcast(struct ucond *cv) __hidden;
+int __thr_rwlock_rdlock(struct urwlock *rwlock, int flags, struct timespec *tsp) __hidden;
+int __thr_rwlock_wrlock(struct urwlock *rwlock, struct timespec *tsp) __hidden;
+int __thr_rwlock_unlock(struct urwlock *rwlock) __hidden;
+
static inline int
_thr_umutex_trylock(struct umutex *mtx, uint32_t id)
{
@@ -97,4 +101,81 @@ _thr_umutex_unlock(struct umutex *mtx, uint32_t id)
return (__thr_umutex_unlock(mtx));
}
+static inline int
+_thr_rwlock_tryrdlock(struct urwlock *rwlock, int flags)
+{
+ int32_t state;
+ int32_t wrflags;
+
+ if (flags & URWLOCK_PREFER_READER || rwlock->rw_flags & URWLOCK_PREFER_READER)
+ wrflags = URWLOCK_WRITE_OWNER;
+ else
+ wrflags = URWLOCK_WRITE_OWNER | URWLOCK_WRITE_WAITERS;
+ state = rwlock->rw_state;
+ while (!(state & wrflags)) {
+ if (__predict_false(URWLOCK_READER_COUNT(state) == URWLOCK_MAX_READERS))
+ return (EAGAIN);
+ if (atomic_cmpset_acq_32(&rwlock->rw_state, state, state + 1))
+ return (0);
+ state = rwlock->rw_state;
+ }
+
+ return (EBUSY);
+}
+
+static inline int
+_thr_rwlock_trywrlock(struct urwlock *rwlock)
+{
+ int32_t state;
+
+ state = rwlock->rw_state;
+ while (!(state & URWLOCK_WRITE_OWNER) && URWLOCK_READER_COUNT(state) == 0) {
+ if (atomic_cmpset_acq_32(&rwlock->rw_state, state, state | URWLOCK_WRITE_OWNER))
+ return (0);
+ state = rwlock->rw_state;
+ }
+
+ return (EBUSY);
+}
+
+static inline int
+_thr_rwlock_rdlock(struct urwlock *rwlock, int flags, struct timespec *tsp)
+{
+ if (_thr_rwlock_tryrdlock(rwlock, flags) == 0)
+ return (0);
+ return (__thr_rwlock_rdlock(rwlock, flags, tsp));
+}
+
+static inline int
+_thr_rwlock_wrlock(struct urwlock *rwlock, struct timespec *tsp)
+{
+ if (_thr_rwlock_trywrlock(rwlock) == 0)
+ return (0);
+ return (__thr_rwlock_wrlock(rwlock, tsp));
+}
+
+static inline int
+_thr_rwlock_unlock(struct urwlock *rwlock)
+{
+ int32_t state;
+
+ state = rwlock->rw_state;
+ if (state & URWLOCK_WRITE_OWNER) {
+ if (atomic_cmpset_rel_32(&rwlock->rw_state, URWLOCK_WRITE_OWNER, 0))
+ return (0);
+ } else {
+ for (;;) {
+ if (__predict_false(URWLOCK_READER_COUNT(state) == 0))
+ return (EPERM);
+ if (!((state & URWLOCK_WRITE_WAITERS) && URWLOCK_READER_COUNT(state) == 1)) {
+ if (atomic_cmpset_rel_32(&rwlock->rw_state, state, state-1))
+ return (0);
+ state = rwlock->rw_state;
+ } else {
+ break;
+ }
+ }
+ }
+ return (__thr_rwlock_unlock(rwlock));
+}
#endif
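
All of the inlines above decode the single rw_state word. For orientation, the relevant constants from sys/umtx.h of this vintage are roughly the following (quoted from memory, so treat the exact values as illustrative):

/* rw_flags */
#define URWLOCK_PREFER_READER	0x0002U		/* never block readers behind writers */

/* rw_state */
#define URWLOCK_WRITE_OWNER	0x80000000U	/* a writer holds the lock */
#define URWLOCK_WRITE_WAITERS	0x40000000U	/* writers are sleeping in the kernel */
#define URWLOCK_READ_WAITERS	0x20000000U	/* readers are sleeping in the kernel */
#define URWLOCK_MAX_READERS	0x1fffffffU	/* low bits hold the reader count */
#define URWLOCK_READER_COUNT(c)	((c) & URWLOCK_MAX_READERS)

So _thr_rwlock_tryrdlock()'s compare-and-set of state to state + 1 simply bumps the reader count, and the "write waiters with reader count 1" test in _thr_rwlock_unlock() detects the last reader, which must fall through to __thr_rwlock_unlock() so the kernel can wake a sleeping writer.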