summaryrefslogtreecommitdiff
path: root/lib/libthr/thread
diff options
context:
space:
mode:
Diffstat (limited to 'lib/libthr/thread')
-rw-r--r--lib/libthr/thread/Makefile.inc60
-rw-r--r--lib/libthr/thread/thr_affinity.c87
-rw-r--r--lib/libthr/thread/thr_attr.c660
-rw-r--r--lib/libthr/thread/thr_autoinit.c68
-rw-r--r--lib/libthr/thread/thr_barrier.c174
-rw-r--r--lib/libthr/thread/thr_barrierattr.c97
-rw-r--r--lib/libthr/thread/thr_cancel.c181
-rw-r--r--lib/libthr/thread/thr_clean.c104
-rw-r--r--lib/libthr/thread/thr_concurrency.c68
-rw-r--r--lib/libthr/thread/thr_cond.c546
-rw-r--r--lib/libthr/thread/thr_condattr.c130
-rw-r--r--lib/libthr/thread/thr_create.c296
-rw-r--r--lib/libthr/thread/thr_ctrdtr.c56
-rw-r--r--lib/libthr/thread/thr_detach.c69
-rw-r--r--lib/libthr/thread/thr_equal.c47
-rw-r--r--lib/libthr/thread/thr_event.c68
-rw-r--r--lib/libthr/thread/thr_exit.c329
-rw-r--r--lib/libthr/thread/thr_fork.c267
-rw-r--r--lib/libthr/thread/thr_getcpuclockid.c52
-rw-r--r--lib/libthr/thread/thr_getprio.c59
-rw-r--r--lib/libthr/thread/thr_getschedparam.c71
-rw-r--r--lib/libthr/thread/thr_getthreadid_np.c51
-rw-r--r--lib/libthr/thread/thr_info.c111
-rw-r--r--lib/libthr/thread/thr_init.c496
-rw-r--r--lib/libthr/thread/thr_join.c151
-rw-r--r--lib/libthr/thread/thr_kern.c215
-rw-r--r--lib/libthr/thread/thr_kill.c76
-rw-r--r--lib/libthr/thread/thr_list.c364
-rw-r--r--lib/libthr/thread/thr_main_np.c53
-rw-r--r--lib/libthr/thread/thr_multi_np.c53
-rw-r--r--lib/libthr/thread/thr_mutex.c1190
-rw-r--r--lib/libthr/thread/thr_mutexattr.c295
-rw-r--r--lib/libthr/thread/thr_once.c105
-rw-r--r--lib/libthr/thread/thr_printf.c153
-rw-r--r--lib/libthr/thread/thr_private.h1015
-rw-r--r--lib/libthr/thread/thr_pshared.c268
-rw-r--r--lib/libthr/thread/thr_pspinlock.c155
-rw-r--r--lib/libthr/thread/thr_resume_np.c99
-rw-r--r--lib/libthr/thread/thr_rtld.c244
-rw-r--r--lib/libthr/thread/thr_rwlock.c377
-rw-r--r--lib/libthr/thread/thr_rwlockattr.c95
-rw-r--r--lib/libthr/thread/thr_self.c50
-rw-r--r--lib/libthr/thread/thr_sem.c118
-rw-r--r--lib/libthr/thread/thr_setprio.c69
-rw-r--r--lib/libthr/thread/thr_setschedparam.c80
-rw-r--r--lib/libthr/thread/thr_sig.c763
-rw-r--r--lib/libthr/thread/thr_single_np.c53
-rw-r--r--lib/libthr/thread/thr_sleepq.c186
-rw-r--r--lib/libthr/thread/thr_spec.c246
-rw-r--r--lib/libthr/thread/thr_spinlock.c126
-rw-r--r--lib/libthr/thread/thr_stack.c320
-rw-r--r--lib/libthr/thread/thr_suspend_np.c186
-rw-r--r--lib/libthr/thread/thr_switch_np.c60
-rw-r--r--lib/libthr/thread/thr_symbols.c61
-rw-r--r--lib/libthr/thread/thr_syscalls.c694
-rw-r--r--lib/libthr/thread/thr_umtx.c376
-rw-r--r--lib/libthr/thread/thr_umtx.h272
-rw-r--r--lib/libthr/thread/thr_yield.c48
58 files changed, 12763 insertions, 0 deletions
diff --git a/lib/libthr/thread/Makefile.inc b/lib/libthr/thread/Makefile.inc
new file mode 100644
index 0000000000000..795ed3989abe3
--- /dev/null
+++ b/lib/libthr/thread/Makefile.inc
@@ -0,0 +1,60 @@
+# $FreeBSD$
+
+# thr sources
+.PATH: ${.CURDIR}/thread
+
+SRCS+= \
+ thr_affinity.c \
+ thr_attr.c \
+ thr_barrier.c \
+ thr_barrierattr.c \
+ thr_cancel.c \
+ thr_clean.c \
+ thr_concurrency.c \
+ thr_cond.c \
+ thr_condattr.c \
+ thr_create.c \
+ thr_ctrdtr.c \
+ thr_detach.c \
+ thr_equal.c \
+ thr_event.c \
+ thr_exit.c \
+ thr_fork.c \
+ thr_getprio.c \
+ thr_getcpuclockid.c \
+ thr_getschedparam.c \
+ thr_getthreadid_np.c \
+ thr_info.c \
+ thr_init.c \
+ thr_join.c \
+ thr_list.c \
+ thr_kern.c \
+ thr_kill.c \
+ thr_main_np.c \
+ thr_multi_np.c \
+ thr_mutex.c \
+ thr_mutexattr.c \
+ thr_once.c \
+ thr_printf.c \
+ thr_pshared.c \
+ thr_pspinlock.c \
+ thr_resume_np.c \
+ thr_rtld.c \
+ thr_rwlock.c \
+ thr_rwlockattr.c \
+ thr_self.c \
+ thr_sem.c \
+ thr_setprio.c \
+ thr_setschedparam.c \
+ thr_sig.c \
+ thr_single_np.c \
+ thr_sleepq.c \
+ thr_spec.c \
+ thr_spinlock.c \
+ thr_stack.c \
+ thr_syscalls.c \
+ thr_suspend_np.c \
+ thr_switch_np.c \
+ thr_symbols.c \
+ thr_umtx.c \
+ thr_yield.c
diff --git a/lib/libthr/thread/thr_affinity.c b/lib/libthr/thread/thr_affinity.c
new file mode 100644
index 0000000000000..1a820b72b9f31
--- /dev/null
+++ b/lib/libthr/thread/thr_affinity.c
@@ -0,0 +1,87 @@
+/*-
+ * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
+ *
+ * Copyright (c) 2008, David Xu <davidxu@freebsd.org>
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice unmodified, this list of conditions, and the following
+ * disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+
+#include "namespace.h"
+#include <pthread_np.h>
+#include <sys/param.h>
+#include <sys/cpuset.h>
+#include "un-namespace.h"
+
+#include "thr_private.h"
+
+__weak_reference(_pthread_getaffinity_np, pthread_getaffinity_np);
+__weak_reference(_pthread_setaffinity_np, pthread_setaffinity_np);
+
+int
+_pthread_setaffinity_np(pthread_t td, size_t cpusetsize, const cpuset_t *cpusetp)
+{
+	struct pthread *self = _get_curthread();
+	int ret;
+
+	/*
+	 * Setting our own affinity needs no thread lock; the kernel
+	 * resolves tid -1 to the calling thread.
+	 */
+	if (td == self) {
+		ret = cpuset_setaffinity(CPU_LEVEL_WHICH, CPU_WHICH_TID,
+		    -1, cpusetsize, cpusetp);
+		return (ret == -1 ? errno : ret);
+	}
+	/* Look up and lock the target thread; fail if it is gone. */
+	ret = _thr_find_thread(self, td, 0);
+	if (ret != 0)
+		return (ret);
+	ret = cpuset_setaffinity(CPU_LEVEL_WHICH, CPU_WHICH_TID, TID(td),
+	    cpusetsize, cpusetp);
+	if (ret == -1)
+		ret = errno;
+	THR_THREAD_UNLOCK(self, td);
+	return (ret);
+}
+
+int
+_pthread_getaffinity_np(pthread_t td, size_t cpusetsize, cpuset_t *cpusetp)
+{
+	struct pthread *self = _get_curthread();
+	int ret;
+
+	/*
+	 * Reading our own affinity needs no thread lock; the kernel
+	 * resolves tid -1 to the calling thread.
+	 */
+	if (td == self) {
+		ret = cpuset_getaffinity(CPU_LEVEL_WHICH, CPU_WHICH_TID,
+		    -1, cpusetsize, cpusetp);
+		return (ret == -1 ? errno : ret);
+	}
+	/* Look up and lock the target thread; fail if it is gone. */
+	ret = _thr_find_thread(self, td, 0);
+	if (ret != 0)
+		return (ret);
+	ret = cpuset_getaffinity(CPU_LEVEL_WHICH, CPU_WHICH_TID, TID(td),
+	    cpusetsize, cpusetp);
+	if (ret == -1)
+		ret = errno;
+	THR_THREAD_UNLOCK(self, td);
+	return (ret);
+}
diff --git a/lib/libthr/thread/thr_attr.c b/lib/libthr/thread/thr_attr.c
new file mode 100644
index 0000000000000..0290c089be5cc
--- /dev/null
+++ b/lib/libthr/thread/thr_attr.c
@@ -0,0 +1,660 @@
+/*-
+ * SPDX-License-Identifier: BSD-4-Clause
+ *
+ * Copyright (c) 2003 Craig Rodrigues <rodrigc@attbi.com>.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. All advertising materials mentioning features or use of this software
+ * must display the following acknowledgement:
+ * This product includes software developed by Craig Rodrigues.
+ * 4. Neither the name of the author nor the names of any co-contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY CRAIG RODRIGUES AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ */
+
+/*
+ * Copyright (c) 1998 Daniel Eischen <eischen@vigrid.com>.
+ * Copyright (C) 2001 Jason Evans <jasone@freebsd.org>.
+ * Copyright (c) 2002,2003 Alexey Zelkin <phantom@FreeBSD.org>
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice(s), this list of conditions and the following disclaimer
+ * unmodified other than the allowable addition of one or more
+ * copyright notices.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice(s), this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDER(S) ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
+ * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
+ * OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
+ * EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/*
+ * Copyright (c) 1996 John Birrell <jb@cimlogic.com.au>.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. Neither the name of the author nor the names of any co-contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY JOHN BIRRELL AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+
+#include "namespace.h"
+#include <errno.h>
+#include <pthread.h>
+#include <stdlib.h>
+#include <string.h>
+#include <pthread_np.h>
+#include <sys/sysctl.h>
+#include "un-namespace.h"
+
+#include "thr_private.h"
+
+static size_t _get_kern_cpuset_size(void);
+
+__weak_reference(_pthread_attr_destroy, pthread_attr_destroy);
+
+/*
+ * Release all storage associated with a thread attribute object and
+ * NULL the caller's handle so accidental reuse is caught as EINVAL.
+ */
+int
+_pthread_attr_destroy(pthread_attr_t *attr)
+{
+
+	/* Reject a NULL pointer or an already-destroyed attribute. */
+	if (attr == NULL || *attr == NULL)
+		return (EINVAL);
+
+	/* free(NULL) is a no-op, so the cpuset needs no NULL guard. */
+	free((*attr)->cpuset);
+	free(*attr);
+
+	/* Leave the attribute pointer NULL now that it is freed. */
+	*attr = NULL;
+	return (0);
+}
+
+__weak_reference(_pthread_attr_get_np, pthread_attr_get_np);
+
+/*
+ * Snapshot the attributes of a live thread into *dstattr, including its
+ * current CPU affinity set.  Returns 0 or an errno value.
+ */
+int
+_pthread_attr_get_np(pthread_t pthread, pthread_attr_t *dstattr)
+{
+	struct pthread *curthread;
+	struct pthread_attr attr, *dst;
+	int ret;
+	size_t kern_size;
+
+	if (pthread == NULL || dstattr == NULL || (dst = *dstattr) == NULL)
+		return (EINVAL);
+	kern_size = _get_kern_cpuset_size();
+	if (dst->cpuset == NULL) {
+		dst->cpuset = calloc(1, kern_size);
+		/*
+		 * Fix: the allocation was previously unchecked, letting
+		 * cpuset_getaffinity() run with a NULL buffer on OOM.
+		 */
+		if (dst->cpuset == NULL)
+			return (ENOMEM);
+		dst->cpusetsize = kern_size;
+	}
+	curthread = _get_curthread();
+	/* Lock the target thread; fail if it has already exited. */
+	if ((ret = _thr_find_thread(curthread, pthread, /*include dead*/0)) != 0)
+		return (ret);
+	attr = pthread->attr;
+	if (pthread->flags & THR_FLAGS_DETACHED)
+		attr.flags |= PTHREAD_DETACHED;
+	ret = cpuset_getaffinity(CPU_LEVEL_WHICH, CPU_WHICH_TID, TID(pthread),
+	    dst->cpusetsize, dst->cpuset);
+	if (ret == -1)
+		ret = errno;
+	THR_THREAD_UNLOCK(curthread, pthread);
+	if (ret == 0) {
+		/* Copy the POSIX-visible attribute fields in one shot. */
+		memcpy(&dst->pthread_attr_start_copy,
+		    &attr.pthread_attr_start_copy,
+		    offsetof(struct pthread_attr, pthread_attr_end_copy) -
+		    offsetof(struct pthread_attr, pthread_attr_start_copy));
+	}
+	return (ret);
+}
+
+__weak_reference(_pthread_attr_getdetachstate, pthread_attr_getdetachstate);
+
+/* Report whether the attribute creates threads detached or joinable. */
+int
+_pthread_attr_getdetachstate(const pthread_attr_t *attr, int *detachstate)
+{
+
+	/* Both the attribute and the result pointer must be valid. */
+	if (attr == NULL || *attr == NULL || detachstate == NULL)
+		return (EINVAL);
+	*detachstate = ((*attr)->flags & PTHREAD_DETACHED) != 0 ?
+	    PTHREAD_CREATE_DETACHED : PTHREAD_CREATE_JOINABLE;
+	return (0);
+}
+
+__weak_reference(_pthread_attr_getguardsize, pthread_attr_getguardsize);
+
+/* Report the configured stack guard area size. */
+int
+_pthread_attr_getguardsize(const pthread_attr_t * __restrict attr,
+    size_t * __restrict guardsize)
+{
+
+	if (attr == NULL || *attr == NULL || guardsize == NULL)
+		return (EINVAL);
+	*guardsize = (*attr)->guardsize_attr;
+	return (0);
+}
+
+__weak_reference(_pthread_attr_getinheritsched, pthread_attr_getinheritsched);
+
+/* Report whether the scheduler attributes are inherited or explicit. */
+int
+_pthread_attr_getinheritsched(const pthread_attr_t * __restrict attr,
+    int * __restrict sched_inherit)
+{
+
+	/*
+	 * Also reject a NULL result pointer, matching the checks done
+	 * by the sibling pthread_attr_get*() functions.
+	 */
+	if (attr == NULL || *attr == NULL || sched_inherit == NULL)
+		return (EINVAL);
+	*sched_inherit = (*attr)->sched_inherit;
+	return (0);
+}
+
+__weak_reference(_pthread_attr_getschedparam, pthread_attr_getschedparam);
+
+/* Report the scheduling parameters stored in the attribute. */
+int
+_pthread_attr_getschedparam(const pthread_attr_t * __restrict attr,
+    struct sched_param * __restrict param)
+{
+
+	if (attr == NULL || *attr == NULL || param == NULL)
+		return (EINVAL);
+	/* Only the priority is recorded in the attribute object. */
+	param->sched_priority = (*attr)->prio;
+	return (0);
+}
+
+__weak_reference(_pthread_attr_getschedpolicy, pthread_attr_getschedpolicy);
+
+/* Report the scheduling policy stored in the attribute. */
+int
+_pthread_attr_getschedpolicy(const pthread_attr_t * __restrict attr,
+    int * __restrict policy)
+{
+
+	if (attr == NULL || *attr == NULL || policy == NULL)
+		return (EINVAL);
+	*policy = (*attr)->sched_policy;
+	return (0);
+}
+
+__weak_reference(_pthread_attr_getscope, pthread_attr_getscope);
+
+/* Report the contention scope (system or process) of the attribute. */
+int
+_pthread_attr_getscope(const pthread_attr_t * __restrict attr,
+    int * __restrict contentionscope)
+{
+
+	if (attr == NULL || *attr == NULL || contentionscope == NULL)
+		return (EINVAL);
+	/* The scope is encoded as a single flag bit. */
+	if (((*attr)->flags & PTHREAD_SCOPE_SYSTEM) != 0)
+		*contentionscope = PTHREAD_SCOPE_SYSTEM;
+	else
+		*contentionscope = PTHREAD_SCOPE_PROCESS;
+	return (0);
+}
+
+__weak_reference(_pthread_attr_getstack, pthread_attr_getstack);
+
+/* Report both the stack base address and the stack size. */
+int
+_pthread_attr_getstack(const pthread_attr_t * __restrict attr,
+    void ** __restrict stackaddr,
+    size_t * __restrict stacksize)
+{
+
+	if (attr == NULL || *attr == NULL || stackaddr == NULL ||
+	    stacksize == NULL)
+		return (EINVAL);
+	*stackaddr = (*attr)->stackaddr_attr;
+	*stacksize = (*attr)->stacksize_attr;
+	return (0);
+}
+
+__weak_reference(_pthread_attr_getstackaddr, pthread_attr_getstackaddr);
+
+/* Report the stack base address stored in the attribute. */
+int
+_pthread_attr_getstackaddr(const pthread_attr_t *attr, void **stackaddr)
+{
+
+	if (attr == NULL || *attr == NULL || stackaddr == NULL)
+		return (EINVAL);
+	*stackaddr = (*attr)->stackaddr_attr;
+	return (0);
+}
+
+__weak_reference(_pthread_attr_getstacksize, pthread_attr_getstacksize);
+
+/* Report the stack size stored in the attribute. */
+int
+_pthread_attr_getstacksize(const pthread_attr_t * __restrict attr,
+    size_t * __restrict stacksize)
+{
+
+	if (attr == NULL || *attr == NULL || stacksize == NULL)
+		return (EINVAL);
+	*stacksize = (*attr)->stacksize_attr;
+	return (0);
+}
+
+__weak_reference(_pthread_attr_init, pthread_attr_init);
+
+/* Allocate a new attribute object initialized from the library defaults. */
+int
+_pthread_attr_init(pthread_attr_t *attr)
+{
+	pthread_attr_t pattr;
+
+	_thr_check_init();
+
+	pattr = malloc(sizeof(struct pthread_attr));
+	if (pattr == NULL)
+		return (ENOMEM);
+	/* Seed the new object from the defaults. */
+	memcpy(pattr, &_pthread_attr_default, sizeof(struct pthread_attr));
+	*attr = pattr;
+	return (0);
+}
+
+__weak_reference(_pthread_attr_setcreatesuspend_np, pthread_attr_setcreatesuspend_np);
+
+/* Mark the attribute so that new threads are created suspended. */
+int
+_pthread_attr_setcreatesuspend_np(pthread_attr_t *attr)
+{
+
+	if (attr == NULL || *attr == NULL)
+		return (EINVAL);
+	(*attr)->suspend = THR_CREATE_SUSPENDED;
+	return (0);
+}
+
+__weak_reference(_pthread_attr_setdetachstate, pthread_attr_setdetachstate);
+
+/* Set whether the attribute creates threads detached or joinable. */
+int
+_pthread_attr_setdetachstate(pthread_attr_t *attr, int detachstate)
+{
+
+	if (attr == NULL || *attr == NULL)
+		return (EINVAL);
+	switch (detachstate) {
+	case PTHREAD_CREATE_DETACHED:
+		(*attr)->flags |= PTHREAD_DETACHED;
+		return (0);
+	case PTHREAD_CREATE_JOINABLE:
+		(*attr)->flags &= ~PTHREAD_DETACHED;
+		return (0);
+	default:
+		/* Any other value is invalid. */
+		return (EINVAL);
+	}
+}
+
+__weak_reference(_pthread_attr_setguardsize, pthread_attr_setguardsize);
+
+/* Record the requested stack guard area size in the attribute. */
+int
+_pthread_attr_setguardsize(pthread_attr_t *attr, size_t guardsize)
+{
+	int ret;
+
+	/* Check for invalid arguments. */
+	if (attr == NULL || *attr == NULL)
+		ret = EINVAL;
+	else {
+		/* Save the guard size. */
+		(*attr)->guardsize_attr = guardsize;
+		ret = 0;
+	}
+	return(ret);
+}
+
+__weak_reference(_pthread_attr_setinheritsched, pthread_attr_setinheritsched);
+
+/* Set whether scheduling attributes are inherited or taken explicitly. */
+int
+_pthread_attr_setinheritsched(pthread_attr_t *attr, int sched_inherit)
+{
+
+	if (attr == NULL || *attr == NULL)
+		return (EINVAL);
+	/* Only the two POSIX-defined values are accepted. */
+	if (sched_inherit != PTHREAD_INHERIT_SCHED &&
+	    sched_inherit != PTHREAD_EXPLICIT_SCHED)
+		return (ENOTSUP);
+	(*attr)->sched_inherit = sched_inherit;
+	return (0);
+}
+
+__weak_reference(_pthread_attr_setschedparam, pthread_attr_setschedparam);
+
+/*
+ * Store scheduling parameters in the attribute.  For the real-time
+ * policies the priority must fall inside the policy's valid range.
+ */
+int
+_pthread_attr_setschedparam(pthread_attr_t * __restrict attr,
+    const struct sched_param * __restrict param)
+{
+	int policy;
+
+	if (attr == NULL || *attr == NULL)
+		return (EINVAL);
+	if (param == NULL)
+		return (ENOTSUP);
+
+	policy = (*attr)->sched_policy;
+	/*
+	 * Range-check only SCHED_FIFO/SCHED_RR.  SCHED_OTHER is left
+	 * unchecked: glib ports were wrongly built against the M:N
+	 * library's internal THR_MIN_PRIORITY/THR_MAX_PRIORITY macros.
+	 */
+	if ((policy == SCHED_FIFO || policy == SCHED_RR) &&
+	    (param->sched_priority < _thr_priorities[policy-1].pri_min ||
+	    param->sched_priority > _thr_priorities[policy-1].pri_max))
+		return (ENOTSUP);
+
+	(*attr)->prio = param->sched_priority;
+	return (0);
+}
+
+__weak_reference(_pthread_attr_setschedpolicy, pthread_attr_setschedpolicy);
+
+/* Set the scheduling policy and reset the priority to its default. */
+int
+_pthread_attr_setschedpolicy(pthread_attr_t *attr, int policy)
+{
+
+	if (attr == NULL || *attr == NULL)
+		return (EINVAL);
+	/* Valid policies lie in the SCHED_FIFO..SCHED_RR range. */
+	if (policy < SCHED_FIFO || policy > SCHED_RR)
+		return (ENOTSUP);
+	(*attr)->sched_policy = policy;
+	(*attr)->prio = _thr_priorities[policy-1].pri_default;
+	return (0);
+}
+
+__weak_reference(_pthread_attr_setscope, pthread_attr_setscope);
+
+/* Set the contention scope; stored as the PTHREAD_SCOPE_SYSTEM flag bit. */
+int
+_pthread_attr_setscope(pthread_attr_t *attr, int contentionscope)
+{
+
+	if (attr == NULL || *attr == NULL)
+		return (EINVAL);
+	if (contentionscope != PTHREAD_SCOPE_PROCESS &&
+	    contentionscope != PTHREAD_SCOPE_SYSTEM)
+		return (EINVAL);
+	if (contentionscope == PTHREAD_SCOPE_SYSTEM)
+		(*attr)->flags |= PTHREAD_SCOPE_SYSTEM;
+	else
+		(*attr)->flags &= ~PTHREAD_SCOPE_SYSTEM;
+	return (0);
+}
+
+__weak_reference(_pthread_attr_setstack, pthread_attr_setstack);
+
+/* Record a caller-supplied stack; its size must meet the POSIX minimum. */
+int
+_pthread_attr_setstack(pthread_attr_t *attr, void *stackaddr,
+    size_t stacksize)
+{
+
+	if (attr == NULL || *attr == NULL || stackaddr == NULL ||
+	    stacksize < PTHREAD_STACK_MIN)
+		return (EINVAL);
+	(*attr)->stackaddr_attr = stackaddr;
+	(*attr)->stacksize_attr = stacksize;
+	return (0);
+}
+
+__weak_reference(_pthread_attr_setstackaddr, pthread_attr_setstackaddr);
+
+/* Record a caller-supplied stack base address. */
+int
+_pthread_attr_setstackaddr(pthread_attr_t *attr, void *stackaddr)
+{
+
+	if (attr == NULL || *attr == NULL || stackaddr == NULL)
+		return (EINVAL);
+	(*attr)->stackaddr_attr = stackaddr;
+	return (0);
+}
+
+__weak_reference(_pthread_attr_setstacksize, pthread_attr_setstacksize);
+
+/* Record the requested stack size; must meet the POSIX minimum. */
+int
+_pthread_attr_setstacksize(pthread_attr_t *attr, size_t stacksize)
+{
+
+	if (attr == NULL || *attr == NULL || stacksize < PTHREAD_STACK_MIN)
+		return (EINVAL);
+	(*attr)->stacksize_attr = stacksize;
+	return (0);
+}
+
+/*
+ * Return the kernel's cpuset_t size, queried once via sysctl and then
+ * cached for the lifetime of the process.
+ */
+static size_t
+_get_kern_cpuset_size(void)
+{
+	static int kern_cpuset_size = 0;
+	size_t len;
+
+	if (kern_cpuset_size == 0) {
+		len = sizeof(kern_cpuset_size);
+		if (sysctlbyname("kern.sched.cpusetsize", &kern_cpuset_size,
+		    &len, NULL, 0) != 0)
+			PANIC("failed to get sysctl kern.sched.cpusetsize");
+	}
+	return (kern_cpuset_size);
+}
+
+__weak_reference(_pthread_attr_setaffinity_np, pthread_attr_setaffinity_np);
+/*
+ * Store a CPU affinity set in the attribute.  A zero size or NULL set
+ * clears any stored affinity.  The set must be at least as large as the
+ * kernel's, and any bytes beyond the kernel size must be zero.
+ */
+int
+_pthread_attr_setaffinity_np(pthread_attr_t *pattr, size_t cpusetsize,
+    const cpuset_t *cpusetp)
+{
+	pthread_attr_t attr;
+	size_t i, kern_size;
+
+	if (pattr == NULL || (attr = *pattr) == NULL)
+		return (EINVAL);
+
+	if (cpusetsize == 0 || cpusetp == NULL) {
+		/* Clear any previously stored set (free(NULL) is a no-op). */
+		free(attr->cpuset);
+		attr->cpuset = NULL;
+		attr->cpusetsize = 0;
+		return (0);
+	}
+
+	kern_size = _get_kern_cpuset_size();
+	/* The kernel rejects undersized sets; mirror that check here. */
+	if (cpusetsize < kern_size)
+		return (ERANGE);
+	/* The kernel rejects stray high bits; mirror that check here. */
+	for (i = kern_size; i < cpusetsize; i++) {
+		if (((const char *)cpusetp)[i] != 0)
+			return (EINVAL);
+	}
+
+	if (attr->cpuset == NULL) {
+		attr->cpuset = calloc(1, kern_size);
+		if (attr->cpuset == NULL)
+			return (errno);
+		attr->cpusetsize = kern_size;
+	}
+	memcpy(attr->cpuset, cpusetp, kern_size);
+	return (0);
+}
+
+__weak_reference(_pthread_attr_getaffinity_np, pthread_attr_getaffinity_np);
+/*
+ * Copy the attribute's CPU affinity set to the caller's buffer.  If no
+ * set was stored, report "all CPUs".  Tail bytes past the kernel set
+ * size are zeroed.
+ */
+int
+_pthread_attr_getaffinity_np(const pthread_attr_t *pattr, size_t cpusetsize,
+    cpuset_t *cpusetp)
+{
+	pthread_attr_t attr;
+	size_t kern_size;
+
+	if (pattr == NULL || (attr = *pattr) == NULL)
+		return (EINVAL);
+
+	/* The kernel rejects undersized sets; mirror that check here. */
+	kern_size = _get_kern_cpuset_size();
+	if (cpusetsize < kern_size)
+		return (ERANGE);
+	if (attr->cpuset != NULL)
+		memcpy(cpusetp, attr->cpuset, MIN(cpusetsize,
+		    attr->cpusetsize));
+	else
+		/* No stored set means no restriction: all bits on. */
+		memset(cpusetp, -1, kern_size);
+	if (cpusetsize > kern_size)
+		memset(((char *)cpusetp) + kern_size, 0,
+		    cpusetsize - kern_size);
+	return (0);
+}
diff --git a/lib/libthr/thread/thr_autoinit.c b/lib/libthr/thread/thr_autoinit.c
new file mode 100644
index 0000000000000..d741f3cf1af73
--- /dev/null
+++ b/lib/libthr/thread/thr_autoinit.c
@@ -0,0 +1,68 @@
+/*-
+ * SPDX-License-Identifier: BSD-4-Clause
+ *
+ * Copyright (c) 2002 Alfred Perlstein <alfred@freebsd.org>.
+ * Copyright (c) 1995 John Birrell <jb@cimlogic.com.au>.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. All advertising materials mentioning features or use of this software
+ * must display the following acknowledgement:
+ * This product includes software developed by John Birrell.
+ * 4. Neither the name of the author nor the names of any co-contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY JOHN BIRRELL AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+
+#include <pthread.h>
+
+#include "thr_private.h"
+
+/*
+ * This module uses GCC extensions to initialize the
+ * threads package at program start-up time.
+ */
+
+void _thread_init_hack(void) __attribute__ ((constructor));
+
+/*
+ * Constructor hook: runs before main() in any program linked with this
+ * library and initializes the threads package via _thread_init().
+ */
+void
+_thread_init_hack(void)
+{
+
+	_thread_init();
+}
+
+/*
+ * For the shared version of the threads library, the above is sufficient.
+ * But for the archive version of the library, we need a little bit more.
+ * Namely, we must arrange for this particular module to be pulled in from
+ * the archive library at link time. To accomplish that, we define and
+ * initialize a variable, "_thread_autoinit_dummy_decl". This variable is
+ * referenced (as an extern) from libc/stdlib/exit.c. This will always
+ * create a need for this module, ensuring that it is present in the
+ * executable.
+ */
+/* Defined here, referenced from libc/stdlib/exit.c; see the comment above. */
+extern int _thread_autoinit_dummy_decl;
+int _thread_autoinit_dummy_decl = 0;
diff --git a/lib/libthr/thread/thr_barrier.c b/lib/libthr/thread/thr_barrier.c
new file mode 100644
index 0000000000000..fa205ad509c95
--- /dev/null
+++ b/lib/libthr/thread/thr_barrier.c
@@ -0,0 +1,174 @@
+/*-
+ * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
+ *
+ * Copyright (c) 2003 David Xu <davidxu@freebsd.org>
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+
+#include "namespace.h"
+#include <errno.h>
+#include <stdlib.h>
+#include <pthread.h>
+#include "un-namespace.h"
+
+#include "thr_private.h"
+
+_Static_assert(sizeof(struct pthread_barrier) <= PAGE_SIZE,
+ "pthread_barrier is too large for off-page");
+
+__weak_reference(_pthread_barrier_init, pthread_barrier_init);
+__weak_reference(_pthread_barrier_wait, pthread_barrier_wait);
+__weak_reference(_pthread_barrier_destroy, pthread_barrier_destroy);
+
+/*
+ * Destroy a barrier.  Fails with EBUSY if threads are still blocked in
+ * pthread_barrier_wait(); waits for already-released waiters (tracked by
+ * b_refcount) to leave before freeing the object.
+ */
+int
+_pthread_barrier_destroy(pthread_barrier_t *barrier)
+{
+	pthread_barrier_t bar;
+	struct pthread *curthread;
+	int pshared;
+
+	if (barrier == NULL || *barrier == NULL)
+		return (EINVAL);
+
+	if (*barrier == THR_PSHARED_PTR) {
+		/* Process-shared: the real object lives on a shared off-page. */
+		bar = __thr_pshared_offpage(barrier, 0);
+		if (bar == NULL) {
+			/* Off-page already gone; nothing left to tear down. */
+			*barrier = NULL;
+			return (0);
+		}
+		pshared = 1;
+	} else {
+		bar = *barrier;
+		pshared = 0;
+	}
+	curthread = _get_curthread();
+	THR_UMUTEX_LOCK(curthread, &bar->b_lock);
+	if (bar->b_destroying) {
+		/* Another thread is already destroying this barrier. */
+		THR_UMUTEX_UNLOCK(curthread, &bar->b_lock);
+		return (EBUSY);
+	}
+	bar->b_destroying = 1;
+	do {
+		if (bar->b_waiters > 0) {
+			/* Threads are still blocked waiting on the barrier. */
+			bar->b_destroying = 0;
+			THR_UMUTEX_UNLOCK(curthread, &bar->b_lock);
+			return (EBUSY);
+		}
+		if (bar->b_refcount != 0) {
+			/*
+			 * Released waiters have not all left the barrier
+			 * yet; the last one broadcasts b_cv to wake us.
+			 */
+			_thr_ucond_wait(&bar->b_cv, &bar->b_lock, NULL, 0);
+			THR_UMUTEX_LOCK(curthread, &bar->b_lock);
+		} else
+			break;
+	} while (1);
+	bar->b_destroying = 0;
+	THR_UMUTEX_UNLOCK(curthread, &bar->b_lock);
+
+	*barrier = NULL;
+	if (pshared)
+		__thr_pshared_destroy(barrier);
+	else
+		free(bar);
+	return (0);
+}
+
+/*
+ * Initialize a barrier for `count' threads.  Process-private barriers
+ * are heap-allocated; process-shared ones live on a shared off-page.
+ */
+int
+_pthread_barrier_init(pthread_barrier_t * __restrict barrier,
+    const pthread_barrierattr_t * __restrict attr, unsigned count)
+{
+	pthread_barrier_t bar;
+	int pshared;
+
+	if (barrier == NULL || count == 0 || count > INT_MAX)
+		return (EINVAL);
+
+	/* No attribute (or an explicitly private one) means private. */
+	pshared = attr != NULL && *attr != NULL &&
+	    (*attr)->pshared != PTHREAD_PROCESS_PRIVATE;
+	if (!pshared) {
+		bar = calloc(1, sizeof(struct pthread_barrier));
+		if (bar == NULL)
+			return (ENOMEM);
+		*barrier = bar;
+	} else {
+		bar = __thr_pshared_offpage(barrier, 1);
+		if (bar == NULL)
+			return (EFAULT);
+		*barrier = THR_PSHARED_PTR;
+	}
+
+	_thr_umutex_init(&bar->b_lock);
+	_thr_ucond_init(&bar->b_cv);
+	if (pshared) {
+		/* Mark the kernel sync objects as cross-process. */
+		bar->b_lock.m_flags |= USYNC_PROCESS_SHARED;
+		bar->b_cv.c_flags |= USYNC_PROCESS_SHARED;
+	}
+	bar->b_count = count;
+	return (0);
+}
+
+/*
+ * Wait at a barrier.  The last arriving thread bumps the cycle counter,
+ * wakes all waiters, and returns PTHREAD_BARRIER_SERIAL_THREAD; the
+ * others sleep until the cycle changes and return 0.
+ */
+int
+_pthread_barrier_wait(pthread_barrier_t *barrier)
+{
+	struct pthread *curthread;
+	pthread_barrier_t bar;
+	int64_t cycle;
+	int ret;
+
+	if (barrier == NULL || *barrier == NULL)
+		return (EINVAL);
+
+	if (*barrier == THR_PSHARED_PTR) {
+		/* Process-shared: resolve the off-page object. */
+		bar = __thr_pshared_offpage(barrier, 0);
+		if (bar == NULL)
+			return (EINVAL);
+	} else {
+		bar = *barrier;
+	}
+	curthread = _get_curthread();
+	THR_UMUTEX_LOCK(curthread, &bar->b_lock);
+	if (++bar->b_waiters == bar->b_count) {
+		/* Current thread is the last to arrive: release everyone. */
+		bar->b_waiters = 0;
+		bar->b_cycle++;
+		_thr_ucond_broadcast(&bar->b_cv);
+		THR_UMUTEX_UNLOCK(curthread, &bar->b_lock);
+		ret = PTHREAD_BARRIER_SERIAL_THREAD;
+	} else {
+		cycle = bar->b_cycle;
+		/* b_refcount tracks released waiters still inside wait(). */
+		bar->b_refcount++;
+		do {
+			_thr_ucond_wait(&bar->b_cv, &bar->b_lock, NULL, 0);
+			THR_UMUTEX_LOCK(curthread, &bar->b_lock);
+			/* test cycle to avoid bogus wakeup */
+		} while (cycle == bar->b_cycle);
+		/* The last thread to leave wakes a blocked destroyer. */
+		if (--bar->b_refcount == 0 && bar->b_destroying)
+			_thr_ucond_broadcast(&bar->b_cv);
+		THR_UMUTEX_UNLOCK(curthread, &bar->b_lock);
+		ret = 0;
+	}
+	return (ret);
+}
diff --git a/lib/libthr/thread/thr_barrierattr.c b/lib/libthr/thread/thr_barrierattr.c
new file mode 100644
index 0000000000000..09d8c9cff6061
--- /dev/null
+++ b/lib/libthr/thread/thr_barrierattr.c
@@ -0,0 +1,97 @@
+/*-
+ * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
+ *
+ * Copyright (c) 2003 David Xu <davidxu@freebsd.org>.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice(s), this list of conditions and the following disclaimer as
+ * the first lines of this file unmodified other than the possible
+ * addition of one or more copyright notices.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice(s), this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDER(S) ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+ * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+ * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
+ * DAMAGE.
+ */
+
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+
+#include "namespace.h"
+#include <errno.h>
+#include <stdlib.h>
+#include <pthread.h>
+#include "un-namespace.h"
+
+#include "thr_private.h"
+
+__weak_reference(_pthread_barrierattr_destroy, pthread_barrierattr_destroy);
+__weak_reference(_pthread_barrierattr_init, pthread_barrierattr_init);
+__weak_reference(_pthread_barrierattr_setpshared,
+ pthread_barrierattr_setpshared);
+__weak_reference(_pthread_barrierattr_getpshared,
+ pthread_barrierattr_getpshared);
+
+/*
+ * Destroy a barrier attribute object, releasing its storage.
+ * Returns EINVAL for a NULL or already-destroyed attribute.
+ */
+int
+_pthread_barrierattr_destroy(pthread_barrierattr_t *attr)
+{
+
+ if (attr == NULL || *attr == NULL)
+ return (EINVAL);
+
+ free(*attr);
+ /*
+ * Poison the handle so reuse after destroy is caught, matching
+ * _pthread_condattr_destroy().
+ */
+ *attr = NULL;
+ return (0);
+}
+
+/*
+ * Report the process-shared setting stored in a barrier attribute.
+ */
+int
+_pthread_barrierattr_getpshared(const pthread_barrierattr_t * __restrict attr,
+ int * __restrict pshared)
+{
+ int ret;
+
+ if (attr != NULL && *attr != NULL) {
+ *pshared = (*attr)->pshared;
+ ret = 0;
+ } else {
+ ret = EINVAL;
+ }
+ return (ret);
+}
+
+/*
+ * Allocate a barrier attribute object with the default
+ * (process-private) setting.
+ */
+int
+_pthread_barrierattr_init(pthread_barrierattr_t *attr)
+{
+ struct pthread_barrierattr *battr;
+
+ if (attr == NULL)
+ return (EINVAL);
+
+ *attr = battr = malloc(sizeof(struct pthread_barrierattr));
+ if (battr == NULL)
+ return (ENOMEM);
+
+ battr->pshared = PTHREAD_PROCESS_PRIVATE;
+ return (0);
+}
+
+/*
+ * Record whether the barrier built from this attribute may be shared
+ * between processes.  Only PTHREAD_PROCESS_PRIVATE and
+ * PTHREAD_PROCESS_SHARED are accepted.
+ */
+int
+_pthread_barrierattr_setpshared(pthread_barrierattr_t *attr, int pshared)
+{
+
+ if (attr == NULL || *attr == NULL)
+ return (EINVAL);
+ if (pshared != PTHREAD_PROCESS_PRIVATE &&
+ pshared != PTHREAD_PROCESS_SHARED)
+ return (EINVAL);
+
+ (*attr)->pshared = pshared;
+ return (0);
+}
diff --git a/lib/libthr/thread/thr_cancel.c b/lib/libthr/thread/thr_cancel.c
new file mode 100644
index 0000000000000..fdcdb2c1051fd
--- /dev/null
+++ b/lib/libthr/thread/thr_cancel.c
@@ -0,0 +1,181 @@
+/*-
+ * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
+ *
+ * Copyright (c) 2005, David Xu <davidxu@freebsd.org>
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice unmodified, this list of conditions, and the following
+ * disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+
+#include "namespace.h"
+#include <pthread.h>
+#include "un-namespace.h"
+
+#include "thr_private.h"
+
+__weak_reference(_pthread_cancel, pthread_cancel);
+__weak_reference(_pthread_setcancelstate, pthread_setcancelstate);
+__weak_reference(_pthread_setcanceltype, pthread_setcanceltype);
+__weak_reference(_pthread_testcancel, pthread_testcancel);
+
+/*
+ * Exit with PTHREAD_CANCELED when a cancel is pending (per
+ * SHOULD_CANCEL) and the thread is not inside a libthr critical
+ * region.
+ */
+static inline void
+testcancel(struct pthread *curthread)
+{
+ if (__predict_false(SHOULD_CANCEL(curthread) &&
+ !THR_IN_CRITICAL(curthread)))
+ _pthread_exit(PTHREAD_CANCELED);
+}
+
+/* Out-of-line wrapper around testcancel() for other libthr files. */
+void
+_thr_testcancel(struct pthread *curthread)
+{
+ testcancel(curthread);
+}
+
+/*
+ * Request cancellation of a thread: mark its cancel as pending and,
+ * unless the target is already dead, nudge it with SIGCANCEL so a
+ * blocked cancellation point wakes up.  On success _thr_find_thread()
+ * leaves the target's thread lock held; we drop it here.
+ */
+int
+_pthread_cancel(pthread_t pthread)
+{
+ struct pthread *curthread = _get_curthread();
+ int ret;
+
+ /*
+ * POSIX says _pthread_cancel should be async cancellation safe.
+ * _thr_find_thread and THR_THREAD_UNLOCK will enter and leave critical
+ * region automatically.
+ */
+ if ((ret = _thr_find_thread(curthread, pthread, 0)) == 0) {
+ if (!pthread->cancel_pending) {
+ pthread->cancel_pending = 1;
+ if (pthread->state != PS_DEAD)
+ _thr_send_sig(pthread, SIGCANCEL);
+ }
+ THR_THREAD_UNLOCK(curthread, pthread);
+ }
+ return (ret);
+}
+
+/*
+ * Set the calling thread's cancelability state and optionally report
+ * the previous one.  Re-enabling cancellation acts immediately on a
+ * pending cancel when the thread is in asynchronous mode.
+ */
+int
+_pthread_setcancelstate(int state, int *oldstate)
+{
+ struct pthread *curthread;
+ int was_enabled;
+
+ curthread = _get_curthread();
+ was_enabled = curthread->cancel_enable;
+
+ if (state == PTHREAD_CANCEL_DISABLE) {
+ curthread->cancel_enable = 0;
+ } else if (state == PTHREAD_CANCEL_ENABLE) {
+ curthread->cancel_enable = 1;
+ if (curthread->cancel_async)
+ testcancel(curthread);
+ } else {
+ return (EINVAL);
+ }
+
+ if (oldstate != NULL) {
+ *oldstate = was_enabled ? PTHREAD_CANCEL_ENABLE :
+ PTHREAD_CANCEL_DISABLE;
+ }
+ return (0);
+}
+
+/*
+ * Set the calling thread's cancelability type (deferred vs.
+ * asynchronous) and optionally report the previous type.  Switching
+ * to asynchronous acts immediately on any pending cancel.
+ */
+int
+_pthread_setcanceltype(int type, int *oldtype)
+{
+ struct pthread *curthread;
+ int was_async;
+
+ curthread = _get_curthread();
+ was_async = curthread->cancel_async;
+
+ if (type == PTHREAD_CANCEL_ASYNCHRONOUS) {
+ curthread->cancel_async = 1;
+ testcancel(curthread);
+ } else if (type == PTHREAD_CANCEL_DEFERRED) {
+ curthread->cancel_async = 0;
+ } else {
+ return (EINVAL);
+ }
+
+ if (oldtype != NULL) {
+ *oldtype = was_async ? PTHREAD_CANCEL_ASYNCHRONOUS :
+ PTHREAD_CANCEL_DEFERRED;
+ }
+ return (0);
+}
+
+/*
+ * pthread_testcancel(3): an explicit cancellation point.
+ * _thr_check_init() guards against calls before libthr is set up.
+ */
+void
+_pthread_testcancel(void)
+{
+ struct pthread *curthread;
+
+ _thr_check_init();
+ curthread = _get_curthread();
+ testcancel(curthread);
+}
+
+/* Enter a cancellation point and act on any pending cancel. */
+void
+_thr_cancel_enter(struct pthread *curthread)
+{
+ curthread->cancel_point = 1;
+ testcancel(curthread);
+}
+
+/*
+ * Enter a cancellation point.  When a cancel is pending but maycancel
+ * is false, only wake our own tid (presumably to interrupt a blocking
+ * syscall) rather than exiting here.
+ */
+void
+_thr_cancel_enter2(struct pthread *curthread, int maycancel)
+{
+ curthread->cancel_point = 1;
+ if (__predict_false(SHOULD_CANCEL(curthread) &&
+ !THR_IN_CRITICAL(curthread))) {
+ if (!maycancel)
+ thr_wake(curthread->tid);
+ else
+ _pthread_exit(PTHREAD_CANCELED);
+ }
+}
+
+/*
+ * Leave a cancellation point; if maycancel is set, honor a cancel
+ * that arrived while the point was active.
+ */
+void
+_thr_cancel_leave(struct pthread *curthread, int maycancel)
+{
+ curthread->cancel_point = 0;
+ if (__predict_false(SHOULD_CANCEL(curthread) &&
+ !THR_IN_CRITICAL(curthread) && maycancel))
+ _pthread_exit(PTHREAD_CANCELED);
+}
+
+/* Exported wrapper: enter a cancellation point for the current thread. */
+void
+_pthread_cancel_enter(int maycancel)
+{
+ _thr_cancel_enter2(_get_curthread(), maycancel);
+}
+
+/* Exported wrapper: leave a cancellation point for the current thread. */
+void
+_pthread_cancel_leave(int maycancel)
+{
+ _thr_cancel_leave(_get_curthread(), maycancel);
+}
diff --git a/lib/libthr/thread/thr_clean.c b/lib/libthr/thread/thr_clean.c
new file mode 100644
index 0000000000000..5a93d94a7e5a5
--- /dev/null
+++ b/lib/libthr/thread/thr_clean.c
@@ -0,0 +1,104 @@
+/*-
+ * SPDX-License-Identifier: BSD-3-Clause
+ *
+ *
+ * Copyright (c) 1995 John Birrell <jb@cimlogic.com.au>.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. Neither the name of the author nor the names of any co-contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY JOHN BIRRELL AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+
+#include "namespace.h"
+#include <signal.h>
+#include <errno.h>
+#include <stdlib.h>
+#include <pthread.h>
+#include "un-namespace.h"
+
+#include "thr_private.h"
+
+#undef pthread_cleanup_push
+#undef pthread_cleanup_pop
+
+/* old binary compatible interfaces */
+__weak_reference(_pthread_cleanup_push, pthread_cleanup_push);
+__weak_reference(_pthread_cleanup_pop, pthread_cleanup_pop);
+
+/*
+ * Push a cleanup handler whose record storage (info) is supplied by
+ * the caller, so nothing is allocated here.
+ */
+void
+__pthread_cleanup_push_imp(void (*routine)(void *), void *arg,
+ struct _pthread_cleanup_info *info)
+{
+ struct pthread *curthread = _get_curthread();
+ struct pthread_cleanup *newbuf;
+
+ newbuf = (void *)info;
+ newbuf->routine = routine;
+ newbuf->routine_arg = arg;
+ newbuf->onheap = 0; /* storage belongs to the caller; never freed */
+ newbuf->prev = curthread->cleanup;
+ curthread->cleanup = newbuf;
+}
+
+/*
+ * Pop the newest cleanup handler, optionally running it, and free the
+ * record only when _pthread_cleanup_push() heap-allocated it.
+ */
+void
+__pthread_cleanup_pop_imp(int execute)
+{
+ struct pthread *curthread = _get_curthread();
+ struct pthread_cleanup *old;
+
+ if ((old = curthread->cleanup) != NULL) {
+ curthread->cleanup = old->prev;
+ if (execute)
+ old->routine(old->routine_arg);
+ if (old->onheap)
+ free(old);
+ }
+}
+
+/*
+ * Old binary-compatible interface: push a heap-allocated cleanup
+ * record.  NOTE(review): a failed malloc() silently drops the
+ * handler, and the void return gives callers no way to detect it.
+ */
+void
+_pthread_cleanup_push(void (*routine) (void *), void *arg)
+{
+ struct pthread *curthread = _get_curthread();
+ struct pthread_cleanup *newbuf;
+#ifdef _PTHREAD_FORCED_UNWIND
+ curthread->unwind_disabled = 1;
+#endif
+ if ((newbuf = (struct pthread_cleanup *)
+ malloc(sizeof(struct pthread_cleanup))) != NULL) {
+ newbuf->routine = routine;
+ newbuf->routine_arg = arg;
+ newbuf->onheap = 1;
+ newbuf->prev = curthread->cleanup;
+ curthread->cleanup = newbuf;
+ }
+}
+
+/* Old binary-compatible interface: pop (and maybe run) a handler. */
+void
+_pthread_cleanup_pop(int execute)
+{
+ __pthread_cleanup_pop_imp(execute);
+}
diff --git a/lib/libthr/thread/thr_concurrency.c b/lib/libthr/thread/thr_concurrency.c
new file mode 100644
index 0000000000000..9f60dca1e445b
--- /dev/null
+++ b/lib/libthr/thread/thr_concurrency.c
@@ -0,0 +1,68 @@
+/*-
+ * SPDX-License-Identifier: BSD-4-Clause
+ *
+ * Copyright (c) 2003 Sergey Osokin <osa@FreeBSD.org.ru>.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. All advertising materials mentioning features or use of this software
+ * must display the following acknowledgement:
+ * This product includes software developed by Sergey Osokin.
+ * 4. Neither the name of the author nor the names of any co-contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY SERGEY OSOKIN AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+
+#include "namespace.h"
+#include <errno.h>
+#include <pthread.h>
+#include "un-namespace.h"
+
+#include "thr_private.h"
+
+static int current_concurrency = 0;
+
+__weak_reference(_pthread_getconcurrency, pthread_getconcurrency);
+__weak_reference(_pthread_setconcurrency, pthread_setconcurrency);
+
+/*
+ * Return the level last recorded by pthread_setconcurrency(); the
+ * value is only stored here, never acted upon in this file.
+ */
+int
+_pthread_getconcurrency(void)
+{
+
+ return (current_concurrency);
+}
+
+/*
+ * Record the desired concurrency level.  Negative levels are
+ * rejected with EINVAL; the stored value is advisory only.
+ */
+int
+_pthread_setconcurrency(int new_level)
+{
+
+ if (new_level < 0)
+ return (EINVAL);
+ current_concurrency = new_level;
+ return (0);
+}
diff --git a/lib/libthr/thread/thr_cond.c b/lib/libthr/thread/thr_cond.c
new file mode 100644
index 0000000000000..ecaf787aa5179
--- /dev/null
+++ b/lib/libthr/thread/thr_cond.c
@@ -0,0 +1,546 @@
+/*-
+ * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
+ *
+ * Copyright (c) 2005 David Xu <davidxu@freebsd.org>
+ * Copyright (c) 2015 The FreeBSD Foundation
+ * All rights reserved.
+ *
+ * Portions of this software were developed by Konstantin Belousov
+ * under sponsorship from the FreeBSD Foundation.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice unmodified, this list of conditions, and the following
+ * disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+
+#include "namespace.h"
+#include <stdlib.h>
+#include <errno.h>
+#include <string.h>
+#include <pthread.h>
+#include <limits.h>
+#include "un-namespace.h"
+
+#include "thr_private.h"
+
+_Static_assert(sizeof(struct pthread_cond) <= PAGE_SIZE,
+ "pthread_cond too large");
+
+/*
+ * Prototypes
+ */
+int __pthread_cond_wait(pthread_cond_t *cond, pthread_mutex_t *mutex);
+int __pthread_cond_timedwait(pthread_cond_t *cond, pthread_mutex_t *mutex,
+ const struct timespec * abstime);
+static int cond_init(pthread_cond_t *cond, const pthread_condattr_t *attr);
+static int cond_wait_common(pthread_cond_t *cond, pthread_mutex_t *mutex,
+ const struct timespec *abstime, int cancel);
+static int cond_signal_common(pthread_cond_t *cond);
+static int cond_broadcast_common(pthread_cond_t *cond);
+
+/*
+ * Double underscore versions are cancellation points. Single underscore
+ * versions are not and are provided for libc internal usage (which
+ * shouldn't introduce cancellation points).
+ */
+__weak_reference(__pthread_cond_wait, pthread_cond_wait);
+__weak_reference(__pthread_cond_timedwait, pthread_cond_timedwait);
+
+__weak_reference(_pthread_cond_init, pthread_cond_init);
+__weak_reference(_pthread_cond_destroy, pthread_cond_destroy);
+__weak_reference(_pthread_cond_signal, pthread_cond_signal);
+__weak_reference(_pthread_cond_broadcast, pthread_cond_broadcast);
+
+#define CV_PSHARED(cvp) (((cvp)->kcond.c_flags & USYNC_PROCESS_SHARED) != 0)
+
+/*
+ * Fill in the kernel condvar fields from the given attributes; a NULL
+ * cattr means the defaults (CLOCK_REALTIME, process-private).
+ */
+static void
+cond_init_body(struct pthread_cond *cvp, const struct pthread_cond_attr *cattr)
+{
+
+ if (cattr == NULL) {
+ cvp->kcond.c_clockid = CLOCK_REALTIME;
+ } else {
+ if (cattr->c_pshared)
+ cvp->kcond.c_flags |= USYNC_PROCESS_SHARED;
+ cvp->kcond.c_clockid = cattr->c_clockid;
+ }
+}
+
+/*
+ * Allocate and initialize a condition variable.  Process-private CVs
+ * are heap-allocated and *cond points at them; process-shared CVs
+ * live on the pshared off-page and *cond holds the THR_PSHARED_PTR
+ * sentinel instead.
+ */
+static int
+cond_init(pthread_cond_t *cond, const pthread_condattr_t *cond_attr)
+{
+ struct pthread_cond *cvp;
+ const struct pthread_cond_attr *cattr;
+ int pshared;
+
+ cattr = cond_attr != NULL ? *cond_attr : NULL;
+ if (cattr == NULL || cattr->c_pshared == PTHREAD_PROCESS_PRIVATE) {
+ pshared = 0;
+ cvp = calloc(1, sizeof(struct pthread_cond));
+ if (cvp == NULL)
+ return (ENOMEM);
+ } else {
+ pshared = 1;
+ cvp = __thr_pshared_offpage(cond, 1);
+ if (cvp == NULL)
+ return (EFAULT);
+ }
+
+ /*
+ * Initialise the condition variable structure:
+ */
+ cond_init_body(cvp, cattr);
+ *cond = pshared ? THR_PSHARED_PTR : cvp;
+ return (0);
+}
+
+/*
+ * Lazily initialize a statically initialized CV.  The global
+ * _cond_static_lock serializes racing first-use initializations; the
+ * re-check of *cond under the lock makes the second racer a no-op.
+ */
+static int
+init_static(struct pthread *thread, pthread_cond_t *cond)
+{
+ int ret;
+
+ THR_LOCK_ACQUIRE(thread, &_cond_static_lock);
+
+ if (*cond == NULL)
+ ret = cond_init(cond, NULL);
+ else
+ ret = 0;
+
+ THR_LOCK_RELEASE(thread, &_cond_static_lock);
+
+ return (ret);
+}
+
+/*
+ * Resolve *cond into the local `cvp`, performing deferred
+ * initialization for statically initialized CVs and returning EINVAL
+ * for destroyed ones.  Expects `cond` and `cvp` in the caller's scope.
+ */
+#define CHECK_AND_INIT_COND \
+ if (*cond == THR_PSHARED_PTR) { \
+ cvp = __thr_pshared_offpage(cond, 0); \
+ if (cvp == NULL) \
+ return (EINVAL); \
+ } else if (__predict_false((cvp = (*cond)) <= THR_COND_DESTROYED)) { \
+ if (cvp == THR_COND_INITIALIZER) { \
+ int ret; \
+ ret = init_static(_get_curthread(), cond); \
+ if (ret) \
+ return (ret); \
+ } else if (cvp == THR_COND_DESTROYED) { \
+ return (EINVAL); \
+ } \
+ cvp = *cond; \
+ }
+
+/* pthread_cond_init(3): clear the handle, then build the CV. */
+int
+_pthread_cond_init(pthread_cond_t * __restrict cond,
+ const pthread_condattr_t * __restrict cond_attr)
+{
+
+ *cond = NULL;
+ return (cond_init(cond, cond_attr));
+}
+
+/*
+ * Release a CV's storage and poison the handle with
+ * THR_COND_DESTROYED so later use is rejected.  A never-used static
+ * initializer has nothing to free and is left as-is.
+ */
+int
+_pthread_cond_destroy(pthread_cond_t *cond)
+{
+ struct pthread_cond *cvp;
+ int error;
+
+ error = 0;
+ if (*cond == THR_PSHARED_PTR) {
+ cvp = __thr_pshared_offpage(cond, 0);
+ if (cvp != NULL)
+ __thr_pshared_destroy(cond);
+ *cond = THR_COND_DESTROYED;
+ } else if ((cvp = *cond) == THR_COND_INITIALIZER) {
+ /* nothing */
+ } else if (cvp == THR_COND_DESTROYED) {
+ error = EINVAL;
+ } else {
+ cvp = *cond;
+ *cond = THR_COND_DESTROYED;
+ free(cvp);
+ }
+ return (error);
+}
+
+/*
+ * Cancellation behavior:
+ * Thread may be canceled at start, if thread is canceled, it means it
+ * did not get a wakeup from pthread_cond_signal(), otherwise, it is
+ * not canceled.
+ * Thread cancellation never cause wakeup from pthread_cond_signal()
+ * to be lost.
+ */
+/*
+ * Sleep on the CV entirely in the kernel via _thr_ucond_wait().
+ * Chosen by cond_wait_common() for process-shared CVs,
+ * priority-protocol/robust mutexes and non-SCHED_OTHER threads.
+ * Returns with the mutex re-acquired (or re-attached on the
+ * no-unlock failure path).
+ */
+static int
+cond_wait_kernel(struct pthread_cond *cvp, struct pthread_mutex *mp,
+ const struct timespec *abstime, int cancel)
+{
+ struct pthread *curthread;
+ int error, error2, recurse, robust;
+
+ curthread = _get_curthread();
+ robust = _mutex_enter_robust(curthread, mp);
+
+ /* Detach saves the recursion count and hands the lock to the kernel. */
+ error = _mutex_cv_detach(mp, &recurse);
+ if (error != 0) {
+ if (robust)
+ _mutex_leave_robust(curthread, mp);
+ return (error);
+ }
+
+ if (cancel)
+ _thr_cancel_enter2(curthread, 0);
+ error = _thr_ucond_wait(&cvp->kcond, &mp->m_lock, abstime,
+ CVWAIT_ABSTIME | CVWAIT_CLOCKID);
+ if (cancel)
+ _thr_cancel_leave(curthread, 0);
+
+ /*
+ * Note that PP mutex and ROBUST mutex may return
+ * interesting error codes.
+ */
+ if (error == 0) {
+ error2 = _mutex_cv_lock(mp, recurse, true);
+ } else if (error == EINTR || error == ETIMEDOUT) {
+ error2 = _mutex_cv_lock(mp, recurse, true);
+ /*
+ * Do not do cancellation on EOWNERDEAD there. The
+ * cancellation cleanup handler will use the protected
+ * state and unlock the mutex without making the state
+ * consistent and the state will be unrecoverable.
+ */
+ if (error2 == 0 && cancel) {
+ if (robust) {
+ _mutex_leave_robust(curthread, mp);
+ robust = false;
+ }
+ _thr_testcancel(curthread);
+ }
+
+ if (error == EINTR)
+ error = 0;
+ } else {
+ /* We know that it didn't unlock the mutex. */
+ _mutex_cv_attach(mp, recurse);
+ if (cancel) {
+ if (robust) {
+ _mutex_leave_robust(curthread, mp);
+ robust = false;
+ }
+ _thr_testcancel(curthread);
+ }
+ error2 = 0;
+ }
+ if (robust)
+ _mutex_leave_robust(curthread, mp);
+ return (error2 != 0 ? error2 : error);
+}
+
+/*
+ * Thread waits in userland queue whenever possible, when thread
+ * is signaled or broadcasted, it is removed from the queue, and
+ * is saved in curthread's defer_waiters[] buffer, but won't be
+ * woken up until mutex is unlocked.
+ */
+
+/*
+ * Userland wait path: sleep on the CV's sleep queue; a waker removes
+ * us from the queue (clearing wchan) before setting our wake address.
+ * Returns with the mutex re-acquired, except when exiting on
+ * cancellation.
+ */
+static int
+cond_wait_user(struct pthread_cond *cvp, struct pthread_mutex *mp,
+ const struct timespec *abstime, int cancel)
+{
+ struct pthread *curthread;
+ struct sleepqueue *sq;
+ int deferred, error, error2, recurse;
+
+ curthread = _get_curthread();
+ if (curthread->wchan != NULL)
+ PANIC("thread %p was already on queue.", curthread);
+
+ if (cancel)
+ _thr_testcancel(curthread);
+
+ _sleepq_lock(cvp);
+ /*
+ * set __has_user_waiters before unlocking mutex, this allows
+ * us to check it without locking in pthread_cond_signal().
+ */
+ cvp->__has_user_waiters = 1;
+ deferred = 0;
+ (void)_mutex_cv_unlock(mp, &recurse, &deferred);
+ curthread->mutex_obj = mp;
+ _sleepq_add(cvp, curthread);
+ for(;;) {
+ _thr_clear_wake(curthread);
+ _sleepq_unlock(cvp);
+ /* Flush any wakeups deferred by unlocking the mutex above. */
+ if (deferred) {
+ deferred = 0;
+ if ((mp->m_lock.m_owner & UMUTEX_CONTESTED) == 0)
+ (void)_umtx_op_err(&mp->m_lock,
+ UMTX_OP_MUTEX_WAKE2, mp->m_lock.m_flags,
+ 0, 0);
+ }
+ if (curthread->nwaiter_defer > 0) {
+ _thr_wake_all(curthread->defer_waiters,
+ curthread->nwaiter_defer);
+ curthread->nwaiter_defer = 0;
+ }
+
+ if (cancel)
+ _thr_cancel_enter2(curthread, 0);
+ error = _thr_sleep(curthread, cvp->kcond.c_clockid, abstime);
+ if (cancel)
+ _thr_cancel_leave(curthread, 0);
+
+ _sleepq_lock(cvp);
+ if (curthread->wchan == NULL) {
+ /* A signal/broadcast removed us from the queue. */
+ error = 0;
+ break;
+ } else if (cancel && SHOULD_CANCEL(curthread)) {
+ sq = _sleepq_lookup(cvp);
+ cvp->__has_user_waiters = _sleepq_remove(sq, curthread);
+ _sleepq_unlock(cvp);
+ curthread->mutex_obj = NULL;
+ error2 = _mutex_cv_lock(mp, recurse, false);
+ if (!THR_IN_CRITICAL(curthread))
+ _pthread_exit(PTHREAD_CANCELED);
+ else /* this should not happen */
+ return (error2);
+ } else if (error == ETIMEDOUT) {
+ sq = _sleepq_lookup(cvp);
+ cvp->__has_user_waiters =
+ _sleepq_remove(sq, curthread);
+ break;
+ }
+ }
+ _sleepq_unlock(cvp);
+ curthread->mutex_obj = NULL;
+ error2 = _mutex_cv_lock(mp, recurse, false);
+ if (error == 0)
+ error = error2;
+ return (error);
+}
+
+/*
+ * Common path for (timed)wait: resolve the CV and mutex, verify the
+ * caller owns the mutex, then pick the kernel wait (non-SCHED_OTHER
+ * threads, priority or process-shared mutexes, pshared CVs) or the
+ * userland sleep-queue wait.
+ */
+static int
+cond_wait_common(pthread_cond_t *cond, pthread_mutex_t *mutex,
+ const struct timespec *abstime, int cancel)
+{
+ struct pthread *curthread = _get_curthread();
+ struct pthread_cond *cvp;
+ struct pthread_mutex *mp;
+ int error;
+
+ CHECK_AND_INIT_COND
+
+ if (*mutex == THR_PSHARED_PTR) {
+ mp = __thr_pshared_offpage(mutex, 0);
+ if (mp == NULL)
+ return (EINVAL);
+ } else {
+ mp = *mutex;
+ }
+
+ if ((error = _mutex_owned(curthread, mp)) != 0)
+ return (error);
+
+ if (curthread->attr.sched_policy != SCHED_OTHER ||
+ (mp->m_lock.m_flags & (UMUTEX_PRIO_PROTECT | UMUTEX_PRIO_INHERIT |
+ USYNC_PROCESS_SHARED)) != 0 || CV_PSHARED(cvp))
+ return (cond_wait_kernel(cvp, mp, abstime, cancel));
+ else
+ return (cond_wait_user(cvp, mp, abstime, cancel));
+}
+
+/* Non-cancellation-point variant, for libc internal use. */
+int
+_pthread_cond_wait(pthread_cond_t *cond, pthread_mutex_t *mutex)
+{
+
+ return (cond_wait_common(cond, mutex, NULL, 0));
+}
+
+/* pthread_cond_wait(3): the cancellation-point variant. */
+int
+__pthread_cond_wait(pthread_cond_t * __restrict cond,
+ pthread_mutex_t * __restrict mutex)
+{
+
+ return (cond_wait_common(cond, mutex, NULL, 1));
+}
+
+/*
+ * Non-cancellation-point timed wait; rejects NULL or malformed
+ * timeouts (negative fields or tv_nsec out of range) with EINVAL.
+ */
+int
+_pthread_cond_timedwait(pthread_cond_t * __restrict cond,
+ pthread_mutex_t * __restrict mutex,
+ const struct timespec * __restrict abstime)
+{
+
+ if (abstime == NULL || abstime->tv_sec < 0 || abstime->tv_nsec < 0 ||
+ abstime->tv_nsec >= 1000000000)
+ return (EINVAL);
+
+ return (cond_wait_common(cond, mutex, abstime, 0));
+}
+
+/* pthread_cond_timedwait(3): cancellation-point variant. */
+int
+__pthread_cond_timedwait(pthread_cond_t *cond, pthread_mutex_t *mutex,
+ const struct timespec *abstime)
+{
+
+ if (abstime == NULL || abstime->tv_sec < 0 || abstime->tv_nsec < 0 ||
+ abstime->tv_nsec >= 1000000000)
+ return (EINVAL);
+
+ return (cond_wait_common(cond, mutex, abstime, 1));
+}
+
+/*
+ * Wake one waiter.  Kernel-side waiters are woken via
+ * _thr_ucond_signal(); a userland sleep-queue waiter is woken
+ * directly, or — when the current thread owns the waiter's mutex —
+ * the wakeup is deferred until the mutex is unlocked
+ * (PMUTEX_FLAG_DEFERRED), flushing the defer buffer if it is full.
+ */
+static int
+cond_signal_common(pthread_cond_t *cond)
+{
+ struct pthread *curthread = _get_curthread();
+ struct pthread *td;
+ struct pthread_cond *cvp;
+ struct pthread_mutex *mp;
+ struct sleepqueue *sq;
+ int *waddr;
+ int pshared;
+
+ /*
+ * If the condition variable is statically initialized, perform dynamic
+ * initialization.
+ */
+ CHECK_AND_INIT_COND
+
+ pshared = CV_PSHARED(cvp);
+
+ _thr_ucond_signal(&cvp->kcond);
+
+ if (pshared || cvp->__has_user_waiters == 0)
+ return (0);
+
+ /* curthread was initialized at its declaration above. */
+ waddr = NULL;
+ _sleepq_lock(cvp);
+ sq = _sleepq_lookup(cvp);
+ if (sq == NULL) {
+ _sleepq_unlock(cvp);
+ return (0);
+ }
+
+ td = _sleepq_first(sq);
+ mp = td->mutex_obj;
+ cvp->__has_user_waiters = _sleepq_remove(sq, td);
+ if (PMUTEX_OWNER_ID(mp) == TID(curthread)) {
+ /* We hold the waiter's mutex: defer the wakeup. */
+ if (curthread->nwaiter_defer >= MAX_DEFER_WAITERS) {
+ _thr_wake_all(curthread->defer_waiters,
+ curthread->nwaiter_defer);
+ curthread->nwaiter_defer = 0;
+ }
+ curthread->defer_waiters[curthread->nwaiter_defer++] =
+ &td->wake_addr->value;
+ mp->m_flags |= PMUTEX_FLAG_DEFERRED;
+ } else {
+ waddr = &td->wake_addr->value;
+ }
+ _sleepq_unlock(cvp);
+ if (waddr != NULL)
+ _thr_set_wake(waddr);
+ return (0);
+}
+
+/* State threaded through drop_cb() while broadcasting. */
+struct broadcast_arg {
+ struct pthread *curthread;
+ unsigned int *waddrs[MAX_DEFER_WAITERS]; /* batched wake addresses */
+ int count;
+};
+
+/*
+ * Per-waiter callback for _sleepq_drop(): defer the wakeup when we
+ * own the waiter's mutex, otherwise batch the wake address in ba,
+ * flushing either buffer whenever it fills.
+ */
+static void
+drop_cb(struct pthread *td, void *arg)
+{
+ struct broadcast_arg *ba = arg;
+ struct pthread_mutex *mp;
+ struct pthread *curthread = ba->curthread;
+
+ mp = td->mutex_obj;
+ if (PMUTEX_OWNER_ID(mp) == TID(curthread)) {
+ if (curthread->nwaiter_defer >= MAX_DEFER_WAITERS) {
+ _thr_wake_all(curthread->defer_waiters,
+ curthread->nwaiter_defer);
+ curthread->nwaiter_defer = 0;
+ }
+ curthread->defer_waiters[curthread->nwaiter_defer++] =
+ &td->wake_addr->value;
+ mp->m_flags |= PMUTEX_FLAG_DEFERRED;
+ } else {
+ if (ba->count >= MAX_DEFER_WAITERS) {
+ _thr_wake_all(ba->waddrs, ba->count);
+ ba->count = 0;
+ }
+ ba->waddrs[ba->count++] = &td->wake_addr->value;
+ }
+}
+
+/*
+ * Wake every waiter: kernel waiters via _thr_ucond_broadcast(), then
+ * drain the userland sleep queue through drop_cb(), waking any
+ * addresses still batched at the end.
+ */
+static int
+cond_broadcast_common(pthread_cond_t *cond)
+{
+ int pshared;
+ struct pthread_cond *cvp;
+ struct sleepqueue *sq;
+ struct broadcast_arg ba;
+
+ /*
+ * If the condition variable is statically initialized, perform dynamic
+ * initialization.
+ */
+ CHECK_AND_INIT_COND
+
+ pshared = CV_PSHARED(cvp);
+
+ _thr_ucond_broadcast(&cvp->kcond);
+
+ if (pshared || cvp->__has_user_waiters == 0)
+ return (0);
+
+ ba.curthread = _get_curthread();
+ ba.count = 0;
+
+ _sleepq_lock(cvp);
+ sq = _sleepq_lookup(cvp);
+ if (sq == NULL) {
+ _sleepq_unlock(cvp);
+ return (0);
+ }
+ _sleepq_drop(sq, drop_cb, &ba);
+ cvp->__has_user_waiters = 0;
+ _sleepq_unlock(cvp);
+ if (ba.count > 0)
+ _thr_wake_all(ba.waddrs, ba.count);
+ return (0);
+}
+
+/* pthread_cond_signal(3). */
+int
+_pthread_cond_signal(pthread_cond_t * cond)
+{
+
+ return (cond_signal_common(cond));
+}
+
+/* pthread_cond_broadcast(3). */
+int
+_pthread_cond_broadcast(pthread_cond_t * cond)
+{
+
+ return (cond_broadcast_common(cond));
+}
diff --git a/lib/libthr/thread/thr_condattr.c b/lib/libthr/thread/thr_condattr.c
new file mode 100644
index 0000000000000..0a06d004cef78
--- /dev/null
+++ b/lib/libthr/thread/thr_condattr.c
@@ -0,0 +1,130 @@
+/*-
+ * SPDX-License-Identifier: BSD-3-Clause
+ *
+ * Copyright (c) 1997 John Birrell <jb@cimlogic.com.au>.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. Neither the name of the author nor the names of any co-contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY JOHN BIRRELL AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+
+#include "namespace.h"
+#include <stdlib.h>
+#include <string.h>
+#include <errno.h>
+#include <pthread.h>
+#include "un-namespace.h"
+
+#include "thr_private.h"
+
+__weak_reference(_pthread_condattr_init, pthread_condattr_init);
+__weak_reference(_pthread_condattr_destroy, pthread_condattr_destroy);
+__weak_reference(_pthread_condattr_getclock, pthread_condattr_getclock);
+__weak_reference(_pthread_condattr_setclock, pthread_condattr_setclock);
+__weak_reference(_pthread_condattr_getpshared, pthread_condattr_getpshared);
+__weak_reference(_pthread_condattr_setpshared, pthread_condattr_setpshared);
+
+/*
+ * Allocate a condition attribute object, copying the library-wide
+ * defaults into it.
+ */
+int
+_pthread_condattr_init(pthread_condattr_t *attr)
+{
+ struct pthread_cond_attr *cattr;
+
+ cattr = malloc(sizeof(struct pthread_cond_attr));
+ if (cattr == NULL)
+ return (ENOMEM);
+
+ memcpy(cattr, &_pthread_condattr_default,
+ sizeof(struct pthread_cond_attr));
+ *attr = cattr;
+ return (0);
+}
+
+/*
+ * Destroy a condition attribute object, freeing its storage and
+ * clearing the handle.
+ */
+int
+_pthread_condattr_destroy(pthread_condattr_t *attr)
+{
+
+ if (attr == NULL || *attr == NULL)
+ return (EINVAL);
+
+ free(*attr);
+ *attr = NULL;
+ return (0);
+}
+
+/* Report the clock a CV built from this attribute will use. */
+int
+_pthread_condattr_getclock(const pthread_condattr_t * __restrict attr,
+ clockid_t * __restrict clock_id)
+{
+ int ret;
+
+ if (attr != NULL && *attr != NULL) {
+ *clock_id = (*attr)->c_clockid;
+ ret = 0;
+ } else {
+ ret = EINVAL;
+ }
+ return (ret);
+}
+
+/*
+ * Select the clock used for timed waits on CVs built from this
+ * attribute; only the four clocks below are accepted.
+ */
+int
+_pthread_condattr_setclock(pthread_condattr_t *attr, clockid_t clock_id)
+{
+
+ if (attr == NULL || *attr == NULL)
+ return (EINVAL);
+ switch (clock_id) {
+ case CLOCK_REALTIME:
+ case CLOCK_VIRTUAL:
+ case CLOCK_PROF:
+ case CLOCK_MONOTONIC:
+ (*attr)->c_clockid = clock_id;
+ return (0);
+ default:
+ return (EINVAL);
+ }
+}
+
+/* Report the process-shared setting stored in the attribute. */
+int
+_pthread_condattr_getpshared(const pthread_condattr_t * __restrict attr,
+ int * __restrict pshared)
+{
+
+ if (attr == NULL || *attr == NULL)
+ return (EINVAL);
+ *pshared = (*attr)->c_pshared;
+ return (0);
+}
+
+/*
+ * Record whether CVs built from this attribute may be shared between
+ * processes; only the two POSIX values are accepted.
+ */
+int
+_pthread_condattr_setpshared(pthread_condattr_t *attr, int pshared)
+{
+
+ if (attr == NULL || *attr == NULL ||
+ (pshared != PTHREAD_PROCESS_PRIVATE &&
+ pshared != PTHREAD_PROCESS_SHARED))
+ return (EINVAL);
+ (*attr)->c_pshared = pshared;
+ return (0);
+}
diff --git a/lib/libthr/thread/thr_create.c b/lib/libthr/thread/thr_create.c
new file mode 100644
index 0000000000000..69150ee6f8ea1
--- /dev/null
+++ b/lib/libthr/thread/thr_create.c
@@ -0,0 +1,296 @@
+/*-
+ * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
+ *
+ * Copyright (c) 2003 Daniel M. Eischen <deischen@gdeb.com>
+ * Copyright (c) 2005, David Xu <davidxu@freebsd.org>
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice unmodified, this list of conditions, and the following
+ * disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+
+#include "namespace.h"
+#include <sys/types.h>
+#include <sys/rtprio.h>
+#include <sys/signalvar.h>
+#include <errno.h>
+#include <link.h>
+#include <stdlib.h>
+#include <string.h>
+#include <stddef.h>
+#include <pthread.h>
+#include <pthread_np.h>
+#include "un-namespace.h"
+
+#include "libc_private.h"
+#include "thr_private.h"
+
+static int create_stack(struct pthread_attr *pattr);
+static void thread_start(struct pthread *curthread);
+
+__weak_reference(_pthread_create, pthread_create);
+
/*
 * Create a new thread running start_routine(arg) with the attributes in
 * *attr (or the library defaults when attr is NULL).  On success the new
 * thread's handle is stored in *thread and 0 is returned; on failure an
 * errno value (commonly EAGAIN) is returned and *thread is zeroed.
 *
 * The function switches libc into threaded mode on first use, allocates
 * the thread structure and stack, links the thread into the global list,
 * and finally asks the kernel to start it via thr_new(2).
 */
int
_pthread_create(pthread_t * __restrict thread,
    const pthread_attr_t * __restrict attr, void *(*start_routine) (void *),
    void * __restrict arg)
{
	struct pthread *curthread, *new_thread;
	struct thr_param param;
	struct sched_param sched_param;
	struct rtprio rtp;
	sigset_t set, oset;
	cpuset_t *cpusetp;
	int i, cpusetsize, create_suspended, locked, old_stack_prot, ret;

	cpusetp = NULL;
	ret = cpusetsize = 0;
	_thr_check_init();

	/*
	 * Tell libc and others now they need lock to protect their data.
	 */
	if (_thr_isthreaded() == 0) {
		_malloc_first_thread();
		if (_thr_setthreaded(1))
			return (EAGAIN);
	}

	curthread = _get_curthread();
	if ((new_thread = _thr_alloc(curthread)) == NULL)
		return (EAGAIN);

	memset(&param, 0, sizeof(param));

	if (attr == NULL || *attr == NULL)
		/* Use the default thread attributes: */
		new_thread->attr = _pthread_attr_default;
	else {
		new_thread->attr = *(*attr);
		/*
		 * Steal the cpuset from the copied attributes; affinity is
		 * applied separately below, after the kernel thread exists.
		 */
		cpusetp = new_thread->attr.cpuset;
		cpusetsize = new_thread->attr.cpusetsize;
		new_thread->attr.cpuset = NULL;
		new_thread->attr.cpusetsize = 0;
	}
	if (new_thread->attr.sched_inherit == PTHREAD_INHERIT_SCHED) {
		/* inherit scheduling contention scope */
		if (curthread->attr.flags & PTHREAD_SCOPE_SYSTEM)
			new_thread->attr.flags |= PTHREAD_SCOPE_SYSTEM;
		else
			new_thread->attr.flags &= ~PTHREAD_SCOPE_SYSTEM;

		new_thread->attr.prio = curthread->attr.prio;
		new_thread->attr.sched_policy = curthread->attr.sched_policy;
	}

	/* Not yet a live kernel thread; the kernel fills in the real tid. */
	new_thread->tid = TID_TERMINATED;

	/* Sampled before stack creation to detect a concurrent change. */
	old_stack_prot = _rtld_get_stack_prot();
	if (create_stack(&new_thread->attr) != 0) {
		/* Insufficient memory to create a stack: */
		_thr_free(curthread, new_thread);
		return (EAGAIN);
	}
	/*
	 * Write a magic value to the thread structure
	 * to help identify valid ones:
	 */
	new_thread->magic = THR_MAGIC;
	new_thread->start_routine = start_routine;
	new_thread->arg = arg;
	new_thread->cancel_enable = 1;
	new_thread->cancel_async = 0;
	/* Initialize the mutex queue: */
	for (i = 0; i < TMQ_NITEMS; i++)
		TAILQ_INIT(&new_thread->mq[i]);

	/* Initialise hooks in the thread structure: */
	if (new_thread->attr.suspend == THR_CREATE_SUSPENDED) {
		new_thread->flags = THR_FLAGS_NEED_SUSPEND;
		create_suspended = 1;
	} else {
		create_suspended = 0;
	}

	new_thread->state = PS_RUNNING;

	if (new_thread->attr.flags & PTHREAD_CREATE_DETACHED)
		new_thread->flags |= THR_FLAGS_DETACHED;

	/* Add the new thread. */
	new_thread->refcount = 1;
	_thr_link(curthread, new_thread);

	/*
	 * Handle the race between __pthread_map_stacks_exec and
	 * thread linkage.
	 */
	if (old_stack_prot != _rtld_get_stack_prot())
		_thr_stack_fix_protection(new_thread);

	/* Return thread pointer earlier so that new thread can use it. */
	(*thread) = new_thread;
	/*
	 * Hold the new thread locked across thr_new() when the parent must
	 * report a creation event or apply CPU affinity, so the child
	 * cannot run real work before that is done (see thread_start()).
	 */
	if (SHOULD_REPORT_EVENT(curthread, TD_CREATE) || cpusetp != NULL) {
		THR_THREAD_LOCK(curthread, new_thread);
		locked = 1;
	} else
		locked = 0;
	param.start_func = (void (*)(void *)) thread_start;
	param.arg = new_thread;
	param.stack_base = new_thread->attr.stackaddr_attr;
	param.stack_size = new_thread->attr.stacksize_attr;
	param.tls_base = (char *)new_thread->tcb;
	param.tls_size = sizeof(struct tcb);
	param.child_tid = &new_thread->tid;
	param.parent_tid = &new_thread->tid;
	param.flags = 0;
	if (new_thread->attr.flags & PTHREAD_SCOPE_SYSTEM)
		param.flags |= THR_SYSTEM_SCOPE;
	if (new_thread->attr.sched_inherit == PTHREAD_INHERIT_SCHED)
		param.rtp = NULL;
	else {
		sched_param.sched_priority = new_thread->attr.prio;
		_schedparam_to_rtp(new_thread->attr.sched_policy,
		    &sched_param, &rtp);
		param.rtp = &rtp;
	}

	/* Schedule the new thread. */
	if (create_suspended) {
		/*
		 * Block (almost) all signals around thr_new() so the child
		 * starts with a known mask; the saved mask is restored by
		 * the child in thread_start().
		 */
		SIGFILLSET(set);
		SIGDELSET(set, SIGTRAP);
		__sys_sigprocmask(SIG_SETMASK, &set, &oset);
		new_thread->sigmask = oset;
		SIGDELSET(new_thread->sigmask, SIGCANCEL);
	}

	ret = thr_new(&param, sizeof(param));

	if (ret != 0) {
		ret = errno;
		/*
		 * Translate EPROCLIM into well-known POSIX code EAGAIN.
		 */
		if (ret == EPROCLIM)
			ret = EAGAIN;
	}

	if (create_suspended)
		__sys_sigprocmask(SIG_SETMASK, &oset, NULL);

	if (ret != 0) {
		/* Kernel refused to create the thread; undo the linkage. */
		if (!locked)
			THR_THREAD_LOCK(curthread, new_thread);
		new_thread->state = PS_DEAD;
		new_thread->tid = TID_TERMINATED;
		new_thread->flags |= THR_FLAGS_DETACHED;
		new_thread->refcount--;
		if (new_thread->flags & THR_FLAGS_NEED_SUSPEND) {
			new_thread->cycle++;
			_thr_umtx_wake(&new_thread->cycle, INT_MAX, 0);
		}
		_thr_try_gc(curthread, new_thread); /* thread lock released */
		atomic_add_int(&_thread_active_threads, -1);
	} else if (locked) {
		if (cpusetp != NULL) {
			if (cpuset_setaffinity(CPU_LEVEL_WHICH, CPU_WHICH_TID,
			    TID(new_thread), cpusetsize, cpusetp)) {
				ret = errno;
				/* kill the new thread */
				new_thread->force_exit = 1;
				new_thread->flags |= THR_FLAGS_DETACHED;
				_thr_try_gc(curthread, new_thread);
				/* thread lock released */
				goto out;
			}
		}

		_thr_report_creation(curthread, new_thread);
		THR_THREAD_UNLOCK(curthread, new_thread);
	}
out:
	if (ret)
		(*thread) = 0;
	return (ret);
}
+
+static int
+create_stack(struct pthread_attr *pattr)
+{
+ int ret;
+
+ /* Check if a stack was specified in the thread attributes: */
+ if ((pattr->stackaddr_attr) != NULL) {
+ pattr->guardsize_attr = 0;
+ pattr->flags |= THR_STACK_USER;
+ ret = 0;
+ }
+ else
+ ret = _thr_stack_alloc(pattr);
+ return (ret);
+}
+
/*
 * Entry point of every thread created by _pthread_create(); the kernel
 * starts the new thread here with its struct pthread as the argument.
 * Synchronizes with the parent, honors suspended creation and forced
 * exit, then runs the user's start routine and exits with its result.
 */
static void
thread_start(struct pthread *curthread)
{
	sigset_t set;

	/* Parent stashed our intended signal mask; grab a copy early. */
	if (curthread->attr.suspend == THR_CREATE_SUSPENDED)
		set = curthread->sigmask;

	/*
	 * This is used as a serialization point to allow parent
	 * to report 'new thread' event to debugger or tweak new thread's
	 * attributes before the new thread does real-world work.
	 */
	THR_LOCK(curthread);
	THR_UNLOCK(curthread);

	/* Parent failed to set affinity and asked us to die quietly. */
	if (curthread->force_exit)
		_pthread_exit(PTHREAD_CANCELED);

	if (curthread->attr.suspend == THR_CREATE_SUSPENDED) {
#if 0
		/* Done in THR_UNLOCK() */
		_thr_ast(curthread);
#endif

		/*
		 * Parent thread have stored signal mask for us,
		 * we should restore it now.
		 */
		__sys_sigprocmask(SIG_SETMASK, &set, NULL);
	}

#ifdef _PTHREAD_FORCED_UNWIND
	/* Upper bound of our stack, used to stop forced unwinding. */
	curthread->unwind_stackend = (char *)curthread->attr.stackaddr_attr +
	    curthread->attr.stacksize_attr;
#endif

	/* Run the current thread's start routine with argument: */
	_pthread_exit(curthread->start_routine(curthread->arg));

	/* This point should never be reached. */
	PANIC("Thread has resumed after exit");
}
diff --git a/lib/libthr/thread/thr_ctrdtr.c b/lib/libthr/thread/thr_ctrdtr.c
new file mode 100644
index 0000000000000..9d4301ef719f9
--- /dev/null
+++ b/lib/libthr/thread/thr_ctrdtr.c
@@ -0,0 +1,56 @@
+/*-
+ * Copyright (C) 2003 Jake Burkholder <jake@freebsd.org>
+ * Copyright (C) 2003 David Xu <davidxu@freebsd.org>
+ * Copyright (c) 2001,2003 Daniel Eischen <deischen@freebsd.org>
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Neither the name of the author nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+
+#include <sys/types.h>
+#include <rtld_tls.h>
+
+#include "thr_private.h"
+
+struct tcb *
+_tcb_ctor(struct pthread *thread, int initial)
+{
+ struct tcb *tcb;
+
+ if (initial)
+ tcb = _tcb_get();
+ else
+ tcb = _rtld_allocate_tls(NULL, sizeof(struct tcb), 16);
+ if (tcb)
+ tcb->tcb_thread = thread;
+ return (tcb);
+}
+
/*
 * Release a TCB obtained from _tcb_ctor(), handing its TLS block back
 * to rtld.  Size and alignment must match the allocation above.
 */
void
_tcb_dtor(struct tcb *tcb)
{

	_rtld_free_tls(tcb, sizeof(struct tcb), 16);
}
diff --git a/lib/libthr/thread/thr_detach.c b/lib/libthr/thread/thr_detach.c
new file mode 100644
index 0000000000000..feac319d0df86
--- /dev/null
+++ b/lib/libthr/thread/thr_detach.c
@@ -0,0 +1,69 @@
+/*-
+ * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
+ *
+ * Copyright (c) 2005 David Xu <davidxu@freebsd.org>
+ * Copyright (C) 2003 Daniel M. Eischen <deischen@freebsd.org>
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice unmodified, this list of conditions, and the following
+ * disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+
+#include "namespace.h"
+#include <sys/types.h>
+#include <errno.h>
+#include <pthread.h>
+#include "un-namespace.h"
+
+#include "thr_private.h"
+
+__weak_reference(_pthread_detach, pthread_detach);
+
+int
+_pthread_detach(pthread_t pthread)
+{
+ struct pthread *curthread = _get_curthread();
+ int rval;
+
+ if (pthread == NULL)
+ return (EINVAL);
+
+ if ((rval = _thr_find_thread(curthread, pthread,
+ /*include dead*/1)) != 0) {
+ return (rval);
+ }
+
+ /* Check if the thread is already detached or has a joiner. */
+ if ((pthread->flags & THR_FLAGS_DETACHED) != 0 ||
+ (pthread->joiner != NULL)) {
+ THR_THREAD_UNLOCK(curthread, pthread);
+ return (EINVAL);
+ }
+
+ /* Flag the thread as detached. */
+ pthread->flags |= THR_FLAGS_DETACHED;
+ _thr_try_gc(curthread, pthread); /* thread lock released */
+
+ return (0);
+}
diff --git a/lib/libthr/thread/thr_equal.c b/lib/libthr/thread/thr_equal.c
new file mode 100644
index 0000000000000..8baed3b75c3f9
--- /dev/null
+++ b/lib/libthr/thread/thr_equal.c
@@ -0,0 +1,47 @@
+/*-
+ * SPDX-License-Identifier: BSD-3-Clause
+ *
+ * Copyright (c) 1995 John Birrell <jb@cimlogic.com.au>.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. Neither the name of the author nor the names of any co-contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY JOHN BIRRELL AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+
+#include "namespace.h"
+#include <pthread.h>
+#include "un-namespace.h"
+#include "thr_private.h"
+
+__weak_reference(_pthread_equal, pthread_equal);
+
/*
 * Report whether two pthread handles name the same thread.  Handles
 * are compared directly: nonzero when equal, zero otherwise.
 */
int
_pthread_equal(pthread_t t1, pthread_t t2)
{

	return (t1 == t2 ? 1 : 0);
}
diff --git a/lib/libthr/thread/thr_event.c b/lib/libthr/thread/thr_event.c
new file mode 100644
index 0000000000000..977c0dacb7c7c
--- /dev/null
+++ b/lib/libthr/thread/thr_event.c
@@ -0,0 +1,68 @@
+/*-
+ * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
+ *
+ * Copyright (c) 2005 David Xu
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Neither the name of the author nor the names of any co-contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+
+#include "thr_private.h"
+
/*
 * Debugger breakpoint hook: a thread-aware debugger plants a breakpoint
 * here to be notified of thread creation (see _thr_report_creation()).
 * The body is intentionally empty and must stay that way.
 */
void
_thread_bp_create(void)
{
}
+
/*
 * Debugger breakpoint hook for thread death events (see
 * _thr_report_death()).  Intentionally empty; must stay that way.
 */
void
_thread_bp_death(void)
{
}
+
/*
 * Publish a TD_CREATE event for a newly created thread.  The event is
 * recorded in the creating thread's event buffer, _thread_last_event is
 * pointed at it under _thr_event_lock, and the breakpoint hook is
 * called so an attached debugger can read the event.
 */
void
_thr_report_creation(struct pthread *curthread, struct pthread *newthread)
{
	curthread->event_buf.event = TD_CREATE;
	curthread->event_buf.th_p = (uintptr_t)newthread;
	curthread->event_buf.data = 0;
	/* Serialize reporting so the debugger sees one event at a time. */
	THR_UMUTEX_LOCK(curthread, &_thr_event_lock);
	_thread_last_event = curthread;
	_thread_bp_create();
	_thread_last_event = NULL;
	THR_UMUTEX_UNLOCK(curthread, &_thr_event_lock);
}
+
/*
 * Publish a TD_DEATH event for the calling thread, mirroring
 * _thr_report_creation(): fill the event buffer, expose it via
 * _thread_last_event under _thr_event_lock, and hit the breakpoint
 * hook for the debugger.
 */
void
_thr_report_death(struct pthread *curthread)
{
	curthread->event_buf.event = TD_DEATH;
	curthread->event_buf.th_p = (uintptr_t)curthread;
	curthread->event_buf.data = 0;
	/* Serialize reporting so the debugger sees one event at a time. */
	THR_UMUTEX_LOCK(curthread, &_thr_event_lock);
	_thread_last_event = curthread;
	_thread_bp_death();
	_thread_last_event = NULL;
	THR_UMUTEX_UNLOCK(curthread, &_thr_event_lock);
}
diff --git a/lib/libthr/thread/thr_exit.c b/lib/libthr/thread/thr_exit.c
new file mode 100644
index 0000000000000..5c5a6bd766a31
--- /dev/null
+++ b/lib/libthr/thread/thr_exit.c
@@ -0,0 +1,329 @@
+/*-
+ * SPDX-License-Identifier: BSD-3-Clause
+ *
+ * Copyright (c) 1995-1998 John Birrell <jb@cimlogic.com.au>
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. Neither the name of the author nor the names of any co-contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY JOHN BIRRELL AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+
+#include "namespace.h"
+#include <errno.h>
+#ifdef _PTHREAD_FORCED_UNWIND
+#include <dlfcn.h>
+#endif
+#include <stdarg.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <pthread.h>
+#include <sys/types.h>
+#include <sys/signalvar.h>
+#include "un-namespace.h"
+
+#include "libc_private.h"
+#include "thr_private.h"
+
+static void exit_thread(void) __dead2;
+
+__weak_reference(_pthread_exit, pthread_exit);
+
+#ifdef _PTHREAD_FORCED_UNWIND
+static int message_printed;
+
+static void thread_unwind(void) __dead2;
+#ifdef PIC
+static void thread_uw_init(void);
+static _Unwind_Reason_Code thread_unwind_stop(int version,
+ _Unwind_Action actions,
+ int64_t exc_class,
+ struct _Unwind_Exception *exc_obj,
+ struct _Unwind_Context *context, void *stop_parameter);
+/* unwind library pointers */
+static _Unwind_Reason_Code (*uwl_forcedunwind)(struct _Unwind_Exception *,
+ _Unwind_Stop_Fn, void *);
+static unsigned long (*uwl_getcfa)(struct _Unwind_Context *);
+
/*
 * One-time lookup of the unwinder entry points.  When libthr is built
 * PIC it must not link against the unwind library directly, so
 * _Unwind_ForcedUnwind and _Unwind_GetCFA are resolved with dlsym()
 * from whatever unwind library the process already loaded.  The
 * providing library is dlopen()ed (and deliberately never closed) to
 * pin the resolved addresses for the life of the process.
 */
static void
thread_uw_init(void)
{
	static int inited = 0;
	Dl_info dli;
	void *handle;
	void *forcedunwind, *getcfa;

	if (inited)
		return;
	handle = RTLD_DEFAULT;
	if ((forcedunwind = dlsym(handle, "_Unwind_ForcedUnwind")) != NULL) {
		if (dladdr(forcedunwind, &dli)) {
			/*
			 * Make sure the address is always valid by holding the library,
			 * also assume functions are in same library.
			 */
			if ((handle = dlopen(dli.dli_fname, RTLD_LAZY)) != NULL) {
				forcedunwind = dlsym(handle, "_Unwind_ForcedUnwind");
				getcfa = dlsym(handle, "_Unwind_GetCFA");
				if (forcedunwind != NULL && getcfa != NULL) {
					/*
					 * Store uwl_getcfa first; the release
					 * store of uwl_forcedunwind publishes
					 * both pointers to other threads.
					 */
					uwl_getcfa = getcfa;
					atomic_store_rel_ptr((volatile void *)&uwl_forcedunwind,
					    (uintptr_t)forcedunwind);
				} else {
					dlclose(handle);
				}
			}
		}
	}
	inited = 1;
}
+
/*
 * Forwarder to the _Unwind_ForcedUnwind implementation resolved at
 * runtime by thread_uw_init().  Callers must have observed
 * uwl_forcedunwind != NULL first (see _pthread_exit_mask()).
 */
_Unwind_Reason_Code
_Unwind_ForcedUnwind(struct _Unwind_Exception *ex, _Unwind_Stop_Fn stop_func,
    void *stop_arg)
{
	return (*uwl_forcedunwind)(ex, stop_func, stop_arg);
}
+
/*
 * Forwarder to the _Unwind_GetCFA implementation resolved at runtime
 * by thread_uw_init(); valid once uwl_forcedunwind has been published.
 */
unsigned long
_Unwind_GetCFA(struct _Unwind_Context *context)
{
	return (*uwl_getcfa)(context);
}
+#else
+#pragma weak _Unwind_GetCFA
+#pragma weak _Unwind_ForcedUnwind
+#endif /* PIC */
+
/*
 * Cleanup hook installed in the forced-unwind exception object.
 * Reaching it means some frame consumed the exception instead of
 * rethrowing it, which the exit protocol forbids -- so panic.
 */
static void
thread_unwind_cleanup(_Unwind_Reason_Code code __unused,
    struct _Unwind_Exception *e __unused)
{
	/*
	 * Specification said that _Unwind_Resume should not be used here,
	 * instead, user should rethrow the exception. For C++ user, they
	 * should put "throw" sentence in catch(...) block.
	 */
	PANIC("exception should be rethrown");
}
+
/*
 * Stop function for _Unwind_ForcedUnwind() during thread exit.  Called
 * once per unwound frame: pops and runs every cleanup handler that was
 * pushed at or below the current frame (by comparing handler addresses
 * with the frame's CFA), and, once the thread's stack is exhausted,
 * runs TLS destructors and finishes the exit via exit_thread().
 */
static _Unwind_Reason_Code
thread_unwind_stop(int version __unused, _Unwind_Action actions,
	int64_t exc_class __unused,
	struct _Unwind_Exception *exc_obj __unused,
	struct _Unwind_Context *context, void *stop_parameter __unused)
{
	struct pthread *curthread = _get_curthread();
	struct pthread_cleanup *cur;
	uintptr_t cfa;
	int done = 0;

	/* XXX assume stack grows down to lower address */

	cfa = _Unwind_GetCFA(context);
	if (actions & _UA_END_OF_STACK ||
	    cfa >= (uintptr_t)curthread->unwind_stackend) {
		done = 1;
	}

	/*
	 * Cleanup records live on the stack, so any record at or below
	 * the current CFA belongs to an already-unwound frame.
	 */
	while ((cur = curthread->cleanup) != NULL &&
	       (done || (uintptr_t)cur <= cfa)) {
		__pthread_cleanup_pop_imp(1);
	}

	if (done) {
		/* Tell libc that it should call non-trivial TLS dtors. */
		__cxa_thread_call_dtors();

		exit_thread(); /* Never return! */
	}

	return (_URC_NO_REASON);
}
+
/*
 * Kick off forced unwinding of the exiting thread's stack.  The
 * per-thread exception object is initialized with a zero class (no
 * language owner) and a cleanup hook that panics if the exception is
 * swallowed.  thread_unwind_stop() terminates the thread, so the
 * unwinder must never return here.
 */
static void
thread_unwind(void)
{
	struct pthread *curthread = _get_curthread();

	curthread->ex.exception_class = 0;
	curthread->ex.exception_cleanup = thread_unwind_cleanup;
	_Unwind_ForcedUnwind(&curthread->ex, thread_unwind_stop, NULL);
	PANIC("_Unwind_ForcedUnwind returned");
}
+
+#endif
+
/*
 * Print a fatal libthr error (formatted message plus source location
 * and errno) to stderr and abort the process.  Backs the PANIC() macro;
 * uses the async-signal-safe _thread_printf family, not stdio.
 */
void
_thread_exitf(const char *fname, int lineno, const char *fmt, ...)
{
	va_list ap;

	/* Write an error message to the standard error file descriptor: */
	_thread_printf(STDERR_FILENO, "Fatal error '");

	va_start(ap, fmt);
	_thread_vprintf(STDERR_FILENO, fmt, ap);
	va_end(ap);

	_thread_printf(STDERR_FILENO, "' at line %d in file %s (errno = %d)\n",
	    lineno, fname, errno);

	abort();
}
+
/*
 * Compatibility wrapper around _thread_exitf() for callers passing a
 * plain message rather than a format string.  Never returns.
 */
void
_thread_exit(const char *fname, int lineno, const char *msg)
{

	_thread_exitf(fname, lineno, "%s", msg);
}
+
/*
 * Terminate the calling thread with the given exit status; thin
 * wrapper over _pthread_exit_mask() with no signal-mask reset.
 */
void
_pthread_exit(void *status)
{
	_pthread_exit_mask(status, NULL);
}
+
+void
+_pthread_exit_mask(void *status, sigset_t *mask)
+{
+ struct pthread *curthread = _get_curthread();
+
+ /* Check if this thread is already in the process of exiting: */
+ if (curthread->cancelling)
+ PANIC("Thread %p has called "
+ "pthread_exit() from a destructor. POSIX 1003.1 "
+ "1996 s16.2.5.2 does not allow this!", curthread);
+
+ /* Flag this thread as exiting. */
+ curthread->cancelling = 1;
+ curthread->no_cancel = 1;
+ curthread->cancel_async = 0;
+ curthread->cancel_point = 0;
+ if (mask != NULL)
+ __sys_sigprocmask(SIG_SETMASK, mask, NULL);
+ if (curthread->unblock_sigcancel) {
+ sigset_t set;
+
+ curthread->unblock_sigcancel = 0;
+ SIGEMPTYSET(set);
+ SIGADDSET(set, SIGCANCEL);
+ __sys_sigprocmask(SIG_UNBLOCK, mask, NULL);
+ }
+
+ /* Save the return value: */
+ curthread->ret = status;
+#ifdef _PTHREAD_FORCED_UNWIND
+
+#ifdef PIC
+ thread_uw_init();
+ if (uwl_forcedunwind != NULL) {
+#else
+ if (_Unwind_ForcedUnwind != NULL) {
+#endif
+ if (curthread->unwind_disabled) {
+ if (message_printed == 0) {
+ message_printed = 1;
+ _thread_printf(2, "Warning: old _pthread_cleanup_push was called, "
+ "stack unwinding is disabled.\n");
+ }
+ goto cleanup;
+ }
+ thread_unwind();
+
+ } else {
+cleanup:
+ while (curthread->cleanup != NULL) {
+ __pthread_cleanup_pop_imp(1);
+ }
+ __cxa_thread_call_dtors();
+
+ exit_thread();
+ }
+
+#else
+ while (curthread->cleanup != NULL) {
+ __pthread_cleanup_pop_imp(1);
+ }
+ __cxa_thread_call_dtors();
+
+ exit_thread();
+#endif /* _PTHREAD_FORCED_UNWIND */
+}
+
/*
 * Final stage of thread termination, entered after all cleanup handlers
 * have run.  Runs thread-specific-data destructors, exits the whole
 * process if this was the last thread, otherwise tears the thread down
 * and calls thr_exit(2).  Never returns.
 */
static void
exit_thread(void)
{
	struct pthread *curthread = _get_curthread();

	free(curthread->name);
	curthread->name = NULL;

	/* Check if there is thread specific data: */
	if (curthread->specific != NULL) {
		/* Run the thread-specific data destructors: */
		_thread_cleanupspecific();
	}

	/* Sole thread in a non-threaded process: a plain exit suffices. */
	if (!_thr_isthreaded())
		exit(0);

	/* Last active thread exits the process with status 0. */
	if (atomic_fetchadd_int(&_thread_active_threads, -1) == 1) {
		exit(0);
		/* Never reach! */
	}

	/* Tell malloc that the thread is exiting. */
	_malloc_thread_cleanup();

	THR_LOCK(curthread);
	curthread->state = PS_DEAD;
	/* Wake anyone waiting for this thread's suspension cycle. */
	if (curthread->flags & THR_FLAGS_NEED_SUSPEND) {
		curthread->cycle++;
		_thr_umtx_wake(&curthread->cycle, INT_MAX, 0);
	}
	if (!curthread->force_exit && SHOULD_REPORT_EVENT(curthread, TD_DEATH))
		_thr_report_death(curthread);
	/*
	 * Thread was created with initial refcount 1, we drop the
	 * reference count to allow it to be garbage collected.
	 */
	curthread->refcount--;
	_thr_try_gc(curthread, curthread); /* thread lock released */

#if defined(_PTHREADS_INVARIANTS)
	if (THR_IN_CRITICAL(curthread))
		PANIC("thread %p exits with resources held!", curthread);
#endif
	/*
	 * Kernel will do wakeup at the address, so joiner thread
	 * will be resumed if it is sleeping at the address.
	 */
	thr_exit(&curthread->tid);
	PANIC("thr_exit() returned");
	/* Never reach! */
}
diff --git a/lib/libthr/thread/thr_fork.c b/lib/libthr/thread/thr_fork.c
new file mode 100644
index 0000000000000..5e63e6eec40b8
--- /dev/null
+++ b/lib/libthr/thread/thr_fork.c
@@ -0,0 +1,267 @@
+/*
+ * Copyright (c) 2005 David Xu <davidxu@freebsd.org>
+ * Copyright (c) 2003 Daniel Eischen <deischen@freebsd.org>
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Neither the name of the author nor the names of any co-contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+/*-
+ * SPDX-License-Identifier: BSD-3-Clause
+ *
+ * Copyright (c) 1995-1998 John Birrell <jb@cimlogic.com.au>
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. Neither the name of the author nor the names of any co-contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY JOHN BIRRELL AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ */
+
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+
+#include <sys/syscall.h>
+#include "namespace.h"
+#include <errno.h>
+#include <link.h>
+#include <string.h>
+#include <stdlib.h>
+#include <unistd.h>
+#include <pthread.h>
+#include <spinlock.h>
+#include "un-namespace.h"
+
+#include "libc_private.h"
+#include "rtld_lock.h"
+#include "thr_private.h"
+
+__weak_reference(_pthread_atfork, pthread_atfork);
+
+/*
+ * pthread_atfork(): register prepare/parent/child handlers to be run
+ * around fork().  The handler record is heap-allocated and appended to
+ * the global _thr_atfork_list under the atfork write lock.
+ * Returns 0 on success or ENOMEM if the record cannot be allocated.
+ * Any of the three handlers may be NULL.
+ */
+int
+_pthread_atfork(void (*prepare)(void), void (*parent)(void),
+    void (*child)(void))
+{
+	struct pthread *curthread;
+	struct pthread_atfork *af;
+
+	_thr_check_init();
+
+	if ((af = malloc(sizeof(struct pthread_atfork))) == NULL)
+		return (ENOMEM);
+
+	curthread = _get_curthread();
+	af->prepare = prepare;
+	af->parent = parent;
+	af->child = child;
+	/* Stay in a critical section while holding the list lock. */
+	THR_CRITICAL_ENTER(curthread);
+	_thr_rwl_wrlock(&_thr_atfork_lock);
+	TAILQ_INSERT_TAIL(&_thr_atfork_list, af, qe);
+	_thr_rwl_unlock(&_thr_atfork_lock);
+	THR_CRITICAL_LEAVE(curthread);
+	return (0);
+}
+
+/*
+ * Called when a shared object is being unloaded: drop any atfork
+ * handler whose prepare/parent/child callback lives inside the DSO
+ * described by phdr_info.  Matching records are first moved to a local
+ * list under the write lock and only freed after the lock is dropped,
+ * so free() is never called while holding _thr_atfork_lock.  Per-DSO
+ * TSD destructors and signal handlers are unloaded as well.
+ */
+void
+__pthread_cxa_finalize(struct dl_phdr_info *phdr_info)
+{
+	atfork_head temp_list = TAILQ_HEAD_INITIALIZER(temp_list);
+	struct pthread *curthread;
+	struct pthread_atfork *af, *af1;
+
+	_thr_check_init();
+
+	curthread = _get_curthread();
+	THR_CRITICAL_ENTER(curthread);
+	_thr_rwl_wrlock(&_thr_atfork_lock);
+	TAILQ_FOREACH_SAFE(af, &_thr_atfork_list, qe, af1) {
+		if (__elf_phdr_match_addr(phdr_info, af->prepare) ||
+		    __elf_phdr_match_addr(phdr_info, af->parent) ||
+		    __elf_phdr_match_addr(phdr_info, af->child)) {
+			TAILQ_REMOVE(&_thr_atfork_list, af, qe);
+			TAILQ_INSERT_TAIL(&temp_list, af, qe);
+		}
+	}
+	_thr_rwl_unlock(&_thr_atfork_lock);
+	THR_CRITICAL_LEAVE(curthread);
+	/* Free the unlinked records outside the lock. */
+	while ((af = TAILQ_FIRST(&temp_list)) != NULL) {
+		TAILQ_REMOVE(&temp_list, af, qe);
+		free(af);
+	}
+	_thr_tsd_unload(phdr_info);
+	_thr_sigact_unload(phdr_info);
+}
+
+__weak_reference(__thr_fork, _fork);
+
+/*
+ * fork(2) wrapper for a threaded process.  Runs the registered atfork
+ * prepare handlers (in reverse registration order), blocks signals and
+ * quiesces malloc, rtld and process-shared state, forks via the raw
+ * syscall, then rebuilds the thread library state in the child (or
+ * restores it in the parent) before running the child/parent atfork
+ * handlers.  Returns like fork(2): child's pid in the parent, 0 in the
+ * child, -1 with errno set on failure.
+ */
+pid_t
+__thr_fork(void)
+{
+	struct pthread *curthread;
+	struct pthread_atfork *af;
+	pid_t ret;
+	int errsave, cancelsave;
+	int was_threaded;
+	int rtld_locks[MAX_RTLD_LOCKS];
+
+	/* Nothing to protect before the library is initialized. */
+	if (!_thr_is_inited())
+		return (__sys_fork());
+
+	curthread = _get_curthread();
+	/* Defer cancellation across the entire fork sequence. */
+	cancelsave = curthread->no_cancel;
+	curthread->no_cancel = 1;
+	_thr_rwl_rdlock(&_thr_atfork_lock);
+
+	/* Run down atfork prepare handlers. */
+	TAILQ_FOREACH_REVERSE(af, &_thr_atfork_list, atfork_head, qe) {
+		if (af->prepare != NULL)
+			af->prepare();
+	}
+
+	/*
+	 * Block all signals until we reach a safe point.
+	 */
+	_thr_signal_block(curthread);
+	_thr_signal_prefork();
+
+	/*
+	 * All bets are off as to what should happen soon if the parent
+	 * process was not so kindly as to set up pthread fork hooks to
+	 * relinquish all running threads.
+	 */
+	if (_thr_isthreaded() != 0) {
+		was_threaded = 1;
+		_malloc_prefork();
+		__thr_pshared_atfork_pre();
+		_rtld_atfork_pre(rtld_locks);
+	} else {
+		was_threaded = 0;
+	}
+
+	/*
+	 * Fork a new process.
+	 * There is no easy way to pre-resolve the __sys_fork symbol
+	 * without performing the fork. Use the syscall(2)
+	 * indirection, the syscall symbol is resolved in
+	 * _thr_rtld_init() with side-effect free call.
+	 */
+	ret = syscall(SYS_fork);
+	if (ret == 0) {
+		/* Child process */
+		errsave = errno;
+		curthread->cancel_pending = 0;
+		curthread->flags &= ~(THR_FLAGS_NEED_SUSPEND|THR_FLAGS_DETACHED);
+
+		/*
+		 * Thread list will be reinitialized, and later we call
+		 * _libpthread_init(), it will add us back to list.
+		 */
+		curthread->tlflags &= ~TLFLAGS_IN_TDLIST;
+
+		/* child is a new kernel thread. */
+		thr_self(&curthread->tid);
+
+		/* Reset our lock; another thread may have held it at fork. */
+		_thr_umutex_init(&curthread->lock);
+		_mutex_fork(curthread);
+
+		_thr_signal_postfork_child();
+
+		if (was_threaded) {
+			_rtld_atfork_post(rtld_locks);
+			__thr_pshared_atfork_post();
+		}
+		_thr_setthreaded(0);
+
+		/* Reinitialize the library. */
+		_libpthread_init(curthread);
+
+		/* atfork is reinitialized by _libpthread_init()! */
+		_thr_rwl_rdlock(&_thr_atfork_lock);
+
+		if (was_threaded) {
+			__isthreaded = 1;
+			_malloc_postfork();
+			__isthreaded = 0;
+		}
+
+		/* Ready to continue, unblock signals. */
+		_thr_signal_unblock(curthread);
+
+		/* Run down atfork child handlers. */
+		TAILQ_FOREACH(af, &_thr_atfork_list, qe) {
+			if (af->child != NULL)
+				af->child();
+		}
+		_thr_rwlock_unlock(&_thr_atfork_lock);
+		curthread->no_cancel = cancelsave;
+	} else {
+		/* Parent process */
+		errsave = errno;
+
+		_thr_signal_postfork();
+
+		if (was_threaded) {
+			_rtld_atfork_post(rtld_locks);
+			__thr_pshared_atfork_post();
+			_malloc_postfork();
+		}
+
+		/* Ready to continue, unblock signals. */
+		_thr_signal_unblock(curthread);
+
+		/* Run down atfork parent handlers. */
+		TAILQ_FOREACH(af, &_thr_atfork_list, qe) {
+			if (af->parent != NULL)
+				af->parent();
+		}
+
+		_thr_rwlock_unlock(&_thr_atfork_lock);
+		curthread->no_cancel = cancelsave;
+		/* test async cancel */
+		if (curthread->cancel_async)
+			_thr_testcancel(curthread);
+	}
+	/* Restore errno clobbered by the post-fork bookkeeping. */
+	errno = errsave;
+
+	return (ret);
+}
diff --git a/lib/libthr/thread/thr_getcpuclockid.c b/lib/libthr/thread/thr_getcpuclockid.c
new file mode 100644
index 0000000000000..20f63039ea89d
--- /dev/null
+++ b/lib/libthr/thread/thr_getcpuclockid.c
@@ -0,0 +1,52 @@
+/*-
+ * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
+ *
+ * Copyright (c) 2008 David Xu <davidxu@freebsd.org>
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY JOHN BIRRELL AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+
+#include "namespace.h"
+#include <errno.h>
+#include <pthread.h>
+#include <sys/time.h>
+#include "un-namespace.h"
+
+#include "thr_private.h"
+
+__weak_reference(_pthread_getcpuclockid, pthread_getcpuclockid);
+
+/*
+ * Return in *clock_id a clock that measures the CPU time consumed by
+ * the given thread, using clock_getcpuclockid2(2) with the thread's
+ * kernel TID.  Returns 0 on success, EINVAL for a NULL thread, or the
+ * errno set by the underlying system call.
+ */
+int
+_pthread_getcpuclockid(pthread_t pthread, clockid_t *clock_id)
+{
+
+	if (pthread == NULL)
+		return (EINVAL);
+
+	if (clock_getcpuclockid2(TID(pthread), CPUCLOCK_WHICH_TID, clock_id))
+		return (errno);
+	return (0);
+}
diff --git a/lib/libthr/thread/thr_getprio.c b/lib/libthr/thread/thr_getprio.c
new file mode 100644
index 0000000000000..a8d8d2bfe0170
--- /dev/null
+++ b/lib/libthr/thread/thr_getprio.c
@@ -0,0 +1,59 @@
+/*-
+ * SPDX-License-Identifier: BSD-3-Clause
+ *
+ * Copyright (c) 1995 John Birrell <jb@cimlogic.com.au>.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. Neither the name of the author nor the names of any co-contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY JOHN BIRRELL AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+
+#include "namespace.h"
+#include <errno.h>
+#include <pthread.h>
+#include "un-namespace.h"
+#include "thr_private.h"
+
+__weak_reference(_pthread_getprio, pthread_getprio);
+
+/*
+ * Legacy interface: return the scheduling priority of the given thread,
+ * or -1 with errno set to the pthread_getschedparam() error code if the
+ * thread cannot be found.  Prefer pthread_getschedparam() in new code.
+ */
+int
+_pthread_getprio(pthread_t pthread)
+{
+	int policy, ret;
+	struct sched_param param;
+
+	if ((ret = _pthread_getschedparam(pthread, &policy, &param)) == 0)
+		ret = param.sched_priority;
+	else {
+		/* Invalid thread: */
+		errno = ret;
+		ret = -1;
+	}
+
+	/* Return the thread priority or an error status: */
+	return (ret);
+}
diff --git a/lib/libthr/thread/thr_getschedparam.c b/lib/libthr/thread/thr_getschedparam.c
new file mode 100644
index 0000000000000..5785438f30857
--- /dev/null
+++ b/lib/libthr/thread/thr_getschedparam.c
@@ -0,0 +1,71 @@
+/*-
+ * SPDX-License-Identifier: BSD-4-Clause
+ *
+ * Copyright (c) 1998 Daniel Eischen <eischen@vigrid.com>.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. All advertising materials mentioning features or use of this software
+ * must display the following acknowledgement:
+ * This product includes software developed by Daniel Eischen.
+ * 4. Neither the name of the author nor the names of any co-contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY DANIEL EISCHEN AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+
+#include "namespace.h"
+#include <sys/types.h>
+#include <sys/rtprio.h>
+#include <errno.h>
+#include <pthread.h>
+#include "un-namespace.h"
+
+#include "thr_private.h"
+
+__weak_reference(_pthread_getschedparam, pthread_getschedparam);
+
+/*
+ * pthread_getschedparam(): report the scheduling policy and priority
+ * cached in the target thread's attributes.  Returns 0 on success,
+ * EINVAL for NULL output pointers, or the _thr_find_thread() error if
+ * the thread does not exist.  The target is read under its thread
+ * lock; on success _thr_find_thread() returns with that lock held,
+ * which is why only the unlock appears on that path.
+ */
+int
+_pthread_getschedparam(pthread_t pthread, int * __restrict policy,
+    struct sched_param * __restrict param)
+{
+	struct pthread *curthread = _get_curthread();
+	int ret = 0;
+
+	if (policy == NULL || param == NULL)
+		return (EINVAL);
+
+	/*
+	 * Avoid searching the thread list when it is the current
+	 * thread.
+	 */
+	if (pthread == curthread)
+		THR_LOCK(curthread);
+	else if ((ret = _thr_find_thread(curthread, pthread, /*include dead*/0)))
+		return (ret);
+	*policy = pthread->attr.sched_policy;
+	param->sched_priority = pthread->attr.prio;
+	THR_THREAD_UNLOCK(curthread, pthread);
+	return (ret);
+}
diff --git a/lib/libthr/thread/thr_getthreadid_np.c b/lib/libthr/thread/thr_getthreadid_np.c
new file mode 100644
index 0000000000000..3011307861ae4
--- /dev/null
+++ b/lib/libthr/thread/thr_getthreadid_np.c
@@ -0,0 +1,51 @@
+/*-
+ * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
+ *
+ * Copyright (c) 2011 Jung-uk Kim <jkim@FreeBSD.org>
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+
+#include "namespace.h"
+#include <pthread.h>
+#include <pthread_np.h>
+#include "un-namespace.h"
+
+#include "thr_private.h"
+
+__weak_reference(_pthread_getthreadid_np, pthread_getthreadid_np);
+
+/*
+ * Provide the equivalent of the AIX pthread_getthreadid_np() function.
+ */
+/*
+ * Return the calling thread's kernel thread identifier (TID).
+ * Initializes the library first if necessary.
+ */
+int
+_pthread_getthreadid_np(void)
+{
+	struct pthread *curthread;
+
+	_thr_check_init();
+	curthread = _get_curthread();
+	return (TID(curthread));
+}
diff --git a/lib/libthr/thread/thr_info.c b/lib/libthr/thread/thr_info.c
new file mode 100644
index 0000000000000..948ed3e00aab2
--- /dev/null
+++ b/lib/libthr/thread/thr_info.c
@@ -0,0 +1,111 @@
+/*-
+ * SPDX-License-Identifier: BSD-3-Clause
+ *
+ * Copyright (c) 1995-1998 John Birrell <jb@cimlogic.com.au>
+ * Copyright (c) 2018 The FreeBSD Foundation
+ * All rights reserved.
+ *
+ * Portions of this software were developed by Konstantin Belousov
+ * under sponsorship from the FreeBSD Foundation.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. Neither the name of the author nor the names of any co-contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY JOHN BIRRELL AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+
+#include "namespace.h"
+#include <stdlib.h>
+#include <string.h>
+#include <pthread.h>
+#include <pthread_np.h>
+#include "un-namespace.h"
+
+#include "thr_private.h"
+
+__weak_reference(_pthread_set_name_np, pthread_set_name_np);
+
+/*
+ * Replace the thread's cached debug-name copy.  Caller holds the
+ * thread lock.  NOTE(review): strdup() failure leaves name NULL,
+ * which thr_get_name_np() treats as "no name" — intentional
+ * best-effort behavior, not an error path.
+ */
+static void
+thr_set_name_np(struct pthread *thread, const char *name)
+{
+
+	free(thread->name);
+	thread->name = strdup(name);
+}
+
+/*
+ * Set the thread name for debug: pushes the name to the kernel via
+ * thr_set_name(2) and caches a copy in the pthread structure.  The
+ * kernel call's return value is ignored (best effort).  For a foreign
+ * thread the name is only set if the thread is found and not dead;
+ * _thr_find_thread() returns with the target's lock held on success.
+ */
+void
+_pthread_set_name_np(pthread_t thread, const char *name)
+{
+	struct pthread *curthread;
+
+	curthread = _get_curthread();
+	if (curthread == thread) {
+		THR_THREAD_LOCK(curthread, thread);
+		thr_set_name(thread->tid, name);
+		thr_set_name_np(thread, name);
+		THR_THREAD_UNLOCK(curthread, thread);
+	} else {
+		if (_thr_find_thread(curthread, thread, 0) == 0) {
+			if (thread->state != PS_DEAD) {
+				thr_set_name(thread->tid, name);
+				thr_set_name_np(thread, name);
+			}
+			THR_THREAD_UNLOCK(curthread, thread);
+		}
+	}
+}
+
+/*
+ * Copy the thread's cached name into buf (NUL-terminated, truncated to
+ * len).  Yields an empty string when no name is set.  Caller holds the
+ * thread lock.
+ */
+static void
+thr_get_name_np(struct pthread *thread, char *buf, size_t len)
+{
+
+	if (thread->name != NULL)
+		strlcpy(buf, thread->name, len);
+	else if (len > 0)
+		buf[0] = '\0';
+}
+
+__weak_reference(_pthread_get_name_np, pthread_get_name_np);
+
+/*
+ * Retrieve a thread's debug name into buf.  Returns the empty string
+ * when the thread cannot be found (and len > 0); a dead-but-found
+ * thread leaves buf untouched.  Reads the cached copy only — the
+ * kernel is not consulted.
+ */
+void
+_pthread_get_name_np(pthread_t thread, char *buf, size_t len)
+{
+	struct pthread *curthread;
+
+	curthread = _get_curthread();
+	if (curthread == thread) {
+		THR_THREAD_LOCK(curthread, thread);
+		thr_get_name_np(thread, buf, len);
+		THR_THREAD_UNLOCK(curthread, thread);
+	} else {
+		if (_thr_find_thread(curthread, thread, 0) == 0) {
+			if (thread->state != PS_DEAD)
+				thr_get_name_np(thread, buf, len);
+			THR_THREAD_UNLOCK(curthread, thread);
+		} else if (len > 0)
+			buf[0] = '\0';
+	}
+}
diff --git a/lib/libthr/thread/thr_init.c b/lib/libthr/thread/thr_init.c
new file mode 100644
index 0000000000000..65f7d111f73df
--- /dev/null
+++ b/lib/libthr/thread/thr_init.c
@@ -0,0 +1,496 @@
+/*-
+ * SPDX-License-Identifier: BSD-4-Clause
+ *
+ * Copyright (c) 2003 Daniel M. Eischen <deischen@freebsd.org>
+ * Copyright (c) 1995-1998 John Birrell <jb@cimlogic.com.au>
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. All advertising materials mentioning features or use of this software
+ * must display the following acknowledgement:
+ * This product includes software developed by John Birrell.
+ * 4. Neither the name of the author nor the names of any co-contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY JOHN BIRRELL AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+
+#include "namespace.h"
+#include <sys/types.h>
+#include <sys/signalvar.h>
+#include <sys/ioctl.h>
+#include <sys/link_elf.h>
+#include <sys/resource.h>
+#include <sys/sysctl.h>
+#include <sys/ttycom.h>
+#include <sys/mman.h>
+#include <sys/rtprio.h>
+#include <errno.h>
+#include <fcntl.h>
+#include <paths.h>
+#include <pthread.h>
+#include <pthread_np.h>
+#include <signal.h>
+#include <stdlib.h>
+#include <string.h>
+#include <time.h>
+#include <unistd.h>
+#include "un-namespace.h"
+
+#include "libc_private.h"
+#include "thr_private.h"
+
+char *_usrstack;
+struct pthread *_thr_initial;
+int _libthr_debug;
+int _thread_event_mask;
+struct pthread *_thread_last_event;
+pthreadlist _thread_list = TAILQ_HEAD_INITIALIZER(_thread_list);
+pthreadlist _thread_gc_list = TAILQ_HEAD_INITIALIZER(_thread_gc_list);
+int _thread_active_threads = 1;
+atfork_head _thr_atfork_list = TAILQ_HEAD_INITIALIZER(_thr_atfork_list);
+struct urwlock _thr_atfork_lock = DEFAULT_URWLOCK;
+
+struct pthread_prio _thr_priorities[3] = {
+ {RTP_PRIO_MIN, RTP_PRIO_MAX, 0}, /* FIFO */
+ {0, 0, 63}, /* OTHER */
+ {RTP_PRIO_MIN, RTP_PRIO_MAX, 0} /* RR */
+};
+
+struct pthread_attr _pthread_attr_default = {
+ .sched_policy = SCHED_OTHER,
+ .sched_inherit = PTHREAD_INHERIT_SCHED,
+ .prio = 0,
+ .suspend = THR_CREATE_RUNNING,
+ .flags = PTHREAD_SCOPE_SYSTEM,
+ .stackaddr_attr = NULL,
+ .stacksize_attr = THR_STACK_DEFAULT,
+ .guardsize_attr = 0,
+ .cpusetsize = 0,
+ .cpuset = NULL
+};
+
+struct pthread_mutex_attr _pthread_mutexattr_default = {
+ .m_type = PTHREAD_MUTEX_DEFAULT,
+ .m_protocol = PTHREAD_PRIO_NONE,
+ .m_ceiling = 0,
+ .m_pshared = PTHREAD_PROCESS_PRIVATE,
+ .m_robust = PTHREAD_MUTEX_STALLED,
+};
+
+struct pthread_mutex_attr _pthread_mutexattr_adaptive_default = {
+ .m_type = PTHREAD_MUTEX_ADAPTIVE_NP,
+ .m_protocol = PTHREAD_PRIO_NONE,
+ .m_ceiling = 0,
+ .m_pshared = PTHREAD_PROCESS_PRIVATE,
+ .m_robust = PTHREAD_MUTEX_STALLED,
+};
+
+/* Default condition variable attributes: */
+struct pthread_cond_attr _pthread_condattr_default = {
+ .c_pshared = PTHREAD_PROCESS_PRIVATE,
+ .c_clockid = CLOCK_REALTIME
+};
+
+int _thr_is_smp = 0;
+size_t _thr_guard_default;
+size_t _thr_stack_default = THR_STACK_DEFAULT;
+size_t _thr_stack_initial = THR_STACK_INITIAL;
+int _thr_page_size;
+int _thr_spinloops;
+int _thr_yieldloops;
+int _thr_queuefifo = 4;
+int _gc_count;
+struct umutex _mutex_static_lock = DEFAULT_UMUTEX;
+struct umutex _cond_static_lock = DEFAULT_UMUTEX;
+struct umutex _rwlock_static_lock = DEFAULT_UMUTEX;
+struct umutex _keytable_lock = DEFAULT_UMUTEX;
+struct urwlock _thr_list_lock = DEFAULT_URWLOCK;
+struct umutex _thr_event_lock = DEFAULT_UMUTEX;
+struct umutex _suspend_all_lock = DEFAULT_UMUTEX;
+struct pthread *_single_thread;
+int _suspend_all_cycle;
+int _suspend_all_waiters;
+
+int __pthread_cond_wait(pthread_cond_t *, pthread_mutex_t *);
+int __pthread_mutex_lock(pthread_mutex_t *);
+int __pthread_mutex_trylock(pthread_mutex_t *);
+void _thread_init_hack(void) __attribute__ ((constructor));
+
+static void init_private(void);
+static void init_main_thread(struct pthread *thread);
+
+/*
+ * All weak references used within libc should be in this table.
+ * This is so that static libraries will work.
+ */
+
+STATIC_LIB_REQUIRE(_fork);
+STATIC_LIB_REQUIRE(_pthread_getspecific);
+STATIC_LIB_REQUIRE(_pthread_key_create);
+STATIC_LIB_REQUIRE(_pthread_key_delete);
+STATIC_LIB_REQUIRE(_pthread_mutex_destroy);
+STATIC_LIB_REQUIRE(_pthread_mutex_init);
+STATIC_LIB_REQUIRE(_pthread_mutex_lock);
+STATIC_LIB_REQUIRE(_pthread_mutex_trylock);
+STATIC_LIB_REQUIRE(_pthread_mutex_unlock);
+STATIC_LIB_REQUIRE(_pthread_mutexattr_init);
+STATIC_LIB_REQUIRE(_pthread_mutexattr_destroy);
+STATIC_LIB_REQUIRE(_pthread_mutexattr_settype);
+STATIC_LIB_REQUIRE(_pthread_once);
+STATIC_LIB_REQUIRE(_pthread_setspecific);
+STATIC_LIB_REQUIRE(_raise);
+STATIC_LIB_REQUIRE(_sem_destroy);
+STATIC_LIB_REQUIRE(_sem_getvalue);
+STATIC_LIB_REQUIRE(_sem_init);
+STATIC_LIB_REQUIRE(_sem_post);
+STATIC_LIB_REQUIRE(_sem_timedwait);
+STATIC_LIB_REQUIRE(_sem_trywait);
+STATIC_LIB_REQUIRE(_sem_wait);
+STATIC_LIB_REQUIRE(_sigaction);
+STATIC_LIB_REQUIRE(_sigprocmask);
+STATIC_LIB_REQUIRE(_sigsuspend);
+STATIC_LIB_REQUIRE(_sigtimedwait);
+STATIC_LIB_REQUIRE(_sigwait);
+STATIC_LIB_REQUIRE(_sigwaitinfo);
+STATIC_LIB_REQUIRE(_spinlock);
+STATIC_LIB_REQUIRE(_spinunlock);
+STATIC_LIB_REQUIRE(_thread_init_hack);
+
+/*
+ * These are needed when linking statically. All references within
+ * libgcc (and in the future libc) to these routines are weak, but
+ * if they are not (strongly) referenced by the application or other
+ * libraries, then the actual functions will not be loaded.
+ */
+STATIC_LIB_REQUIRE(_pthread_once);
+STATIC_LIB_REQUIRE(_pthread_key_create);
+STATIC_LIB_REQUIRE(_pthread_key_delete);
+STATIC_LIB_REQUIRE(_pthread_getspecific);
+STATIC_LIB_REQUIRE(_pthread_setspecific);
+STATIC_LIB_REQUIRE(_pthread_mutex_init);
+STATIC_LIB_REQUIRE(_pthread_mutex_destroy);
+STATIC_LIB_REQUIRE(_pthread_mutex_lock);
+STATIC_LIB_REQUIRE(_pthread_mutex_trylock);
+STATIC_LIB_REQUIRE(_pthread_mutex_unlock);
+STATIC_LIB_REQUIRE(_pthread_create);
+
+/* Pull in all symbols required by libthread_db */
+STATIC_LIB_REQUIRE(_thread_state_running);
+
+/*
+ * Table of libthr entry points, one row per PJT_* slot; each row holds
+ * two function pointers.  DUAL_ENTRY fills both with the same function;
+ * the three explicit pairs (cond_wait, mutex_lock, mutex_trylock)
+ * presumably distinguish cancellation-point and non-cancellation
+ * variants — NOTE(review): confirm against the PJT_* consumer in
+ * libc_private.h.  Row order must match the PJT_* enumeration.
+ */
+#define DUAL_ENTRY(entry)	\
+	(pthread_func_t)entry, (pthread_func_t)entry
+
+static pthread_func_t jmp_table[][2] = {
+	{DUAL_ENTRY(_pthread_atfork)},	/* PJT_ATFORK */
+	{DUAL_ENTRY(_pthread_attr_destroy)},	/* PJT_ATTR_DESTROY */
+	{DUAL_ENTRY(_pthread_attr_getdetachstate)},	/* PJT_ATTR_GETDETACHSTATE */
+	{DUAL_ENTRY(_pthread_attr_getguardsize)},	/* PJT_ATTR_GETGUARDSIZE */
+	{DUAL_ENTRY(_pthread_attr_getinheritsched)},	/* PJT_ATTR_GETINHERITSCHED */
+	{DUAL_ENTRY(_pthread_attr_getschedparam)},	/* PJT_ATTR_GETSCHEDPARAM */
+	{DUAL_ENTRY(_pthread_attr_getschedpolicy)},	/* PJT_ATTR_GETSCHEDPOLICY */
+	{DUAL_ENTRY(_pthread_attr_getscope)},	/* PJT_ATTR_GETSCOPE */
+	{DUAL_ENTRY(_pthread_attr_getstackaddr)},	/* PJT_ATTR_GETSTACKADDR */
+	{DUAL_ENTRY(_pthread_attr_getstacksize)},	/* PJT_ATTR_GETSTACKSIZE */
+	{DUAL_ENTRY(_pthread_attr_init)},	/* PJT_ATTR_INIT */
+	{DUAL_ENTRY(_pthread_attr_setdetachstate)},	/* PJT_ATTR_SETDETACHSTATE */
+	{DUAL_ENTRY(_pthread_attr_setguardsize)},	/* PJT_ATTR_SETGUARDSIZE */
+	{DUAL_ENTRY(_pthread_attr_setinheritsched)},	/* PJT_ATTR_SETINHERITSCHED */
+	{DUAL_ENTRY(_pthread_attr_setschedparam)},	/* PJT_ATTR_SETSCHEDPARAM */
+	{DUAL_ENTRY(_pthread_attr_setschedpolicy)},	/* PJT_ATTR_SETSCHEDPOLICY */
+	{DUAL_ENTRY(_pthread_attr_setscope)},	/* PJT_ATTR_SETSCOPE */
+	{DUAL_ENTRY(_pthread_attr_setstackaddr)},	/* PJT_ATTR_SETSTACKADDR */
+	{DUAL_ENTRY(_pthread_attr_setstacksize)},	/* PJT_ATTR_SETSTACKSIZE */
+	{DUAL_ENTRY(_pthread_cancel)},	/* PJT_CANCEL */
+	{DUAL_ENTRY(_pthread_cleanup_pop)},	/* PJT_CLEANUP_POP */
+	{DUAL_ENTRY(_pthread_cleanup_push)},	/* PJT_CLEANUP_PUSH */
+	{DUAL_ENTRY(_pthread_cond_broadcast)},	/* PJT_COND_BROADCAST */
+	{DUAL_ENTRY(_pthread_cond_destroy)},	/* PJT_COND_DESTROY */
+	{DUAL_ENTRY(_pthread_cond_init)},	/* PJT_COND_INIT */
+	{DUAL_ENTRY(_pthread_cond_signal)},	/* PJT_COND_SIGNAL */
+	{DUAL_ENTRY(_pthread_cond_timedwait)},	/* PJT_COND_TIMEDWAIT */
+	{(pthread_func_t)__pthread_cond_wait,
+	 (pthread_func_t)_pthread_cond_wait},	/* PJT_COND_WAIT */
+	{DUAL_ENTRY(_pthread_detach)},	/* PJT_DETACH */
+	{DUAL_ENTRY(_pthread_equal)},	/* PJT_EQUAL */
+	{DUAL_ENTRY(_pthread_exit)},	/* PJT_EXIT */
+	{DUAL_ENTRY(_pthread_getspecific)},	/* PJT_GETSPECIFIC */
+	{DUAL_ENTRY(_pthread_join)},	/* PJT_JOIN */
+	{DUAL_ENTRY(_pthread_key_create)},	/* PJT_KEY_CREATE */
+	{DUAL_ENTRY(_pthread_key_delete)},	/* PJT_KEY_DELETE*/
+	{DUAL_ENTRY(_pthread_kill)},	/* PJT_KILL */
+	{DUAL_ENTRY(_pthread_main_np)},	/* PJT_MAIN_NP */
+	{DUAL_ENTRY(_pthread_mutexattr_destroy)},	/* PJT_MUTEXATTR_DESTROY */
+	{DUAL_ENTRY(_pthread_mutexattr_init)},	/* PJT_MUTEXATTR_INIT */
+	{DUAL_ENTRY(_pthread_mutexattr_settype)},	/* PJT_MUTEXATTR_SETTYPE */
+	{DUAL_ENTRY(_pthread_mutex_destroy)},	/* PJT_MUTEX_DESTROY */
+	{DUAL_ENTRY(_pthread_mutex_init)},	/* PJT_MUTEX_INIT */
+	{(pthread_func_t)__pthread_mutex_lock,
+	 (pthread_func_t)_pthread_mutex_lock},	/* PJT_MUTEX_LOCK */
+	{(pthread_func_t)__pthread_mutex_trylock,
+	 (pthread_func_t)_pthread_mutex_trylock},/* PJT_MUTEX_TRYLOCK */
+	{DUAL_ENTRY(_pthread_mutex_unlock)},	/* PJT_MUTEX_UNLOCK */
+	{DUAL_ENTRY(_pthread_once)},	/* PJT_ONCE */
+	{DUAL_ENTRY(_pthread_rwlock_destroy)},	/* PJT_RWLOCK_DESTROY */
+	{DUAL_ENTRY(_pthread_rwlock_init)},	/* PJT_RWLOCK_INIT */
+	{DUAL_ENTRY(_pthread_rwlock_rdlock)},	/* PJT_RWLOCK_RDLOCK */
+	{DUAL_ENTRY(_pthread_rwlock_tryrdlock)},/* PJT_RWLOCK_TRYRDLOCK */
+	{DUAL_ENTRY(_pthread_rwlock_trywrlock)},/* PJT_RWLOCK_TRYWRLOCK */
+	{DUAL_ENTRY(_pthread_rwlock_unlock)},	/* PJT_RWLOCK_UNLOCK */
+	{DUAL_ENTRY(_pthread_rwlock_wrlock)},	/* PJT_RWLOCK_WRLOCK */
+	{DUAL_ENTRY(_pthread_self)},	/* PJT_SELF */
+	{DUAL_ENTRY(_pthread_setcancelstate)},	/* PJT_SETCANCELSTATE */
+	{DUAL_ENTRY(_pthread_setcanceltype)},	/* PJT_SETCANCELTYPE */
+	{DUAL_ENTRY(_pthread_setspecific)},	/* PJT_SETSPECIFIC */
+	{DUAL_ENTRY(_pthread_sigmask)},	/* PJT_SIGMASK */
+	{DUAL_ENTRY(_pthread_testcancel)},	/* PJT_TESTCANCEL */
+	{DUAL_ENTRY(__pthread_cleanup_pop_imp)},/* PJT_CLEANUP_POP_IMP */
+	{DUAL_ENTRY(__pthread_cleanup_push_imp)},/* PJT_CLEANUP_PUSH_IMP */
+	{DUAL_ENTRY(_pthread_cancel_enter)},	/* PJT_CANCEL_ENTER */
+	{DUAL_ENTRY(_pthread_cancel_leave)},	/* PJT_CANCEL_LEAVE */
+	{DUAL_ENTRY(_pthread_mutex_consistent)},/* PJT_MUTEX_CONSISTENT */
+	{DUAL_ENTRY(_pthread_mutexattr_getrobust)},/* PJT_MUTEXATTR_GETROBUST */
+	{DUAL_ENTRY(_pthread_mutexattr_setrobust)},/* PJT_MUTEXATTR_SETROBUST */
+};
+
+/*
+ * Set once init_private() has completed; lets a post-fork
+ * reinitialization skip the one-time setup there.
+ */
+static int init_once = 0;
+
+/*
+ * For the shared version of the threads library, the above is sufficient.
+ * But for the archive version of the library, we need a little bit more.
+ * Namely, we must arrange for this particular module to be pulled in from
+ * the archive library at link time. To accomplish that, we define and
+ * initialize a variable, "_thread_autoinit_dummy_decl". This variable is
+ * referenced (as an extern) from libc/stdlib/exit.c. This will always
+ * create a need for this module, ensuring that it is present in the
+ * executable.
+ */
+extern int _thread_autoinit_dummy_decl;
+int _thread_autoinit_dummy_decl = 0;
+
+/*
+ * Auto-initialization hook: run the full library initialization with
+ * no current thread (NULL means "first-time init", see _libpthread_init).
+ */
+void
+_thread_init_hack(void)
+{
+
+	_libpthread_init(NULL);
+}
+
+
+/*
+ * Threaded process initialization.
+ *
+ * This is only called under two conditions:
+ *
+ *   1) Some thread routines have detected that the library hasn't yet
+ *      been initialized (_thr_initial == NULL && curthread == NULL), or
+ *
+ *   2) An explicit call to reinitialize after a fork (indicated
+ *      by curthread != NULL)
+ */
+void
+_libpthread_init(struct pthread *curthread)
+{
+	int first, dlopened;
+
+	/* Check if this function has already been called: */
+	if (_thr_initial != NULL && curthread == NULL)
+		/* Only initialize the threaded application once. */
+		return;
+
+	/*
+	 * Check the size of the jump table to make sure it is preset
+	 * with the correct number of entries.
+	 */
+	if (sizeof(jmp_table) != sizeof(pthread_func_t) * PJT_MAX * 2)
+		PANIC("Thread jump table not properly initialized");
+	memcpy(__thr_jtable, jmp_table, sizeof(jmp_table));
+	__thr_interpose_libc();
+
+	/* Initialize pthread private data. */
+	init_private();
+
+	/* Set the initial thread. */
+	if (curthread == NULL) {
+		first = 1;
+		/* Create and initialize the initial thread. */
+		curthread = _thr_alloc(NULL);
+		if (curthread == NULL)
+			PANIC("Can't allocate initial thread");
+		init_main_thread(curthread);
+	} else {
+		first = 0;
+	}
+
+	/*
+	 * Add the thread to the thread list queue.  Whether this is a
+	 * fresh init or a post-fork reinit, it is the only active thread.
+	 */
+	THR_LIST_ADD(curthread);
+	_thread_active_threads = 1;
+
+	/* Setup the thread specific data */
+	_tcb_set(curthread->tcb);
+
+	if (first) {
+		_thr_initial = curthread;
+		/* Tell signal setup whether libthr was brought in via dlopen(). */
+		dlopened = _rtld_is_dlopened(&_thread_autoinit_dummy_decl) != 0;
+		_thr_signal_init(dlopened);
+		if (_thread_event_mask & TD_CREATE)
+			_thr_report_creation(curthread, curthread);
+		/*
+		 * Always use our rtld lock implementation.
+		 * It is faster because it postpones signal handlers
+		 * instead of calling sigprocmask(2).
+		 */
+		_thr_rtld_init();
+	}
+}
+
+/*
+ * This function and pthread_create() do a lot of the same things.
+ * It'd be nice to consolidate the common stuff in one place.
+ */
+static void
+init_main_thread(struct pthread *thread)
+{
+	struct sched_param sched_param;
+	int i;
+
+	/* Setup the thread attributes. */
+	thr_self(&thread->tid);
+	thread->attr = _pthread_attr_default;
+	/*
+	 * Set up the thread stack.
+	 *
+	 * Create a red zone below the main stack. All other stacks
+	 * are constrained to a maximum size by the parameters
+	 * passed to mmap(), but this stack is only limited by
+	 * resource limits, so this stack needs an explicitly mapped
+	 * red zone to protect the thread stack that is just beyond.
+	 */
+	/* Protection 0 (no access): any touch of the red zone faults. */
+	if (mmap(_usrstack - _thr_stack_initial -
+	    _thr_guard_default, _thr_guard_default, 0, MAP_ANON,
+	    -1, 0) == MAP_FAILED)
+		PANIC("Cannot allocate red zone for initial thread");
+
+	/*
+	 * Mark the stack as an application supplied stack so that it
+	 * isn't deallocated.
+	 *
+	 * XXX - I'm not sure it would hurt anything to deallocate
+	 *       the main thread stack because deallocation doesn't
+	 *       actually free() it; it just puts it in the free
+	 *       stack queue for later reuse.
+	 */
+	thread->attr.stackaddr_attr = _usrstack - _thr_stack_initial;
+	thread->attr.stacksize_attr = _thr_stack_initial;
+	thread->attr.guardsize_attr = _thr_guard_default;
+	thread->attr.flags |= THR_STACK_USER;
+
+	/*
+	 * Write a magic value to the thread structure
+	 * to help identify valid ones:
+	 */
+	thread->magic = THR_MAGIC;
+
+	/* Default POSIX cancellation state: enabled, deferred. */
+	thread->cancel_enable = 1;
+	thread->cancel_async = 0;
+
+	/* Initialize the mutex queues */
+	for (i = 0; i < TMQ_NITEMS; i++)
+		TAILQ_INIT(&thread->mq[i]);
+
+	thread->state = PS_RUNNING;
+
+	/* Inherit the scheduling the kernel already applied to this lwp. */
+	_thr_getscheduler(thread->tid, &thread->attr.sched_policy,
+	    &sched_param);
+	thread->attr.prio = sched_param.sched_priority;
+
+#ifdef _PTHREAD_FORCED_UNWIND
+	thread->unwind_stackend = _usrstack;
+#endif
+
+	/* Others cleared to zero by thr_alloc() */
+}
+
+/*
+ * Initialize libthr-private state: static locks, thread list, sleep
+ * queues, and (once only) process-wide parameters such as the stack
+ * top, page size, and tunables read from the environment.
+ */
+static void
+init_private(void)
+{
+	struct rlimit rlim;
+	size_t len;
+	int mib[2];
+	char *env, *env_bigstack, *env_splitstack;
+
+	_thr_umutex_init(&_mutex_static_lock);
+	_thr_umutex_init(&_cond_static_lock);
+	_thr_umutex_init(&_rwlock_static_lock);
+	_thr_umutex_init(&_keytable_lock);
+	_thr_urwlock_init(&_thr_atfork_lock);
+	_thr_umutex_init(&_thr_event_lock);
+	_thr_umutex_init(&_suspend_all_lock);
+	_thr_spinlock_init();
+	_thr_list_init();
+	_thr_wake_addr_init();
+	_sleepq_init();
+	_single_thread = NULL;
+	_suspend_all_waiters = 0;
+
+	/*
+	 * Avoid reinitializing some things if they don't need to be,
+	 * e.g. after a fork().
+	 */
+	if (init_once == 0) {
+		__thr_pshared_init();
+		/* Find the stack top */
+		mib[0] = CTL_KERN;
+		mib[1] = KERN_USRSTACK;
+		len = sizeof (_usrstack);
+		if (sysctl(mib, 2, &_usrstack, &len, NULL, 0) == -1)
+			PANIC("Cannot get kern.usrstack from sysctl");
+		env_bigstack = getenv("LIBPTHREAD_BIGSTACK_MAIN");
+		env_splitstack = getenv("LIBPTHREAD_SPLITSTACK_MAIN");
+		/*
+		 * Give the main thread the full RLIMIT_STACK size unless
+		 * split-stack mode was explicitly requested.
+		 */
+		if (env_bigstack != NULL || env_splitstack == NULL) {
+			if (getrlimit(RLIMIT_STACK, &rlim) == -1)
+				PANIC("Cannot get stack rlimit");
+			_thr_stack_initial = rlim.rlim_cur;
+		}
+		len = sizeof(_thr_is_smp);
+		sysctlbyname("kern.smp.cpus", &_thr_is_smp, &len, NULL, 0);
+		/* Collapse the CPU count to a boolean "more than one CPU". */
+		_thr_is_smp = (_thr_is_smp > 1);
+		_thr_page_size = getpagesize();
+		_thr_guard_default = _thr_page_size;
+		_pthread_attr_default.guardsize_attr = _thr_guard_default;
+		_pthread_attr_default.stacksize_attr = _thr_stack_default;
+		env = getenv("LIBPTHREAD_SPINLOOPS");
+		if (env)
+			_thr_spinloops = atoi(env);
+		env = getenv("LIBPTHREAD_YIELDLOOPS");
+		if (env)
+			_thr_yieldloops = atoi(env);
+		env = getenv("LIBPTHREAD_QUEUE_FIFO");
+		if (env)
+			_thr_queuefifo = atoi(env);
+		TAILQ_INIT(&_thr_atfork_list);
+	}
+	init_once = 1;
+}
diff --git a/lib/libthr/thread/thr_join.c b/lib/libthr/thread/thr_join.c
new file mode 100644
index 0000000000000..f6136e479b522
--- /dev/null
+++ b/lib/libthr/thread/thr_join.c
@@ -0,0 +1,151 @@
+/*-
+ * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
+ *
+ * Copyright (c) 2005, David Xu <davidxu@freebsd.org>
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice unmodified, this list of conditions, and the following
+ * disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+
+#include "namespace.h"
+#include <errno.h>
+#include <pthread.h>
+#include "un-namespace.h"
+
+#include "thr_private.h"
+
+int _pthread_timedjoin_np(pthread_t pthread, void **thread_return,
+ const struct timespec *abstime);
+static int join_common(pthread_t, void **, const struct timespec *);
+
+__weak_reference(_pthread_join, pthread_join);
+__weak_reference(_pthread_timedjoin_np, pthread_timedjoin_np);
+
+/*
+ * Cancellation cleanup handler: if the joining thread is canceled
+ * while waiting, clear it as the joinee's registered joiner.
+ */
+static void backout_join(void *arg)
+{
+	struct pthread *pthread = (struct pthread *)arg;
+	struct pthread *curthread = _get_curthread();
+
+	THR_THREAD_LOCK(curthread, pthread);
+	pthread->joiner = NULL;
+	THR_THREAD_UNLOCK(curthread, pthread);
+}
+
+/* Wait indefinitely for the target thread to exit; see join_common(). */
+int
+_pthread_join(pthread_t pthread, void **thread_return)
+{
+	return (join_common(pthread, thread_return, NULL));
+}
+
+/*
+ * Join with an absolute CLOCK_REALTIME deadline.  A NULL or malformed
+ * timespec is rejected with EINVAL before any lookup is done.
+ */
+int
+_pthread_timedjoin_np(pthread_t pthread, void **thread_return,
+    const struct timespec *abstime)
+{
+	if (abstime == NULL || abstime->tv_sec < 0 || abstime->tv_nsec < 0 ||
+	    abstime->tv_nsec >= 1000000000)
+		return (EINVAL);
+
+	return (join_common(pthread, thread_return, abstime));
+}
+
+/*
+ * Cancellation behavior:
+ *   if the thread is canceled, joinee is not recycled.
+ *
+ * Returns 0 on success, EINVAL for a NULL or detached target, EDEADLK
+ * for a self-join, ESRCH if the target is not found, ENOTSUP if the
+ * target already has a joiner, or ETIMEDOUT if abstime expires.
+ */
+static int
+join_common(pthread_t pthread, void **thread_return,
+    const struct timespec *abstime)
+{
+	struct pthread *curthread = _get_curthread();
+	struct timespec ts, ts2, *tsp;
+	void *tmp;
+	long tid;
+	int ret = 0;
+
+	if (pthread == NULL)
+		return (EINVAL);
+
+	if (pthread == curthread)
+		return (EDEADLK);
+
+	/* On success this returns with the target's thread lock held. */
+	if ((ret = _thr_find_thread(curthread, pthread, 1)) != 0)
+		return (ESRCH);
+
+	if ((pthread->flags & THR_FLAGS_DETACHED) != 0) {
+		ret = EINVAL;
+	} else if (pthread->joiner != NULL) {
+		/* Multiple joiners are not supported. */
+		ret = ENOTSUP;
+	}
+	if (ret) {
+		THR_THREAD_UNLOCK(curthread, pthread);
+		return (ret);
+	}
+	/* Set the running thread to be the joiner: */
+	pthread->joiner = curthread;
+
+	THR_THREAD_UNLOCK(curthread, pthread);
+
+	/* Undo the joiner registration if we are canceled while waiting. */
+	THR_CLEANUP_PUSH(curthread, backout_join, pthread);
+	_thr_cancel_enter(curthread);
+
+	/* Snapshot the tid; the wait below only blocks while it is unchanged. */
+	tid = pthread->tid;
+	while (pthread->tid != TID_TERMINATED) {
+		_thr_testcancel(curthread);
+		if (abstime != NULL) {
+			/* Convert the absolute deadline to a relative timeout. */
+			clock_gettime(CLOCK_REALTIME, &ts);
+			TIMESPEC_SUB(&ts2, abstime, &ts);
+			if (ts2.tv_sec < 0) {
+				ret = ETIMEDOUT;
+				break;
+			}
+			tsp = &ts2;
+		} else
+			tsp = NULL;
+		ret = _thr_umtx_wait(&pthread->tid, tid, tsp);
+		if (ret == ETIMEDOUT)
+			break;
+	}
+
+	_thr_cancel_leave(curthread, 0);
+	THR_CLEANUP_POP(curthread, 0);
+
+	if (ret == ETIMEDOUT) {
+		/* Timed out: stop being the joiner and report the failure. */
+		THR_THREAD_LOCK(curthread, pthread);
+		pthread->joiner = NULL;
+		THR_THREAD_UNLOCK(curthread, pthread);
+	} else {
+		ret = 0;
+		tmp = pthread->ret;
+		THR_THREAD_LOCK(curthread, pthread);
+		/* Mark joined so the structure can be garbage-collected. */
+		pthread->flags |= THR_FLAGS_DETACHED;
+		pthread->joiner = NULL;
+		_thr_try_gc(curthread, pthread); /* thread lock released */
+
+		if (thread_return != NULL)
+			*thread_return = tmp;
+	}
+	return (ret);
+}
diff --git a/lib/libthr/thread/thr_kern.c b/lib/libthr/thread/thr_kern.c
new file mode 100644
index 0000000000000..77ccc68478286
--- /dev/null
+++ b/lib/libthr/thread/thr_kern.c
@@ -0,0 +1,215 @@
+/*-
+ * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
+ *
+ * Copyright (c) 2005 David Xu <davidxu@freebsd.org>
+ * Copyright (C) 2003 Daniel M. Eischen <deischen@freebsd.org>
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice unmodified, this list of conditions, and the following
+ * disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+
+#include <sys/types.h>
+#include <sys/signalvar.h>
+#include <sys/rtprio.h>
+#include <sys/mman.h>
+#include <pthread.h>
+
+#include "thr_private.h"
+
+/*#define DEBUG_THREAD_KERN */
+#ifdef DEBUG_THREAD_KERN
+#define DBG_MSG stdout_debug
+#else
+#define DBG_MSG(x...)
+#endif
+
+static struct umutex addr_lock;			/* protects the free list below */
+static struct wake_addr *wake_addr_head;	/* free list, carved from mmap()ed pages */
+static struct wake_addr default_wake_addr;	/* fallback used before library init */
+
+/*
+ * This is called when the first thread (other than the initial
+ * thread) is created.
+ */
+int
+_thr_setthreaded(int threaded)
+{
+	/* No-op if the requested mode already matches __isthreaded. */
+	if (((threaded == 0) ^ (__isthreaded == 0)) == 0)
+		return (0);
+
+	__isthreaded = threaded;
+	return (0);
+}
+
+/* Diagnostic: aborts the process when a lock-level count underflows. */
+void
+_thr_assert_lock_level(void)
+{
+	PANIC("locklevel <= 0");
+}
+
+/*
+ * Translate a kernel rtprio into a POSIX scheduling policy and
+ * priority.  Kernel realtime priorities are inverted relative to
+ * POSIX (lower rtprio value = higher priority), hence the subtraction.
+ */
+int
+_rtp_to_schedparam(const struct rtprio *rtp, int *policy,
+    struct sched_param *param)
+{
+	switch(rtp->type) {
+	case RTP_PRIO_REALTIME:
+		*policy = SCHED_RR;
+		param->sched_priority = RTP_PRIO_MAX - rtp->prio;
+		break;
+	case RTP_PRIO_FIFO:
+		*policy = SCHED_FIFO;
+		param->sched_priority = RTP_PRIO_MAX - rtp->prio;
+		break;
+	default:
+		/* Anything else is treated as timesharing. */
+		*policy = SCHED_OTHER;
+		param->sched_priority = 0;
+		break;
+	}
+	return (0);
+}
+
+/*
+ * Inverse of _rtp_to_schedparam(): translate a POSIX scheduling policy
+ * and priority into a kernel rtprio.
+ */
+int
+_schedparam_to_rtp(int policy, const struct sched_param *param,
+    struct rtprio *rtp)
+{
+	switch(policy) {
+	case SCHED_RR:
+		rtp->type = RTP_PRIO_REALTIME;
+		rtp->prio = RTP_PRIO_MAX - param->sched_priority;
+		break;
+	case SCHED_FIFO:
+		rtp->type = RTP_PRIO_FIFO;
+		rtp->prio = RTP_PRIO_MAX - param->sched_priority;
+		break;
+	case SCHED_OTHER:
+	default:
+		rtp->type = RTP_PRIO_NORMAL;
+		rtp->prio = 0;
+		break;
+	}
+	return (0);
+}
+
+/*
+ * Query the kernel scheduling of an lwp via rtprio_thread(2) and
+ * translate it to POSIX form.  Returns -1 (with errno set by the
+ * syscall) on failure, 0 on success.
+ */
+int
+_thr_getscheduler(lwpid_t lwpid, int *policy, struct sched_param *param)
+{
+	struct rtprio rtp;
+	int ret;
+
+	ret = rtprio_thread(RTP_LOOKUP, lwpid, &rtp);
+	if (ret == -1)
+		return (ret);
+	_rtp_to_schedparam(&rtp, policy, param);
+	return (0);
+}
+
+/* Apply a POSIX policy/priority to an lwp via rtprio_thread(2). */
+int
+_thr_setscheduler(lwpid_t lwpid, int policy, const struct sched_param *param)
+{
+	struct rtprio rtp;
+
+	_schedparam_to_rtp(policy, param, &rtp);
+	return (rtprio_thread(RTP_SET, lwpid, &rtp));
+}
+
+/* Reset the wake-address allocator; called from init_private(). */
+void
+_thr_wake_addr_init(void)
+{
+	_thr_umutex_init(&addr_lock);
+	wake_addr_head = NULL;
+}
+
+/*
+ * Allocate a wake-address.  The memory area is never freed once
+ * allocated; this is because threads may still be referencing it.
+ * Returns NULL if a new page cannot be mapped (the caller in
+ * _thr_alloc() handles that case).
+ */
+struct wake_addr *
+_thr_alloc_wake_addr(void)
+{
+	struct pthread *curthread;
+	struct wake_addr *p;
+
+	/* Before library init there is only one thread: use the static slot. */
+	if (_thr_initial == NULL) {
+		return &default_wake_addr;
+	}
+
+	curthread = _get_curthread();
+
+	THR_LOCK_ACQUIRE(curthread, &addr_lock);
+	if (wake_addr_head == NULL) {
+		unsigned i;
+		unsigned pagesize = getpagesize();
+		struct wake_addr *pp = (struct wake_addr *)
+		    mmap(NULL, pagesize, PROT_READ|PROT_WRITE,
+		    MAP_ANON|MAP_PRIVATE, -1, 0);
+		if (pp == MAP_FAILED) {
+			/* Out of memory: fail instead of faulting below. */
+			THR_LOCK_RELEASE(curthread, &addr_lock);
+			return (NULL);
+		}
+		/* Chain the new page's entries into a singly-linked free list. */
+		for (i = 1; i < pagesize/sizeof(struct wake_addr); ++i)
+			pp[i].link = &pp[i+1];
+		pp[i-1].link = NULL;
+		wake_addr_head = &pp[1];
+		p = &pp[0];
+	} else {
+		p = wake_addr_head;
+		wake_addr_head = p->link;
+	}
+	THR_LOCK_RELEASE(curthread, &addr_lock);
+	p->value = 0;
+	return (p);
+}
+
+/*
+ * Return a wake address to the free list.  The static pre-init
+ * address is never pooled.
+ */
+void
+_thr_release_wake_addr(struct wake_addr *wa)
+{
+	struct pthread *curthread = _get_curthread();
+
+	if (wa == &default_wake_addr)
+		return;
+	THR_LOCK_ACQUIRE(curthread, &addr_lock);
+	wa->link = wake_addr_head;
+	wake_addr_head = wa;
+	THR_LOCK_RELEASE(curthread, &addr_lock);
+}
+
+/* Sleep on thread wakeup address */
+int
+_thr_sleep(struct pthread *curthread, int clockid,
+    const struct timespec *abstime)
+{
+
+	/* A wakeup is already pending: return without blocking. */
+	if (curthread->wake_addr->value != 0)
+		return (0);
+
+	return _thr_umtx_timedwait_uint(&curthread->wake_addr->value, 0,
+		clockid, abstime, 0);
+}
+
+/*
+ * Post a wakeup to every address in the array, then wake all the
+ * waiters with a single batched umtx operation.
+ */
+void
+_thr_wake_all(unsigned int *waddrs[], int count)
+{
+	int i;
+
+	for (i = 0; i < count; ++i)
+		*waddrs[i] = 1;
+	_umtx_op(waddrs, UMTX_OP_NWAKE_PRIVATE, count, NULL, NULL);
+}
diff --git a/lib/libthr/thread/thr_kill.c b/lib/libthr/thread/thr_kill.c
new file mode 100644
index 0000000000000..59b8b1567011b
--- /dev/null
+++ b/lib/libthr/thread/thr_kill.c
@@ -0,0 +1,76 @@
+/*-
+ * SPDX-License-Identifier: BSD-3-Clause
+ *
+ * Copyright (c) 1997 John Birrell <jb@cimlogic.com.au>.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. Neither the name of the author nor the names of any co-contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY JOHN BIRRELL AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+
+#include "namespace.h"
+#include <errno.h>
+#include <signal.h>
+#include <pthread.h>
+#include "un-namespace.h"
+
+#include "thr_private.h"
+
+__weak_reference(_pthread_kill, pthread_kill);
+
+/*
+ * Send a signal to a specific thread.  Returns 0 on success, EINVAL
+ * for an out-of-range signal number, or ESRCH if the target thread
+ * is not found among the active threads.
+ */
+int
+_pthread_kill(pthread_t pthread, int sig)
+{
+	struct pthread *curthread;
+	int ret;
+
+	/* Check for invalid signal numbers: */
+	if (sig < 0 || sig > _SIG_MAXSIG)
+		/* Invalid signal: */
+		return (EINVAL);
+
+	curthread = _get_curthread();
+
+	/*
+	 * Ensure the thread is in the list of active threads and the
+	 * signal is valid (signal 0 specifies error checking only):
+	 */
+	if (curthread == pthread) {
+		/* Sending to ourselves needs no lookup or locking. */
+		if (sig > 0)
+			_thr_send_sig(pthread, sig);
+		ret = 0;
+	} else if ((ret = _thr_find_thread(curthread, pthread,
+	    /*include dead*/0)) == 0) {
+		if (sig > 0)
+			_thr_send_sig(pthread, sig);
+		THR_THREAD_UNLOCK(curthread, pthread);
+	}
+
+	/* Return the completion status: */
+	return (ret);
+}
diff --git a/lib/libthr/thread/thr_list.c b/lib/libthr/thread/thr_list.c
new file mode 100644
index 0000000000000..530e49721a4b2
--- /dev/null
+++ b/lib/libthr/thread/thr_list.c
@@ -0,0 +1,364 @@
+/*-
+ * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
+ *
+ * Copyright (c) 2005 David Xu <davidxu@freebsd.org>
+ * Copyright (C) 2003 Daniel M. Eischen <deischen@freebsd.org>
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice unmodified, this list of conditions, and the following
+ * disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+
+#include <sys/types.h>
+#include <sys/queue.h>
+
+#include <stdlib.h>
+#include <string.h>
+#include <pthread.h>
+
+#include "libc_private.h"
+#include "thr_private.h"
+
+/*#define DEBUG_THREAD_LIST */
+#ifdef DEBUG_THREAD_LIST
+#define DBG_MSG stdout_debug
+#else
+#define DBG_MSG(x...)
+#endif
+
+#define MAX_THREADS 100000
+
+/*
+ * Define a high water mark for the maximum number of threads that
+ * will be cached. Once this level is reached, any extra threads
+ * will be free()'d.
+ */
+#define MAX_CACHED_THREADS 100
+
+/*
+ * We've got to keep track of everything that is allocated, not only
+ * to have a speedy free list, but also so they can be deallocated
+ * after a fork().
+ */
+static TAILQ_HEAD(, pthread) free_threadq;	/* cache of freed thread structs */
+static struct umutex free_thread_lock = DEFAULT_UMUTEX;	/* protects free_threadq */
+static struct umutex tcb_lock = DEFAULT_UMUTEX;	/* serializes _tcb_ctor/_tcb_dtor */
+static int free_thread_count = 0;	/* current length of free_threadq */
+static int inited = 0;			/* set after first _thr_list_init() */
+static int total_threads;		/* allocated thread structures, live + cached */
+
+LIST_HEAD(thread_hash_head, pthread);
+#define HASH_QUEUES	128
+static struct thread_hash_head thr_hashtable[HASH_QUEUES];
+#define THREAD_HASH(thrd)	(((unsigned long)thrd >> 8) % HASH_QUEUES)
+
+static void thr_destroy(struct pthread *curthread, struct pthread *thread);
+
+/*
+ * Reset all thread-list state; called from init_private() both at
+ * first initialization and when reinitializing after fork().
+ */
+void
+_thr_list_init(void)
+{
+	int i;
+
+	_gc_count = 0;
+	total_threads = 1;
+	_thr_urwlock_init(&_thr_list_lock);
+	TAILQ_INIT(&_thread_list);
+	TAILQ_INIT(&free_threadq);
+	_thr_umutex_init(&free_thread_lock);
+	_thr_umutex_init(&tcb_lock);
+	/*
+	 * The static hash buckets are zero on first use; they only need
+	 * explicit reinitialization on repeated calls (post-fork).
+	 */
+	if (inited) {
+		for (i = 0; i < HASH_QUEUES; ++i)
+			LIST_INIT(&thr_hashtable[i]);
+	}
+	inited = 1;
+}
+
+/*
+ * Reclaim threads on the GC list whose kernel side has exited
+ * (tid == TID_TERMINATED).  Stacks are freed under the list lock;
+ * the structures themselves are freed afterwards from a private
+ * work list to keep the lock hold time short.
+ */
+void
+_thr_gc(struct pthread *curthread)
+{
+	struct pthread *td, *td_next;
+	TAILQ_HEAD(, pthread) worklist;
+
+	TAILQ_INIT(&worklist);
+	THREAD_LIST_WRLOCK(curthread);
+
+	/* Check the threads waiting for GC. */
+	TAILQ_FOREACH_SAFE(td, &_thread_gc_list, gcle, td_next) {
+		if (td->tid != TID_TERMINATED) {
+			/* make sure we are not still in userland */
+			continue;
+		}
+		_thr_stack_free(&td->attr);
+		THR_GCLIST_REMOVE(td);
+		TAILQ_INSERT_HEAD(&worklist, td, gcle);
+	}
+	THREAD_LIST_UNLOCK(curthread);
+
+	while ((td = TAILQ_FIRST(&worklist)) != NULL) {
+		TAILQ_REMOVE(&worklist, td, gcle);
+		/*
+		 * XXX we don't free the initial thread, because there
+		 * might be code still referencing it.
+		 */
+		if (td == _thr_initial) {
+			DBG_MSG("Initial thread won't be freed\n");
+			continue;
+		}
+
+		_thr_free(curthread, td);
+	}
+}
+
+/*
+ * Allocate a thread structure, preferring the free-thread cache when
+ * a current thread exists.  Returns NULL when MAX_THREADS is reached
+ * or a resource (memory, sleepqueue, wake address, TCB) cannot be
+ * obtained.
+ */
+struct pthread *
+_thr_alloc(struct pthread *curthread)
+{
+	struct pthread *thread = NULL;
+	struct tcb *tcb;
+
+	if (curthread != NULL) {
+		if (GC_NEEDED())
+			_thr_gc(curthread);
+		/* Try to reuse a cached thread structure first. */
+		if (free_thread_count > 0) {
+			THR_LOCK_ACQUIRE(curthread, &free_thread_lock);
+			if ((thread = TAILQ_FIRST(&free_threadq)) != NULL) {
+				TAILQ_REMOVE(&free_threadq, thread, tle);
+				free_thread_count--;
+			}
+			THR_LOCK_RELEASE(curthread, &free_thread_lock);
+		}
+	}
+	if (thread == NULL) {
+		if (total_threads > MAX_THREADS)
+			return (NULL);
+		atomic_fetchadd_int(&total_threads, 1);
+		thread = calloc(1, sizeof(struct pthread));
+		if (thread == NULL) {
+			atomic_fetchadd_int(&total_threads, -1);
+			return (NULL);
+		}
+		if ((thread->sleepqueue = _sleepq_alloc()) == NULL ||
+		    (thread->wake_addr = _thr_alloc_wake_addr()) == NULL) {
+			thr_destroy(curthread, thread);
+			atomic_fetchadd_int(&total_threads, -1);
+			return (NULL);
+		}
+	} else {
+		/* Cached structures are dirty: zero the reusable region. */
+		bzero(&thread->_pthread_startzero,
+		    __rangeof(struct pthread, _pthread_startzero, _pthread_endzero));
+	}
+	if (curthread != NULL) {
+		/* tcb_lock serializes TCB construction against destruction. */
+		THR_LOCK_ACQUIRE(curthread, &tcb_lock);
+		tcb = _tcb_ctor(thread, 0 /* not initial tls */);
+		THR_LOCK_RELEASE(curthread, &tcb_lock);
+	} else {
+		tcb = _tcb_ctor(thread, 1 /* initial tls */);
+	}
+	if (tcb != NULL) {
+		thread->tcb = tcb;
+	} else {
+		thr_destroy(curthread, thread);
+		atomic_fetchadd_int(&total_threads, -1);
+		thread = NULL;
+	}
+	return (thread);
+}
+
+/*
+ * Release a thread structure: always destroy its TCB, then either
+ * destroy the structure or place it on the free-thread cache.
+ */
+void
+_thr_free(struct pthread *curthread, struct pthread *thread)
+{
+	DBG_MSG("Freeing thread %p\n", thread);
+
+	/*
+	 * Always free tcb, as we only know it is part of RTLD TLS
+	 * block, but don't know its detail and can not assume how
+	 * it works, so better to avoid caching it here.
+	 */
+	if (curthread != NULL) {
+		THR_LOCK_ACQUIRE(curthread, &tcb_lock);
+		_tcb_dtor(thread->tcb);
+		THR_LOCK_RELEASE(curthread, &tcb_lock);
+	} else {
+		_tcb_dtor(thread->tcb);
+	}
+	thread->tcb = NULL;
+	if ((curthread == NULL) || (free_thread_count >= MAX_CACHED_THREADS)) {
+		thr_destroy(curthread, thread);
+		atomic_fetchadd_int(&total_threads, -1);
+	} else {
+		/*
+		 * Add the thread to the free thread list; this also avoids
+		 * the pthread id being reused too quickly, which may help
+		 * some buggy apps.
+		 */
+		THR_LOCK_ACQUIRE(curthread, &free_thread_lock);
+		TAILQ_INSERT_TAIL(&free_threadq, thread, tle);
+		free_thread_count++;
+		THR_LOCK_RELEASE(curthread, &free_thread_lock);
+	}
+}
+
+/* Free a thread structure and the per-thread resources it still owns. */
+static void
+thr_destroy(struct pthread *curthread __unused, struct pthread *thread)
+{
+	if (thread->sleepqueue != NULL)
+		_sleepq_free(thread->sleepqueue);
+	if (thread->wake_addr != NULL)
+		_thr_release_wake_addr(thread->wake_addr);
+	free(thread);
+}
+
+/*
+ * Add the thread to the list of all threads and increment
+ * number of active threads.  The active count is maintained
+ * atomically, outside the list lock.
+ */
+void
+_thr_link(struct pthread *curthread, struct pthread *thread)
+{
+	THREAD_LIST_WRLOCK(curthread);
+	THR_LIST_ADD(thread);
+	THREAD_LIST_UNLOCK(curthread);
+	atomic_add_int(&_thread_active_threads, 1);
+}
+
+/*
+ * Remove an active thread.  Counterpart of _thr_link(); the active
+ * count is decremented atomically after the list is updated.
+ */
+void
+_thr_unlink(struct pthread *curthread, struct pthread *thread)
+{
+	THREAD_LIST_WRLOCK(curthread);
+	THR_LIST_REMOVE(thread);
+	THREAD_LIST_UNLOCK(curthread);
+	atomic_add_int(&_thread_active_threads, -1);
+}
+
+/* Insert the thread into its bucket in the active-thread hash table. */
+void
+_thr_hash_add(struct pthread *thread)
+{
+	struct thread_hash_head *head;
+
+	head = &thr_hashtable[THREAD_HASH(thread)];
+	LIST_INSERT_HEAD(head, thread, hle);
+}
+
+/* Remove the thread from the active-thread hash table. */
+void
+_thr_hash_remove(struct pthread *thread)
+{
+	LIST_REMOVE(thread, hle);
+}
+
+/*
+ * Look up a thread pointer in the hash table; returns the pointer if
+ * it names a known thread, NULL otherwise.
+ */
+struct pthread *
+_thr_hash_find(struct pthread *thread)
+{
+	struct pthread *td;
+	struct thread_hash_head *head;
+
+	head = &thr_hashtable[THREAD_HASH(thread)];
+	LIST_FOREACH(td, head, hle) {
+		if (td == thread)
+			return (thread);
+	}
+	return (NULL);
+}
+
+/*
+ * Find a thread in the linked list of active threads and add a reference
+ * to it.  Threads with positive reference counts will not be deallocated
+ * until all references are released.
+ *
+ * NOTE: on success this enters a critical section which the matching
+ * _thr_ref_delete() call leaves.
+ */
+int
+_thr_ref_add(struct pthread *curthread, struct pthread *thread,
+    int include_dead)
+{
+	int ret;
+
+	if (thread == NULL)
+		/* Invalid thread: */
+		return (EINVAL);
+
+	if ((ret = _thr_find_thread(curthread, thread, include_dead)) == 0) {
+		thread->refcount++;
+		THR_CRITICAL_ENTER(curthread);
+		THR_THREAD_UNLOCK(curthread, thread);
+	}
+
+	/* Return zero if the thread exists: */
+	return (ret);
+}
+
+/*
+ * Drop a reference taken by _thr_ref_add() and leave its critical
+ * section; the thread may be queued for GC if it becomes eligible.
+ */
+void
+_thr_ref_delete(struct pthread *curthread, struct pthread *thread)
+{
+	THR_THREAD_LOCK(curthread, thread);
+	thread->refcount--;
+	_thr_try_gc(curthread, thread);
+	THR_CRITICAL_LEAVE(curthread);
+}
+
+/* entered with thread lock held, exit with thread lock released */
+void
+_thr_try_gc(struct pthread *curthread, struct pthread *thread)
+{
+	if (THR_SHOULD_GC(thread)) {
+		/*
+		 * The list write lock must be acquired before the thread
+		 * lock, so take a reference, drop the thread lock, and
+		 * reacquire both in order; then re-test the GC condition,
+		 * which may have changed while unlocked.
+		 */
+		THR_REF_ADD(curthread, thread);
+		THR_THREAD_UNLOCK(curthread, thread);
+		THREAD_LIST_WRLOCK(curthread);
+		THR_THREAD_LOCK(curthread, thread);
+		THR_REF_DEL(curthread, thread);
+		if (THR_SHOULD_GC(thread)) {
+			THR_LIST_REMOVE(thread);
+			THR_GCLIST_ADD(thread);
+		}
+		THR_THREAD_UNLOCK(curthread, thread);
+		THREAD_LIST_UNLOCK(curthread);
+	} else {
+		THR_THREAD_UNLOCK(curthread, thread);
+	}
+}
+
+/*
+ * return with thread lock held if thread is found
+ *
+ * Returns EINVAL for a NULL argument, ESRCH if the thread is unknown
+ * (or dead and include_dead is zero), otherwise 0.
+ */
+int
+_thr_find_thread(struct pthread *curthread, struct pthread *thread,
+    int include_dead)
+{
+	struct pthread *pthread;
+	int ret;
+
+	if (thread == NULL)
+		return (EINVAL);
+
+	ret = 0;
+	THREAD_LIST_RDLOCK(curthread);
+	pthread = _thr_hash_find(thread);
+	if (pthread) {
+		THR_THREAD_LOCK(curthread, pthread);
+		/* Dead threads count as missing unless include_dead is set. */
+		if (include_dead == 0 && pthread->state == PS_DEAD) {
+			THR_THREAD_UNLOCK(curthread, pthread);
+			ret = ESRCH;
+		}
+	} else {
+		ret = ESRCH;
+	}
+	THREAD_LIST_UNLOCK(curthread);
+	return (ret);
+}
diff --git a/lib/libthr/thread/thr_main_np.c b/lib/libthr/thread/thr_main_np.c
new file mode 100644
index 0000000000000..7ce54bc9f1f01
--- /dev/null
+++ b/lib/libthr/thread/thr_main_np.c
@@ -0,0 +1,53 @@
+/*-
+ * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
+ *
+ * Copyright (c) 2001 Alfred Perlstein
+ * Author: Alfred Perlstein <alfred@FreeBSD.org>
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+
+#include "namespace.h"
+#include <pthread.h>
+#include <pthread_np.h>
+#include "un-namespace.h"
+
+#include "thr_private.h"
+
+__weak_reference(_pthread_main_np, pthread_main_np);
+
+/*
+ * Provide the equivalent to Solaris thr_main() function.
+ * Returns 1 if the caller is the initial (main) thread, 0 if not,
+ * and -1 if the threads library has not been initialized yet.
+ */
+int
+_pthread_main_np(void)
+{
+
+	if (!_thr_initial)
+		return (-1);
+	else
+		return (_pthread_equal(_pthread_self(), _thr_initial) ? 1 : 0);
+}
diff --git a/lib/libthr/thread/thr_multi_np.c b/lib/libthr/thread/thr_multi_np.c
new file mode 100644
index 0000000000000..59e782cc59b2e
--- /dev/null
+++ b/lib/libthr/thread/thr_multi_np.c
@@ -0,0 +1,53 @@
+/*-
+ * SPDX-License-Identifier: BSD-3-Clause
+ *
+ * Copyright (c) 1996 John Birrell <jb@cimlogic.com.au>.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. Neither the name of the author nor the names of any co-contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY JOHN BIRRELL AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+
+#include "namespace.h"
+#include <pthread.h>
+#include <pthread_np.h>
+#include "un-namespace.h"
+
+__weak_reference(_pthread_multi_np, pthread_multi_np);
+
+int
+_pthread_multi_np(void)
+{
+
+ /* Return to multi-threaded scheduling mode: */
+ /*
+ * XXX - Do we want to do this?
+ * __is_threaded = 1;
+ */
+ _pthread_resume_all_np();
+ return (0);
+}
diff --git a/lib/libthr/thread/thr_mutex.c b/lib/libthr/thread/thr_mutex.c
new file mode 100644
index 0000000000000..d2d9f5b54c100
--- /dev/null
+++ b/lib/libthr/thread/thr_mutex.c
@@ -0,0 +1,1190 @@
+/*-
+ * SPDX-License-Identifier: BSD-4-Clause
+ *
+ * Copyright (c) 1995 John Birrell <jb@cimlogic.com.au>.
+ * Copyright (c) 2006 David Xu <davidxu@freebsd.org>.
+ * Copyright (c) 2015, 2016 The FreeBSD Foundation
+ *
+ * All rights reserved.
+ *
+ * Portions of this software were developed by Konstantin Belousov
+ * under sponsorship from the FreeBSD Foundation.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. All advertising materials mentioning features or use of this software
+ * must display the following acknowledgement:
+ * This product includes software developed by John Birrell.
+ * 4. Neither the name of the author nor the names of any co-contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY JOHN BIRRELL AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+
+#include "namespace.h"
+#include <stdlib.h>
+#include <errno.h>
+#include <string.h>
+#include <sys/param.h>
+#include <sys/queue.h>
+#include <pthread.h>
+#include <pthread_np.h>
+#include "un-namespace.h"
+
+#include "thr_private.h"
+
+_Static_assert(sizeof(struct pthread_mutex) <= PAGE_SIZE,
+ "pthread_mutex is too large for off-page");
+
+/*
+ * For adaptive mutexes, how many times to spin doing trylock2
+ * before entering the kernel to block
+ */
+#define MUTEX_ADAPTIVE_SPINS 2000
+
+/*
+ * Prototypes
+ */
+int __pthread_mutex_consistent(pthread_mutex_t *mutex);
+int __pthread_mutex_init(pthread_mutex_t * __restrict mutex,
+ const pthread_mutexattr_t * __restrict mutex_attr);
+int __pthread_mutex_trylock(pthread_mutex_t *mutex);
+int __pthread_mutex_lock(pthread_mutex_t *mutex);
+int __pthread_mutex_timedlock(pthread_mutex_t * __restrict mutex,
+ const struct timespec * __restrict abstime);
+int _pthread_mutex_getspinloops_np(pthread_mutex_t *mutex, int *count);
+int _pthread_mutex_setspinloops_np(pthread_mutex_t *mutex, int count);
+int __pthread_mutex_setspinloops_np(pthread_mutex_t *mutex, int count);
+int _pthread_mutex_setyieldloops_np(pthread_mutex_t *mutex, int count);
+int _pthread_mutex_getyieldloops_np(pthread_mutex_t *mutex, int *count);
+int __pthread_mutex_setyieldloops_np(pthread_mutex_t *mutex, int count);
+
+static int mutex_self_trylock(pthread_mutex_t);
+static int mutex_self_lock(pthread_mutex_t,
+ const struct timespec *abstime);
+static int mutex_unlock_common(struct pthread_mutex *, bool, int *);
+static int mutex_lock_sleep(struct pthread *, pthread_mutex_t,
+ const struct timespec *);
+static void mutex_init_robust(struct pthread *curthread);
+static int mutex_qidx(struct pthread_mutex *m);
+static bool is_robust_mutex(struct pthread_mutex *m);
+static bool is_pshared_mutex(struct pthread_mutex *m);
+
+__weak_reference(__pthread_mutex_init, pthread_mutex_init);
+__strong_reference(__pthread_mutex_init, _pthread_mutex_init);
+__weak_reference(__pthread_mutex_lock, pthread_mutex_lock);
+__strong_reference(__pthread_mutex_lock, _pthread_mutex_lock);
+__weak_reference(__pthread_mutex_timedlock, pthread_mutex_timedlock);
+__strong_reference(__pthread_mutex_timedlock, _pthread_mutex_timedlock);
+__weak_reference(__pthread_mutex_trylock, pthread_mutex_trylock);
+__strong_reference(__pthread_mutex_trylock, _pthread_mutex_trylock);
+__weak_reference(_pthread_mutex_consistent, pthread_mutex_consistent);
+__strong_reference(_pthread_mutex_consistent, __pthread_mutex_consistent);
+
+/* Single underscore versions provided for libc internal usage: */
+/* No difference between libc and application usage of these: */
+__weak_reference(_pthread_mutex_destroy, pthread_mutex_destroy);
+__weak_reference(_pthread_mutex_unlock, pthread_mutex_unlock);
+
+__weak_reference(_pthread_mutex_getprioceiling, pthread_mutex_getprioceiling);
+__weak_reference(_pthread_mutex_setprioceiling, pthread_mutex_setprioceiling);
+
+__weak_reference(__pthread_mutex_setspinloops_np, pthread_mutex_setspinloops_np);
+__strong_reference(__pthread_mutex_setspinloops_np, _pthread_mutex_setspinloops_np);
+__weak_reference(_pthread_mutex_getspinloops_np, pthread_mutex_getspinloops_np);
+
+__weak_reference(__pthread_mutex_setyieldloops_np, pthread_mutex_setyieldloops_np);
+__strong_reference(__pthread_mutex_setyieldloops_np, _pthread_mutex_setyieldloops_np);
+__weak_reference(_pthread_mutex_getyieldloops_np, pthread_mutex_getyieldloops_np);
+__weak_reference(_pthread_mutex_isowned_np, pthread_mutex_isowned_np);
+
+static void
+mutex_init_link(struct pthread_mutex *m)
+{
+
+#if defined(_PTHREADS_INVARIANTS)
+ m->m_qe.tqe_prev = NULL;
+ m->m_qe.tqe_next = NULL;
+ m->m_pqe.tqe_prev = NULL;
+ m->m_pqe.tqe_next = NULL;
+#endif
+}
+
+static void
+mutex_assert_is_owned(struct pthread_mutex *m __unused)
+{
+
+#if defined(_PTHREADS_INVARIANTS)
+ if (__predict_false(m->m_qe.tqe_prev == NULL))
+ PANIC("mutex %p own %#x is not on list %p %p",
+ m, m->m_lock.m_owner, m->m_qe.tqe_prev, m->m_qe.tqe_next);
+#endif
+}
+
+static void
+mutex_assert_not_owned(struct pthread *curthread __unused,
+ struct pthread_mutex *m __unused)
+{
+
+#if defined(_PTHREADS_INVARIANTS)
+ if (__predict_false(m->m_qe.tqe_prev != NULL ||
+ m->m_qe.tqe_next != NULL))
+ PANIC("mutex %p own %#x is on list %p %p",
+ m, m->m_lock.m_owner, m->m_qe.tqe_prev, m->m_qe.tqe_next);
+ if (__predict_false(is_robust_mutex(m) &&
+ (m->m_lock.m_rb_lnk != 0 || m->m_rb_prev != NULL ||
+ (is_pshared_mutex(m) && curthread->robust_list ==
+ (uintptr_t)&m->m_lock) ||
+ (!is_pshared_mutex(m) && curthread->priv_robust_list ==
+ (uintptr_t)&m->m_lock))))
+ PANIC(
+ "mutex %p own %#x is on robust linkage %p %p head %p phead %p",
+ m, m->m_lock.m_owner, (void *)m->m_lock.m_rb_lnk,
+ m->m_rb_prev, (void *)curthread->robust_list,
+ (void *)curthread->priv_robust_list);
+#endif
+}
+
+static bool
+is_pshared_mutex(struct pthread_mutex *m)
+{
+
+ return ((m->m_lock.m_flags & USYNC_PROCESS_SHARED) != 0);
+}
+
+static bool
+is_robust_mutex(struct pthread_mutex *m)
+{
+
+ return ((m->m_lock.m_flags & UMUTEX_ROBUST) != 0);
+}
+
+int
+_mutex_enter_robust(struct pthread *curthread, struct pthread_mutex *m)
+{
+
+#if defined(_PTHREADS_INVARIANTS)
+ if (__predict_false(curthread->inact_mtx != 0))
+ PANIC("inact_mtx enter");
+#endif
+ if (!is_robust_mutex(m))
+ return (0);
+
+ mutex_init_robust(curthread);
+ curthread->inact_mtx = (uintptr_t)&m->m_lock;
+ return (1);
+}
+
+void
+_mutex_leave_robust(struct pthread *curthread, struct pthread_mutex *m __unused)
+{
+
+#if defined(_PTHREADS_INVARIANTS)
+ if (__predict_false(curthread->inact_mtx != (uintptr_t)&m->m_lock))
+ PANIC("inact_mtx leave");
+#endif
+ curthread->inact_mtx = 0;
+}
+
+static int
+mutex_check_attr(const struct pthread_mutex_attr *attr)
+{
+
+ if (attr->m_type < PTHREAD_MUTEX_ERRORCHECK ||
+ attr->m_type >= PTHREAD_MUTEX_TYPE_MAX)
+ return (EINVAL);
+ if (attr->m_protocol < PTHREAD_PRIO_NONE ||
+ attr->m_protocol > PTHREAD_PRIO_PROTECT)
+ return (EINVAL);
+ return (0);
+}
+
+static void
+mutex_init_robust(struct pthread *curthread)
+{
+ struct umtx_robust_lists_params rb;
+
+ if (curthread == NULL)
+ curthread = _get_curthread();
+ if (curthread->robust_inited)
+ return;
+ rb.robust_list_offset = (uintptr_t)&curthread->robust_list;
+ rb.robust_priv_list_offset = (uintptr_t)&curthread->priv_robust_list;
+ rb.robust_inact_offset = (uintptr_t)&curthread->inact_mtx;
+ _umtx_op(NULL, UMTX_OP_ROBUST_LISTS, sizeof(rb), &rb, NULL);
+ curthread->robust_inited = 1;
+}
+
+static void
+mutex_init_body(struct pthread_mutex *pmutex,
+ const struct pthread_mutex_attr *attr)
+{
+
+ pmutex->m_flags = attr->m_type;
+ pmutex->m_count = 0;
+ pmutex->m_spinloops = 0;
+ pmutex->m_yieldloops = 0;
+ mutex_init_link(pmutex);
+ switch (attr->m_protocol) {
+ case PTHREAD_PRIO_NONE:
+ pmutex->m_lock.m_owner = UMUTEX_UNOWNED;
+ pmutex->m_lock.m_flags = 0;
+ break;
+ case PTHREAD_PRIO_INHERIT:
+ pmutex->m_lock.m_owner = UMUTEX_UNOWNED;
+ pmutex->m_lock.m_flags = UMUTEX_PRIO_INHERIT;
+ break;
+ case PTHREAD_PRIO_PROTECT:
+ pmutex->m_lock.m_owner = UMUTEX_CONTESTED;
+ pmutex->m_lock.m_flags = UMUTEX_PRIO_PROTECT;
+ pmutex->m_lock.m_ceilings[0] = attr->m_ceiling;
+ break;
+ }
+ if (attr->m_pshared == PTHREAD_PROCESS_SHARED)
+ pmutex->m_lock.m_flags |= USYNC_PROCESS_SHARED;
+ if (attr->m_robust == PTHREAD_MUTEX_ROBUST) {
+ mutex_init_robust(NULL);
+ pmutex->m_lock.m_flags |= UMUTEX_ROBUST;
+ }
+ if (PMUTEX_TYPE(pmutex->m_flags) == PTHREAD_MUTEX_ADAPTIVE_NP) {
+ pmutex->m_spinloops =
+ _thr_spinloops ? _thr_spinloops: MUTEX_ADAPTIVE_SPINS;
+ pmutex->m_yieldloops = _thr_yieldloops;
+ }
+}
+
+static int
+mutex_init(pthread_mutex_t *mutex,
+ const struct pthread_mutex_attr *mutex_attr,
+ void *(calloc_cb)(size_t, size_t))
+{
+ const struct pthread_mutex_attr *attr;
+ struct pthread_mutex *pmutex;
+ int error;
+
+ if (mutex_attr == NULL) {
+ attr = &_pthread_mutexattr_default;
+ } else {
+ attr = mutex_attr;
+ error = mutex_check_attr(attr);
+ if (error != 0)
+ return (error);
+ }
+ if ((pmutex = (pthread_mutex_t)
+ calloc_cb(1, sizeof(struct pthread_mutex))) == NULL)
+ return (ENOMEM);
+ mutex_init_body(pmutex, attr);
+ *mutex = pmutex;
+ return (0);
+}
+
+static int
+init_static(struct pthread *thread, pthread_mutex_t *mutex)
+{
+ int ret;
+
+ THR_LOCK_ACQUIRE(thread, &_mutex_static_lock);
+
+ if (*mutex == THR_MUTEX_INITIALIZER)
+ ret = mutex_init(mutex, &_pthread_mutexattr_default, calloc);
+ else if (*mutex == THR_ADAPTIVE_MUTEX_INITIALIZER)
+ ret = mutex_init(mutex, &_pthread_mutexattr_adaptive_default,
+ calloc);
+ else
+ ret = 0;
+ THR_LOCK_RELEASE(thread, &_mutex_static_lock);
+
+ return (ret);
+}
+
+static void
+set_inherited_priority(struct pthread *curthread, struct pthread_mutex *m)
+{
+ struct pthread_mutex *m2;
+
+ m2 = TAILQ_LAST(&curthread->mq[mutex_qidx(m)], mutex_queue);
+ if (m2 != NULL)
+ m->m_lock.m_ceilings[1] = m2->m_lock.m_ceilings[0];
+ else
+ m->m_lock.m_ceilings[1] = -1;
+}
+
+static void
+shared_mutex_init(struct pthread_mutex *pmtx, const struct
+ pthread_mutex_attr *mutex_attr)
+{
+ static const struct pthread_mutex_attr foobar_mutex_attr = {
+ .m_type = PTHREAD_MUTEX_DEFAULT,
+ .m_protocol = PTHREAD_PRIO_NONE,
+ .m_ceiling = 0,
+ .m_pshared = PTHREAD_PROCESS_SHARED,
+ .m_robust = PTHREAD_MUTEX_STALLED,
+ };
+ bool done;
+
+ /*
+ * Hack to allow multiple pthread_mutex_init() calls on the
+ * same process-shared mutex. We rely on kernel allocating
+ * zeroed offpage for the mutex, i.e. the
+ * PMUTEX_INITSTAGE_ALLOC value must be zero.
+ */
+ for (done = false; !done;) {
+ switch (pmtx->m_ps) {
+ case PMUTEX_INITSTAGE_DONE:
+ atomic_thread_fence_acq();
+ done = true;
+ break;
+ case PMUTEX_INITSTAGE_ALLOC:
+ if (atomic_cmpset_int(&pmtx->m_ps,
+ PMUTEX_INITSTAGE_ALLOC, PMUTEX_INITSTAGE_BUSY)) {
+ if (mutex_attr == NULL)
+ mutex_attr = &foobar_mutex_attr;
+ mutex_init_body(pmtx, mutex_attr);
+ atomic_store_rel_int(&pmtx->m_ps,
+ PMUTEX_INITSTAGE_DONE);
+ done = true;
+ }
+ break;
+ case PMUTEX_INITSTAGE_BUSY:
+ _pthread_yield();
+ break;
+ default:
+ PANIC("corrupted offpage");
+ break;
+ }
+ }
+}
+
+int
+__pthread_mutex_init(pthread_mutex_t * __restrict mutex,
+ const pthread_mutexattr_t * __restrict mutex_attr)
+{
+ struct pthread_mutex *pmtx;
+ int ret;
+
+ if (mutex_attr != NULL) {
+ ret = mutex_check_attr(*mutex_attr);
+ if (ret != 0)
+ return (ret);
+ }
+ if (mutex_attr == NULL ||
+ (*mutex_attr)->m_pshared == PTHREAD_PROCESS_PRIVATE) {
+ return (mutex_init(mutex, mutex_attr ? *mutex_attr : NULL,
+ calloc));
+ }
+ pmtx = __thr_pshared_offpage(__DECONST(void *, mutex), 1);
+ if (pmtx == NULL)
+ return (EFAULT);
+ *mutex = THR_PSHARED_PTR;
+ shared_mutex_init(pmtx, *mutex_attr);
+ return (0);
+}
+
+/* This function is used internally by malloc. */
+int
+_pthread_mutex_init_calloc_cb(pthread_mutex_t *mutex,
+ void *(calloc_cb)(size_t, size_t))
+{
+ static const struct pthread_mutex_attr attr = {
+ .m_type = PTHREAD_MUTEX_NORMAL,
+ .m_protocol = PTHREAD_PRIO_NONE,
+ .m_ceiling = 0,
+ .m_pshared = PTHREAD_PROCESS_PRIVATE,
+ .m_robust = PTHREAD_MUTEX_STALLED,
+ };
+ int ret;
+
+ ret = mutex_init(mutex, &attr, calloc_cb);
+ if (ret == 0)
+ (*mutex)->m_flags |= PMUTEX_FLAG_PRIVATE;
+ return (ret);
+}
+
+/*
+ * Fix mutex ownership for child process.
+ *
+ * Process private mutex ownership is transmitted from the forking
+ * thread to the child process.
+ *
+ * Process shared mutex should not be inherited because owner is
+ * forking thread which is in parent process, they are removed from
+ * the owned mutex list.
+ */
+static void
+queue_fork(struct pthread *curthread, struct mutex_queue *q,
+ struct mutex_queue *qp, uint bit)
+{
+ struct pthread_mutex *m;
+
+ TAILQ_INIT(q);
+ TAILQ_FOREACH(m, qp, m_pqe) {
+ TAILQ_INSERT_TAIL(q, m, m_qe);
+ m->m_lock.m_owner = TID(curthread) | bit;
+ }
+}
+
+void
+_mutex_fork(struct pthread *curthread)
+{
+
+ queue_fork(curthread, &curthread->mq[TMQ_NORM],
+ &curthread->mq[TMQ_NORM_PRIV], 0);
+ queue_fork(curthread, &curthread->mq[TMQ_NORM_PP],
+ &curthread->mq[TMQ_NORM_PP_PRIV], UMUTEX_CONTESTED);
+ queue_fork(curthread, &curthread->mq[TMQ_ROBUST_PP],
+ &curthread->mq[TMQ_ROBUST_PP_PRIV], UMUTEX_CONTESTED);
+ curthread->robust_list = 0;
+}
+
+int
+_pthread_mutex_destroy(pthread_mutex_t *mutex)
+{
+ pthread_mutex_t m, m1;
+ int ret;
+
+ m = *mutex;
+ if (m < THR_MUTEX_DESTROYED) {
+ ret = 0;
+ } else if (m == THR_MUTEX_DESTROYED) {
+ ret = EINVAL;
+ } else {
+ if (m == THR_PSHARED_PTR) {
+ m1 = __thr_pshared_offpage(mutex, 0);
+ if (m1 != NULL) {
+ mutex_assert_not_owned(_get_curthread(), m1);
+ __thr_pshared_destroy(mutex);
+ }
+ *mutex = THR_MUTEX_DESTROYED;
+ return (0);
+ }
+ if (PMUTEX_OWNER_ID(m) != 0 &&
+ (uint32_t)m->m_lock.m_owner != UMUTEX_RB_NOTRECOV) {
+ ret = EBUSY;
+ } else {
+ *mutex = THR_MUTEX_DESTROYED;
+ mutex_assert_not_owned(_get_curthread(), m);
+ free(m);
+ ret = 0;
+ }
+ }
+
+ return (ret);
+}
+
+static int
+mutex_qidx(struct pthread_mutex *m)
+{
+
+ if ((m->m_lock.m_flags & UMUTEX_PRIO_PROTECT) == 0)
+ return (TMQ_NORM);
+ return (is_robust_mutex(m) ? TMQ_ROBUST_PP : TMQ_NORM_PP);
+}
+
+/*
+ * Both enqueue_mutex() and dequeue_mutex() operate on the
+ * thread-private linkage of the locked mutexes and on the robust
+ * linkage.
+ *
+ * Robust list, as seen by kernel, must be consistent even in the case
+ * of thread termination at arbitrary moment. Since either enqueue or
+ * dequeue for list walked by kernel consists of rewriting a single
+ * forward pointer, it is safe. On the other hand, rewrite of the
+ * back pointer is not atomic WRT the forward one, but kernel does not
+ * care.
+ */
+static void
+enqueue_mutex(struct pthread *curthread, struct pthread_mutex *m,
+ int error)
+{
+ struct pthread_mutex *m1;
+ uintptr_t *rl;
+ int qidx;
+
+ /* Add to the list of owned mutexes: */
+ if (error != EOWNERDEAD)
+ mutex_assert_not_owned(curthread, m);
+ qidx = mutex_qidx(m);
+ TAILQ_INSERT_TAIL(&curthread->mq[qidx], m, m_qe);
+ if (!is_pshared_mutex(m))
+ TAILQ_INSERT_TAIL(&curthread->mq[qidx + 1], m, m_pqe);
+ if (is_robust_mutex(m)) {
+ rl = is_pshared_mutex(m) ? &curthread->robust_list :
+ &curthread->priv_robust_list;
+ m->m_rb_prev = NULL;
+ if (*rl != 0) {
+ m1 = __containerof((void *)*rl,
+ struct pthread_mutex, m_lock);
+ m->m_lock.m_rb_lnk = (uintptr_t)&m1->m_lock;
+ m1->m_rb_prev = m;
+ } else {
+ m1 = NULL;
+ m->m_lock.m_rb_lnk = 0;
+ }
+ *rl = (uintptr_t)&m->m_lock;
+ }
+}
+
+static void
+dequeue_mutex(struct pthread *curthread, struct pthread_mutex *m)
+{
+ struct pthread_mutex *mp, *mn;
+ int qidx;
+
+ mutex_assert_is_owned(m);
+ qidx = mutex_qidx(m);
+ if (is_robust_mutex(m)) {
+ mp = m->m_rb_prev;
+ if (mp == NULL) {
+ if (is_pshared_mutex(m)) {
+ curthread->robust_list = m->m_lock.m_rb_lnk;
+ } else {
+ curthread->priv_robust_list =
+ m->m_lock.m_rb_lnk;
+ }
+ } else {
+ mp->m_lock.m_rb_lnk = m->m_lock.m_rb_lnk;
+ }
+ if (m->m_lock.m_rb_lnk != 0) {
+ mn = __containerof((void *)m->m_lock.m_rb_lnk,
+ struct pthread_mutex, m_lock);
+ mn->m_rb_prev = m->m_rb_prev;
+ }
+ m->m_lock.m_rb_lnk = 0;
+ m->m_rb_prev = NULL;
+ }
+ TAILQ_REMOVE(&curthread->mq[qidx], m, m_qe);
+ if (!is_pshared_mutex(m))
+ TAILQ_REMOVE(&curthread->mq[qidx + 1], m, m_pqe);
+ if ((m->m_lock.m_flags & UMUTEX_PRIO_PROTECT) != 0)
+ set_inherited_priority(curthread, m);
+ mutex_init_link(m);
+}
+
+static int
+check_and_init_mutex(pthread_mutex_t *mutex, struct pthread_mutex **m)
+{
+ int ret;
+
+ *m = *mutex;
+ ret = 0;
+ if (*m == THR_PSHARED_PTR) {
+ *m = __thr_pshared_offpage(mutex, 0);
+ if (*m == NULL)
+ ret = EINVAL;
+ else
+ shared_mutex_init(*m, NULL);
+ } else if (__predict_false(*m <= THR_MUTEX_DESTROYED)) {
+ if (*m == THR_MUTEX_DESTROYED) {
+ ret = EINVAL;
+ } else {
+ ret = init_static(_get_curthread(), mutex);
+ if (ret == 0)
+ *m = *mutex;
+ }
+ }
+ return (ret);
+}
+
+int
+__pthread_mutex_trylock(pthread_mutex_t *mutex)
+{
+ struct pthread *curthread;
+ struct pthread_mutex *m;
+ uint32_t id;
+ int ret, robust;
+
+ ret = check_and_init_mutex(mutex, &m);
+ if (ret != 0)
+ return (ret);
+ curthread = _get_curthread();
+ id = TID(curthread);
+ if (m->m_flags & PMUTEX_FLAG_PRIVATE)
+ THR_CRITICAL_ENTER(curthread);
+ robust = _mutex_enter_robust(curthread, m);
+ ret = _thr_umutex_trylock(&m->m_lock, id);
+ if (__predict_true(ret == 0) || ret == EOWNERDEAD) {
+ enqueue_mutex(curthread, m, ret);
+ if (ret == EOWNERDEAD)
+ m->m_lock.m_flags |= UMUTEX_NONCONSISTENT;
+ } else if (PMUTEX_OWNER_ID(m) == id) {
+ ret = mutex_self_trylock(m);
+ } /* else {} */
+ if (robust)
+ _mutex_leave_robust(curthread, m);
+ if (ret != 0 && ret != EOWNERDEAD &&
+ (m->m_flags & PMUTEX_FLAG_PRIVATE) != 0)
+ THR_CRITICAL_LEAVE(curthread);
+ return (ret);
+}
+
+static int
+mutex_lock_sleep(struct pthread *curthread, struct pthread_mutex *m,
+ const struct timespec *abstime)
+{
+ uint32_t id, owner;
+ int count, ret;
+
+ id = TID(curthread);
+ if (PMUTEX_OWNER_ID(m) == id)
+ return (mutex_self_lock(m, abstime));
+
+ /*
+ * For adaptive mutexes, spin for a bit in the expectation
+ * that if the application requests this mutex type then
+ * the lock is likely to be released quickly and it is
+ * faster than entering the kernel
+ */
+ if (__predict_false((m->m_lock.m_flags & (UMUTEX_PRIO_PROTECT |
+ UMUTEX_PRIO_INHERIT | UMUTEX_ROBUST | UMUTEX_NONCONSISTENT)) != 0))
+ goto sleep_in_kernel;
+
+ if (!_thr_is_smp)
+ goto yield_loop;
+
+ count = m->m_spinloops;
+ while (count--) {
+ owner = m->m_lock.m_owner;
+ if ((owner & ~UMUTEX_CONTESTED) == 0) {
+ if (atomic_cmpset_acq_32(&m->m_lock.m_owner, owner,
+ id | owner)) {
+ ret = 0;
+ goto done;
+ }
+ }
+ CPU_SPINWAIT;
+ }
+
+yield_loop:
+ count = m->m_yieldloops;
+ while (count--) {
+ _sched_yield();
+ owner = m->m_lock.m_owner;
+ if ((owner & ~UMUTEX_CONTESTED) == 0) {
+ if (atomic_cmpset_acq_32(&m->m_lock.m_owner, owner,
+ id | owner)) {
+ ret = 0;
+ goto done;
+ }
+ }
+ }
+
+sleep_in_kernel:
+ if (abstime == NULL)
+ ret = __thr_umutex_lock(&m->m_lock, id);
+ else if (__predict_false(abstime->tv_nsec < 0 ||
+ abstime->tv_nsec >= 1000000000))
+ ret = EINVAL;
+ else
+ ret = __thr_umutex_timedlock(&m->m_lock, id, abstime);
+done:
+ if (ret == 0 || ret == EOWNERDEAD) {
+ enqueue_mutex(curthread, m, ret);
+ if (ret == EOWNERDEAD)
+ m->m_lock.m_flags |= UMUTEX_NONCONSISTENT;
+ }
+ return (ret);
+}
+
+static inline int
+mutex_lock_common(struct pthread_mutex *m, const struct timespec *abstime,
+ bool cvattach, bool rb_onlist)
+{
+ struct pthread *curthread;
+ int ret, robust;
+
+ robust = 0; /* pacify gcc */
+ curthread = _get_curthread();
+ if (!cvattach && m->m_flags & PMUTEX_FLAG_PRIVATE)
+ THR_CRITICAL_ENTER(curthread);
+ if (!rb_onlist)
+ robust = _mutex_enter_robust(curthread, m);
+ ret = _thr_umutex_trylock2(&m->m_lock, TID(curthread));
+ if (ret == 0 || ret == EOWNERDEAD) {
+ enqueue_mutex(curthread, m, ret);
+ if (ret == EOWNERDEAD)
+ m->m_lock.m_flags |= UMUTEX_NONCONSISTENT;
+ } else {
+ ret = mutex_lock_sleep(curthread, m, abstime);
+ }
+ if (!rb_onlist && robust)
+ _mutex_leave_robust(curthread, m);
+ if (ret != 0 && ret != EOWNERDEAD &&
+ (m->m_flags & PMUTEX_FLAG_PRIVATE) != 0 && !cvattach)
+ THR_CRITICAL_LEAVE(curthread);
+ return (ret);
+}
+
+int
+__pthread_mutex_lock(pthread_mutex_t *mutex)
+{
+ struct pthread_mutex *m;
+ int ret;
+
+ _thr_check_init();
+ ret = check_and_init_mutex(mutex, &m);
+ if (ret == 0)
+ ret = mutex_lock_common(m, NULL, false, false);
+ return (ret);
+}
+
+int
+__pthread_mutex_timedlock(pthread_mutex_t * __restrict mutex,
+ const struct timespec * __restrict abstime)
+{
+ struct pthread_mutex *m;
+ int ret;
+
+ _thr_check_init();
+ ret = check_and_init_mutex(mutex, &m);
+ if (ret == 0)
+ ret = mutex_lock_common(m, abstime, false, false);
+ return (ret);
+}
+
+int
+_pthread_mutex_unlock(pthread_mutex_t *mutex)
+{
+ struct pthread_mutex *mp;
+
+ if (*mutex == THR_PSHARED_PTR) {
+ mp = __thr_pshared_offpage(mutex, 0);
+ if (mp == NULL)
+ return (EINVAL);
+ shared_mutex_init(mp, NULL);
+ } else {
+ mp = *mutex;
+ }
+ return (mutex_unlock_common(mp, false, NULL));
+}
+
+int
+_mutex_cv_lock(struct pthread_mutex *m, int count, bool rb_onlist)
+{
+ int error;
+
+ error = mutex_lock_common(m, NULL, true, rb_onlist);
+ if (error == 0 || error == EOWNERDEAD)
+ m->m_count = count;
+ return (error);
+}
+
+int
+_mutex_cv_unlock(struct pthread_mutex *m, int *count, int *defer)
+{
+
+ /*
+ * Clear the count in case this is a recursive mutex.
+ */
+ *count = m->m_count;
+ m->m_count = 0;
+ (void)mutex_unlock_common(m, true, defer);
+ return (0);
+}
+
+int
+_mutex_cv_attach(struct pthread_mutex *m, int count)
+{
+ struct pthread *curthread;
+
+ curthread = _get_curthread();
+ enqueue_mutex(curthread, m, 0);
+ m->m_count = count;
+ return (0);
+}
+
+int
+_mutex_cv_detach(struct pthread_mutex *mp, int *recurse)
+{
+ struct pthread *curthread;
+ int deferred, error;
+
+ curthread = _get_curthread();
+ if ((error = _mutex_owned(curthread, mp)) != 0)
+ return (error);
+
+ /*
+ * Clear the count in case this is a recursive mutex.
+ */
+ *recurse = mp->m_count;
+ mp->m_count = 0;
+ dequeue_mutex(curthread, mp);
+
+ /* Will this happen in real-world ? */
+ if ((mp->m_flags & PMUTEX_FLAG_DEFERRED) != 0) {
+ deferred = 1;
+ mp->m_flags &= ~PMUTEX_FLAG_DEFERRED;
+ } else
+ deferred = 0;
+
+ if (deferred) {
+ _thr_wake_all(curthread->defer_waiters,
+ curthread->nwaiter_defer);
+ curthread->nwaiter_defer = 0;
+ }
+ return (0);
+}
+
+static int
+mutex_self_trylock(struct pthread_mutex *m)
+{
+ int ret;
+
+ switch (PMUTEX_TYPE(m->m_flags)) {
+ case PTHREAD_MUTEX_ERRORCHECK:
+ case PTHREAD_MUTEX_NORMAL:
+ case PTHREAD_MUTEX_ADAPTIVE_NP:
+ ret = EBUSY;
+ break;
+
+ case PTHREAD_MUTEX_RECURSIVE:
+ /* Increment the lock count: */
+ if (m->m_count + 1 > 0) {
+ m->m_count++;
+ ret = 0;
+ } else
+ ret = EAGAIN;
+ break;
+
+ default:
+ /* Trap invalid mutex types; */
+ ret = EINVAL;
+ }
+
+ return (ret);
+}
+
+static int
+mutex_self_lock(struct pthread_mutex *m, const struct timespec *abstime)
+{
+ struct timespec ts1, ts2;
+ int ret;
+
+ switch (PMUTEX_TYPE(m->m_flags)) {
+ case PTHREAD_MUTEX_ERRORCHECK:
+ case PTHREAD_MUTEX_ADAPTIVE_NP:
+ if (abstime) {
+ if (abstime->tv_sec < 0 || abstime->tv_nsec < 0 ||
+ abstime->tv_nsec >= 1000000000) {
+ ret = EINVAL;
+ } else {
+ clock_gettime(CLOCK_REALTIME, &ts1);
+ TIMESPEC_SUB(&ts2, abstime, &ts1);
+ __sys_nanosleep(&ts2, NULL);
+ ret = ETIMEDOUT;
+ }
+ } else {
+ /*
+ * POSIX specifies that mutexes should return
+ * EDEADLK if a recursive lock is detected.
+ */
+ ret = EDEADLK;
+ }
+ break;
+
+ case PTHREAD_MUTEX_NORMAL:
+ /*
+ * What SS2 define as a 'normal' mutex. Intentionally
+ * deadlock on attempts to get a lock you already own.
+ */
+ ret = 0;
+ if (abstime) {
+ if (abstime->tv_sec < 0 || abstime->tv_nsec < 0 ||
+ abstime->tv_nsec >= 1000000000) {
+ ret = EINVAL;
+ } else {
+ clock_gettime(CLOCK_REALTIME, &ts1);
+ TIMESPEC_SUB(&ts2, abstime, &ts1);
+ __sys_nanosleep(&ts2, NULL);
+ ret = ETIMEDOUT;
+ }
+ } else {
+ ts1.tv_sec = 30;
+ ts1.tv_nsec = 0;
+ for (;;)
+ __sys_nanosleep(&ts1, NULL);
+ }
+ break;
+
+ case PTHREAD_MUTEX_RECURSIVE:
+ /* Increment the lock count: */
+ if (m->m_count + 1 > 0) {
+ m->m_count++;
+ ret = 0;
+ } else
+ ret = EAGAIN;
+ break;
+
+ default:
+ /* Trap invalid mutex types; */
+ ret = EINVAL;
+ }
+
+ return (ret);
+}
+
+static int
+mutex_unlock_common(struct pthread_mutex *m, bool cv, int *mtx_defer)
+{
+ struct pthread *curthread;
+ uint32_t id;
+ int deferred, error, robust;
+
+ if (__predict_false(m <= THR_MUTEX_DESTROYED)) {
+ if (m == THR_MUTEX_DESTROYED)
+ return (EINVAL);
+ return (EPERM);
+ }
+
+ curthread = _get_curthread();
+ id = TID(curthread);
+
+ /*
+ * Check if the running thread is not the owner of the mutex.
+ */
+ if (__predict_false(PMUTEX_OWNER_ID(m) != id))
+ return (EPERM);
+
+ error = 0;
+ if (__predict_false(PMUTEX_TYPE(m->m_flags) ==
+ PTHREAD_MUTEX_RECURSIVE && m->m_count > 0)) {
+ m->m_count--;
+ } else {
+ if ((m->m_flags & PMUTEX_FLAG_DEFERRED) != 0) {
+ deferred = 1;
+ m->m_flags &= ~PMUTEX_FLAG_DEFERRED;
+ } else
+ deferred = 0;
+
+ robust = _mutex_enter_robust(curthread, m);
+ dequeue_mutex(curthread, m);
+ error = _thr_umutex_unlock2(&m->m_lock, id, mtx_defer);
+ if (deferred) {
+ if (mtx_defer == NULL) {
+ _thr_wake_all(curthread->defer_waiters,
+ curthread->nwaiter_defer);
+ curthread->nwaiter_defer = 0;
+ } else
+ *mtx_defer = 1;
+ }
+ if (robust)
+ _mutex_leave_robust(curthread, m);
+ }
+ if (!cv && m->m_flags & PMUTEX_FLAG_PRIVATE)
+ THR_CRITICAL_LEAVE(curthread);
+ return (error);
+}
+
+int
+_pthread_mutex_getprioceiling(const pthread_mutex_t * __restrict mutex,
+ int * __restrict prioceiling)
+{
+ struct pthread_mutex *m;
+
+ if (*mutex == THR_PSHARED_PTR) {
+ m = __thr_pshared_offpage(__DECONST(void *, mutex), 0);
+ if (m == NULL)
+ return (EINVAL);
+ shared_mutex_init(m, NULL);
+ } else {
+ m = *mutex;
+ if (m <= THR_MUTEX_DESTROYED)
+ return (EINVAL);
+ }
+ if ((m->m_lock.m_flags & UMUTEX_PRIO_PROTECT) == 0)
+ return (EINVAL);
+ *prioceiling = m->m_lock.m_ceilings[0];
+ return (0);
+}
+
+int
+_pthread_mutex_setprioceiling(pthread_mutex_t * __restrict mutex,
+ int ceiling, int * __restrict old_ceiling)
+{
+ struct pthread *curthread;
+ struct pthread_mutex *m, *m1, *m2;
+ struct mutex_queue *q, *qp;
+ int qidx, ret;
+
+ if (*mutex == THR_PSHARED_PTR) {
+ m = __thr_pshared_offpage(mutex, 0);
+ if (m == NULL)
+ return (EINVAL);
+ shared_mutex_init(m, NULL);
+ } else {
+ m = *mutex;
+ if (m <= THR_MUTEX_DESTROYED)
+ return (EINVAL);
+ }
+ if ((m->m_lock.m_flags & UMUTEX_PRIO_PROTECT) == 0)
+ return (EINVAL);
+
+ ret = __thr_umutex_set_ceiling(&m->m_lock, ceiling, old_ceiling);
+ if (ret != 0)
+ return (ret);
+
+ curthread = _get_curthread();
+ if (PMUTEX_OWNER_ID(m) == TID(curthread)) {
+ mutex_assert_is_owned(m);
+ m1 = TAILQ_PREV(m, mutex_queue, m_qe);
+ m2 = TAILQ_NEXT(m, m_qe);
+ if ((m1 != NULL && m1->m_lock.m_ceilings[0] > (u_int)ceiling) ||
+ (m2 != NULL && m2->m_lock.m_ceilings[0] < (u_int)ceiling)) {
+ qidx = mutex_qidx(m);
+ q = &curthread->mq[qidx];
+ qp = &curthread->mq[qidx + 1];
+ TAILQ_REMOVE(q, m, m_qe);
+ if (!is_pshared_mutex(m))
+ TAILQ_REMOVE(qp, m, m_pqe);
+ TAILQ_FOREACH(m2, q, m_qe) {
+ if (m2->m_lock.m_ceilings[0] > (u_int)ceiling) {
+ TAILQ_INSERT_BEFORE(m2, m, m_qe);
+ if (!is_pshared_mutex(m)) {
+ while (m2 != NULL &&
+ is_pshared_mutex(m2)) {
+ m2 = TAILQ_PREV(m2,
+ mutex_queue, m_qe);
+ }
+ if (m2 == NULL) {
+ TAILQ_INSERT_HEAD(qp,
+ m, m_pqe);
+ } else {
+ TAILQ_INSERT_BEFORE(m2,
+ m, m_pqe);
+ }
+ }
+ return (0);
+ }
+ }
+ TAILQ_INSERT_TAIL(q, m, m_qe);
+ if (!is_pshared_mutex(m))
+ TAILQ_INSERT_TAIL(qp, m, m_pqe);
+ }
+ }
+ return (0);
+}
+
+int
+_pthread_mutex_getspinloops_np(pthread_mutex_t *mutex, int *count)
+{
+ struct pthread_mutex *m;
+ int ret;
+
+ ret = check_and_init_mutex(mutex, &m);
+ if (ret == 0)
+ *count = m->m_spinloops;
+ return (ret);
+}
+
+int
+__pthread_mutex_setspinloops_np(pthread_mutex_t *mutex, int count)
+{
+ struct pthread_mutex *m;
+ int ret;
+
+ ret = check_and_init_mutex(mutex, &m);
+ if (ret == 0)
+ m->m_spinloops = count;
+ return (ret);
+}
+
+int
+_pthread_mutex_getyieldloops_np(pthread_mutex_t *mutex, int *count)
+{
+ struct pthread_mutex *m;
+ int ret;
+
+ ret = check_and_init_mutex(mutex, &m);
+ if (ret == 0)
+ *count = m->m_yieldloops;
+ return (ret);
+}
+
+int
+__pthread_mutex_setyieldloops_np(pthread_mutex_t *mutex, int count)
+{
+ struct pthread_mutex *m;
+ int ret;
+
+ ret = check_and_init_mutex(mutex, &m);
+ if (ret == 0)
+ m->m_yieldloops = count;
+ return (0);
+}
+
+int
+_pthread_mutex_isowned_np(pthread_mutex_t *mutex)
+{
+ struct pthread_mutex *m;
+
+ if (*mutex == THR_PSHARED_PTR) {
+ m = __thr_pshared_offpage(mutex, 0);
+ if (m == NULL)
+ return (0);
+ shared_mutex_init(m, NULL);
+ } else {
+ m = *mutex;
+ if (m <= THR_MUTEX_DESTROYED)
+ return (0);
+ }
+ return (PMUTEX_OWNER_ID(m) == TID(_get_curthread()));
+}
+
+int
+_mutex_owned(struct pthread *curthread, const struct pthread_mutex *mp)
+{
+
+ if (__predict_false(mp <= THR_MUTEX_DESTROYED)) {
+ if (mp == THR_MUTEX_DESTROYED)
+ return (EINVAL);
+ return (EPERM);
+ }
+ if (PMUTEX_OWNER_ID(mp) != TID(curthread))
+ return (EPERM);
+ return (0);
+}
+
+int
+_pthread_mutex_consistent(pthread_mutex_t *mutex)
+{
+ struct pthread_mutex *m;
+ struct pthread *curthread;
+
+ if (*mutex == THR_PSHARED_PTR) {
+ m = __thr_pshared_offpage(mutex, 0);
+ if (m == NULL)
+ return (EINVAL);
+ shared_mutex_init(m, NULL);
+ } else {
+ m = *mutex;
+ if (m <= THR_MUTEX_DESTROYED)
+ return (EINVAL);
+ }
+ curthread = _get_curthread();
+ if ((m->m_lock.m_flags & (UMUTEX_ROBUST | UMUTEX_NONCONSISTENT)) !=
+ (UMUTEX_ROBUST | UMUTEX_NONCONSISTENT))
+ return (EINVAL);
+ if (PMUTEX_OWNER_ID(m) != TID(curthread))
+ return (EPERM);
+ m->m_lock.m_flags &= ~UMUTEX_NONCONSISTENT;
+ return (0);
+}
diff --git a/lib/libthr/thread/thr_mutexattr.c b/lib/libthr/thread/thr_mutexattr.c
new file mode 100644
index 0000000000000..8313728e5cfbe
--- /dev/null
+++ b/lib/libthr/thread/thr_mutexattr.c
@@ -0,0 +1,295 @@
+/*-
+ * SPDX-License-Identifier: BSD-3-Clause
+ *
+ * Copyright (c) 1996 Jeffrey Hsu <hsu@freebsd.org>.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. Neither the name of the author nor the names of any co-contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY JOHN BIRRELL AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+/*
+ * Copyright (c) 1997 John Birrell <jb@cimlogic.com.au>.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. Neither the name of the author nor the names of any co-contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY JOHN BIRRELL AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ */
+
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+
+#include "namespace.h"
+#include <string.h>
+#include <stdlib.h>
+#include <errno.h>
+#include <pthread.h>
+#include <pthread_np.h>
+#include "un-namespace.h"
+
+#include "thr_private.h"
+
+__weak_reference(_pthread_mutexattr_init, pthread_mutexattr_init);
+__weak_reference(_pthread_mutexattr_setkind_np, pthread_mutexattr_setkind_np);
+__weak_reference(_pthread_mutexattr_getkind_np, pthread_mutexattr_getkind_np);
+__weak_reference(_pthread_mutexattr_gettype, pthread_mutexattr_gettype);
+__weak_reference(_pthread_mutexattr_settype, pthread_mutexattr_settype);
+__weak_reference(_pthread_mutexattr_destroy, pthread_mutexattr_destroy);
+__weak_reference(_pthread_mutexattr_getpshared, pthread_mutexattr_getpshared);
+__weak_reference(_pthread_mutexattr_setpshared, pthread_mutexattr_setpshared);
+__weak_reference(_pthread_mutexattr_getprotocol, pthread_mutexattr_getprotocol);
+__weak_reference(_pthread_mutexattr_setprotocol, pthread_mutexattr_setprotocol);
+__weak_reference(_pthread_mutexattr_getprioceiling,
+ pthread_mutexattr_getprioceiling);
+__weak_reference(_pthread_mutexattr_setprioceiling,
+ pthread_mutexattr_setprioceiling);
+__weak_reference(_pthread_mutexattr_getrobust, pthread_mutexattr_getrobust);
+__weak_reference(_pthread_mutexattr_setrobust, pthread_mutexattr_setrobust);
+
+/*
+ * pthread_mutexattr_init(): allocate a new attribute object seeded
+ * from _pthread_mutexattr_default.  Returns 0 or ENOMEM.
+ */
+int
+_pthread_mutexattr_init(pthread_mutexattr_t *attr)
+{
+ int ret;
+ pthread_mutexattr_t pattr;
+
+ if ((pattr = (pthread_mutexattr_t)
+ malloc(sizeof(struct pthread_mutex_attr))) == NULL) {
+ ret = ENOMEM;
+ } else {
+ memcpy(pattr, &_pthread_mutexattr_default,
+ sizeof(struct pthread_mutex_attr));
+ *attr = pattr;
+ ret = 0;
+ }
+ return (ret);
+}
+
+/*
+ * Legacy np interface: unlike the POSIX functions below, this returns
+ * -1 and sets errno on failure.  No validation of "kind" is done.
+ */
+int
+_pthread_mutexattr_setkind_np(pthread_mutexattr_t *attr, int kind)
+{
+ int ret;
+ if (attr == NULL || *attr == NULL) {
+ errno = EINVAL;
+ ret = -1;
+ } else {
+ (*attr)->m_type = kind;
+ ret = 0;
+ }
+ return(ret);
+}
+
+/*
+ * Legacy np interface: returns the kind directly (not via a pointer),
+ * or -1 with errno = EINVAL.  Note attr is passed by value here.
+ */
+int
+_pthread_mutexattr_getkind_np(pthread_mutexattr_t attr)
+{
+ int ret;
+
+ if (attr == NULL) {
+ errno = EINVAL;
+ ret = -1;
+ } else {
+ ret = attr->m_type;
+ }
+ return (ret);
+}
+
+/*
+ * pthread_mutexattr_settype(): store the mutex type.  Rejects values
+ * >= PTHREAD_MUTEX_TYPE_MAX.  NOTE(review): no lower-bound check is
+ * performed, so a negative type is accepted here -- confirm intended.
+ */
+int
+_pthread_mutexattr_settype(pthread_mutexattr_t *attr, int type)
+{
+ int ret;
+
+ if (attr == NULL || *attr == NULL || type >= PTHREAD_MUTEX_TYPE_MAX) {
+ ret = EINVAL;
+ } else {
+ (*attr)->m_type = type;
+ ret = 0;
+ }
+ return (ret);
+}
+
+/*
+ * pthread_mutexattr_gettype(): return the stored type; EINVAL if the
+ * attribute is missing or holds an out-of-range type.
+ */
+int
+_pthread_mutexattr_gettype(const pthread_mutexattr_t * __restrict attr,
+ int * __restrict type)
+{
+ int ret;
+
+ if (attr == NULL || *attr == NULL || (*attr)->m_type >=
+ PTHREAD_MUTEX_TYPE_MAX) {
+ ret = EINVAL;
+ } else {
+ *type = (*attr)->m_type;
+ ret = 0;
+ }
+ return (ret);
+}
+
+/*
+ * pthread_mutexattr_destroy(): free the attribute object and NULL the
+ * caller's handle so reuse fails cleanly with EINVAL.
+ */
+int
+_pthread_mutexattr_destroy(pthread_mutexattr_t *attr)
+{
+ int ret;
+ if (attr == NULL || *attr == NULL) {
+ ret = EINVAL;
+ } else {
+ free(*attr);
+ *attr = NULL;
+ ret = 0;
+ }
+ return (ret);
+}
+
+/* pthread_mutexattr_getpshared(): report the process-shared setting. */
+int
+_pthread_mutexattr_getpshared(const pthread_mutexattr_t *attr,
+ int *pshared)
+{
+
+ if (attr == NULL || *attr == NULL)
+ return (EINVAL);
+ *pshared = (*attr)->m_pshared;
+ return (0);
+}
+
+/*
+ * pthread_mutexattr_setpshared(): accept only PTHREAD_PROCESS_PRIVATE
+ * or PTHREAD_PROCESS_SHARED.
+ */
+int
+_pthread_mutexattr_setpshared(pthread_mutexattr_t *attr, int pshared)
+{
+
+ if (attr == NULL || *attr == NULL ||
+ (pshared != PTHREAD_PROCESS_PRIVATE &&
+ pshared != PTHREAD_PROCESS_SHARED))
+ return (EINVAL);
+ (*attr)->m_pshared = pshared;
+ return (0);
+}
+
+/* pthread_mutexattr_getprotocol(): report the priority protocol. */
+int
+_pthread_mutexattr_getprotocol(const pthread_mutexattr_t * __restrict mattr,
+ int * __restrict protocol)
+{
+ int ret = 0;
+
+ if (mattr == NULL || *mattr == NULL)
+ ret = EINVAL;
+ else
+ *protocol = (*mattr)->m_protocol;
+
+ return (ret);
+}
+
+/*
+ * pthread_mutexattr_setprotocol(): set PRIO_NONE/INHERIT/PROTECT.
+ * Note: also resets the ceiling to THR_MAX_RR_PRIORITY as a default;
+ * callers wanting another ceiling must call setprioceiling() after.
+ */
+int
+_pthread_mutexattr_setprotocol(pthread_mutexattr_t *mattr, int protocol)
+{
+ int ret = 0;
+
+ if (mattr == NULL || *mattr == NULL ||
+ protocol < PTHREAD_PRIO_NONE || protocol > PTHREAD_PRIO_PROTECT)
+ ret = EINVAL;
+ else {
+ (*mattr)->m_protocol = protocol;
+ (*mattr)->m_ceiling = THR_MAX_RR_PRIORITY;
+ }
+ return (ret);
+}
+
+/*
+ * pthread_mutexattr_getprioceiling(): the ceiling is only meaningful
+ * for PTHREAD_PRIO_PROTECT; anything else is EINVAL.
+ */
+int
+_pthread_mutexattr_getprioceiling(const pthread_mutexattr_t * __restrict mattr,
+ int * __restrict prioceiling)
+{
+ int ret = 0;
+
+ if (mattr == NULL || *mattr == NULL)
+ ret = EINVAL;
+ else if ((*mattr)->m_protocol != PTHREAD_PRIO_PROTECT)
+ ret = EINVAL;
+ else
+ *prioceiling = (*mattr)->m_ceiling;
+
+ return (ret);
+}
+
+/*
+ * pthread_mutexattr_setprioceiling(): requires PTHREAD_PRIO_PROTECT.
+ * NOTE(review): prioceiling itself is not range-checked here --
+ * presumably validated when the mutex is created; confirm.
+ */
+int
+_pthread_mutexattr_setprioceiling(pthread_mutexattr_t *mattr, int prioceiling)
+{
+ int ret = 0;
+
+ if (mattr == NULL || *mattr == NULL)
+ ret = EINVAL;
+ else if ((*mattr)->m_protocol != PTHREAD_PRIO_PROTECT)
+ ret = EINVAL;
+ else
+ (*mattr)->m_ceiling = prioceiling;
+
+ return (ret);
+}
+
+/* pthread_mutexattr_getrobust(): report STALLED vs ROBUST setting. */
+int
+_pthread_mutexattr_getrobust(pthread_mutexattr_t *mattr, int *robust)
+{
+ int ret;
+
+ if (mattr == NULL || *mattr == NULL) {
+ ret = EINVAL;
+ } else {
+ ret = 0;
+ *robust = (*mattr)->m_robust;
+ }
+ return (ret);
+}
+
+/*
+ * pthread_mutexattr_setrobust(): accept only PTHREAD_MUTEX_STALLED or
+ * PTHREAD_MUTEX_ROBUST.
+ */
+int
+_pthread_mutexattr_setrobust(pthread_mutexattr_t *mattr, int robust)
+{
+ int ret;
+
+ if (mattr == NULL || *mattr == NULL) {
+ ret = EINVAL;
+ } else if (robust != PTHREAD_MUTEX_STALLED &&
+ robust != PTHREAD_MUTEX_ROBUST) {
+ ret = EINVAL;
+ } else {
+ ret = 0;
+ (*mattr)->m_robust = robust;
+ }
+ return (ret);
+}
+
diff --git a/lib/libthr/thread/thr_once.c b/lib/libthr/thread/thr_once.c
new file mode 100644
index 0000000000000..90778440b76e5
--- /dev/null
+++ b/lib/libthr/thread/thr_once.c
@@ -0,0 +1,105 @@
+/*-
+ * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
+ *
+ * Copyright (c) 2005, David Xu <davidxu@freebsd.org>
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice unmodified, this list of conditions, and the following
+ * disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+
+#include "namespace.h"
+#include <pthread.h>
+#include "un-namespace.h"
+
+#include "thr_private.h"
+
+__weak_reference(_pthread_once, pthread_once);
+
+#define ONCE_NEVER_DONE PTHREAD_NEEDS_INIT
+#define ONCE_DONE PTHREAD_DONE_INIT
+#define ONCE_IN_PROGRESS 0x02
+#define ONCE_WAIT 0x03
+
+/*
+ * POSIX:
+ * The pthread_once() function is not a cancellation point. However,
+ * if init_routine is a cancellation point and is canceled, the effect
+ * on once_control shall be as if pthread_once() was never called.
+ */
+
+/*
+ * Cleanup handler installed around init_routine: if the initializer is
+ * cancelled, roll the state back to ONCE_NEVER_DONE so a later caller
+ * retries.  If the fast cmpset from ONCE_IN_PROGRESS fails, waiters
+ * have bumped the state to ONCE_WAIT, so store the rollback and wake
+ * all sleepers on the state word.
+ */
+static void
+once_cancel_handler(void *arg)
+{
+ pthread_once_t *once_control;
+
+ once_control = arg;
+ if (atomic_cmpset_rel_int(&once_control->state, ONCE_IN_PROGRESS,
+ ONCE_NEVER_DONE))
+ return;
+ atomic_store_rel_int(&once_control->state, ONCE_NEVER_DONE);
+ _thr_umtx_wake(&once_control->state, INT_MAX, 0);
+}
+
+/*
+ * pthread_once(): lock-free state machine over once_control->state.
+ * NEVER_DONE -> IN_PROGRESS (winner runs init) -> DONE; contenders
+ * move IN_PROGRESS -> WAIT and sleep on the state word until woken.
+ */
+int
+_pthread_once(pthread_once_t *once_control, void (*init_routine) (void))
+{
+ struct pthread *curthread;
+ int state;
+
+ _thr_check_init();
+
+ for (;;) {
+ state = once_control->state;
+ if (state == ONCE_DONE) {
+ /* Pair with the release store/cmpset of ONCE_DONE below. */
+ atomic_thread_fence_acq();
+ return (0);
+ }
+ if (state == ONCE_NEVER_DONE) {
+ /* Race to become the initializer. */
+ if (atomic_cmpset_int(&once_control->state, state,
+ ONCE_IN_PROGRESS))
+ break;
+ } else if (state == ONCE_IN_PROGRESS) {
+ /* Advertise a waiter, then sleep until state changes. */
+ if (atomic_cmpset_int(&once_control->state, state,
+ ONCE_WAIT))
+ _thr_umtx_wait_uint(&once_control->state,
+ ONCE_WAIT, NULL, 0);
+ } else if (state == ONCE_WAIT) {
+ _thr_umtx_wait_uint(&once_control->state, state,
+ NULL, 0);
+ } else
+ return (EINVAL);
+ }
+
+ /* We won the race: run the initializer with cancellation rollback. */
+ curthread = _get_curthread();
+ THR_CLEANUP_PUSH(curthread, once_cancel_handler, once_control);
+ init_routine();
+ THR_CLEANUP_POP(curthread, 0);
+ if (atomic_cmpset_rel_int(&once_control->state, ONCE_IN_PROGRESS,
+ ONCE_DONE))
+ return (0);
+ /* Waiters arrived meanwhile (state is ONCE_WAIT): publish and wake. */
+ atomic_store_rel_int(&once_control->state, ONCE_DONE);
+ _thr_umtx_wake(&once_control->state, INT_MAX, 0);
+ return (0);
+}
diff --git a/lib/libthr/thread/thr_printf.c b/lib/libthr/thread/thr_printf.c
new file mode 100644
index 0000000000000..dd761783a2a67
--- /dev/null
+++ b/lib/libthr/thread/thr_printf.c
@@ -0,0 +1,153 @@
+/*-
+ * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
+ *
+ * Copyright (c) 2002 Jonathan Mini <mini@freebsd.org>
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+
+#include <stdarg.h>
+#include <string.h>
+#include <unistd.h>
+#include <pthread.h>
+
+#include "libc_private.h"
+#include "thr_private.h"
+
+static void pchar(int fd, char c);
+static void pstr(int fd, const char *s);
+
+/*
+ * Write formatted output to the given file descriptor, bypassing
+ * stdio so it is usable from low-level thread-library code.
+ *
+ * Recognises the following conversions:
+ * %c -> char
+ * %d -> signed int (base 10)
+ * %s -> string
+ * %u -> unsigned int (base 10)
+ * %x -> unsigned int (base 16)
+ * %p -> pointer (base 16, "0x"-prefixed)
+ */
+void
+_thread_printf(int fd, const char *fmt, ...)
+{
+ va_list ap;
+
+ va_start(ap, fmt);
+ _thread_vprintf(fd, fmt, ap);
+ va_end(ap);
+}
+
+/*
+ * Core formatter for _thread_printf(): emits directly to fd via
+ * pchar()/pstr().  Supports %c %s %d %u %x %p with optional 'l'
+ * (long) and '#' (alternate form, "0x" prefix for %x) flags.
+ */
+void
+_thread_vprintf(int fd, const char *fmt, va_list ap)
+{
+ static const char digits[16] = "0123456789abcdef";
+ char buf[20]; /* holds a full 64-bit value in base 10 */
+ char *s;
+ unsigned long r, u;
+ int c;
+ long d;
+ int islong, isalt;
+
+ while ((c = *fmt++)) {
+ isalt = 0;
+ islong = 0;
+ if (c == '%') {
+next: c = *fmt++;
+ if (c == '\0')
+ return;
+ switch (c) {
+ case '#':
+ isalt = 1;
+ goto next;
+ case 'c':
+ pchar(fd, va_arg(ap, int));
+ continue;
+ case 's':
+ pstr(fd, va_arg(ap, char *));
+ continue;
+ case 'l':
+ islong = 1;
+ goto next;
+ case 'p':
+ pstr(fd, "0x");
+ islong = 1;
+ /* FALLTHROUGH */
+ case 'd':
+ case 'u':
+ case 'x':
+ if (c == 'x' && isalt)
+ pstr(fd, "0x");
+ r = ((c == 'u') || (c == 'd')) ? 10 : 16;
+ if (c == 'd') {
+ /*
+ * Fix: fetch %d arguments with their real
+ * signed types.  Reading them as unsigned
+ * (the previous code) meant a negative int
+ * widened to a huge positive long on LP64,
+ * so "d < 0" never fired and negatives
+ * printed wrong.
+ */
+ if (islong)
+ d = va_arg(ap, long);
+ else
+ d = va_arg(ap, int);
+ if (d < 0) {
+ pchar(fd, '-');
+ /*
+ * Negate in unsigned arithmetic;
+ * (d * -1) was undefined for
+ * LONG_MIN.
+ */
+ u = -(unsigned long)d;
+ } else
+ u = (unsigned long)d;
+ } else {
+ if (islong)
+ u = va_arg(ap, unsigned long);
+ else
+ u = va_arg(ap, unsigned);
+ }
+ /* Build digits least-significant first, emit reversed. */
+ s = buf;
+ do {
+ *s++ = digits[u % r];
+ } while (u /= r);
+ while (--s >= buf)
+ pchar(fd, *s);
+ continue;
+ }
+ }
+ pchar(fd, c);
+ }
+}
+
+/*
+ * Write a single character to the given file descriptor using the
+ * raw __sys_write() stub (no stdio buffering or locking involved).
+ */
+static void
+pchar(int fd, char c)
+{
+
+ __sys_write(fd, &c, 1);
+}
+
+/*
+ * Write a NUL-terminated string to the given file descriptor using
+ * the raw __sys_write() stub.
+ */
+static void
+pstr(int fd, const char *s)
+{
+
+ __sys_write(fd, s, strlen(s));
+}
+
diff --git a/lib/libthr/thread/thr_private.h b/lib/libthr/thread/thr_private.h
new file mode 100644
index 0000000000000..eac8880b5d739
--- /dev/null
+++ b/lib/libthr/thread/thr_private.h
@@ -0,0 +1,1015 @@
+/*-
+ * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
+ *
+ * Copyright (C) 2005 Daniel M. Eischen <deischen@freebsd.org>
+ * Copyright (c) 2005 David Xu <davidxu@freebsd.org>
+ * Copyright (c) 1995-1998 John Birrell <jb@cimlogic.com.au>.
+ *
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice unmodified, this list of conditions, and the following
+ * disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * $FreeBSD$
+ */
+
+#ifndef _THR_PRIVATE_H
+#define _THR_PRIVATE_H
+
+/*
+ * Include files.
+ */
+#include <sys/types.h>
+#include <sys/time.h>
+#include <sys/cdefs.h>
+#include <sys/queue.h>
+#include <sys/param.h>
+#include <sys/cpuset.h>
+#include <machine/atomic.h>
+#include <errno.h>
+#include <limits.h>
+#include <signal.h>
+#include <stdbool.h>
+#include <stddef.h>
+#include <stdio.h>
+#include <unistd.h>
+#include <ucontext.h>
+#include <sys/thr.h>
+#include <pthread.h>
+
+__NULLABILITY_PRAGMA_PUSH
+
+#define SYM_FB10(sym) __CONCAT(sym, _fb10)
+#define SYM_FBP10(sym) __CONCAT(sym, _fbp10)
+#define WEAK_REF(sym, alias) __weak_reference(sym, alias)
+#define SYM_COMPAT(sym, impl, ver) __sym_compat(sym, impl, ver)
+#define SYM_DEFAULT(sym, impl, ver) __sym_default(sym, impl, ver)
+
+#define FB10_COMPAT(func, sym) \
+ WEAK_REF(func, SYM_FB10(sym)); \
+ SYM_COMPAT(sym, SYM_FB10(sym), FBSD_1.0)
+
+#define FB10_COMPAT_PRIVATE(func, sym) \
+ WEAK_REF(func, SYM_FBP10(sym)); \
+ SYM_DEFAULT(sym, SYM_FBP10(sym), FBSDprivate_1.0)
+
+struct pthread;
+extern struct pthread *_thr_initial __hidden;
+
+#include "pthread_md.h"
+#include "thr_umtx.h"
+#include "thread_db.h"
+
+#ifdef _PTHREAD_FORCED_UNWIND
+#define _BSD_SOURCE
+#include <unwind.h>
+#endif
+
+typedef TAILQ_HEAD(pthreadlist, pthread) pthreadlist;
+typedef TAILQ_HEAD(atfork_head, pthread_atfork) atfork_head;
+TAILQ_HEAD(mutex_queue, pthread_mutex);
+
+/* Signal to do cancellation */
+#define SIGCANCEL SIGTHR
+
+/*
+ * Kernel fatal error handler macro.
+ */
+#define PANIC(args...) _thread_exitf(__FILE__, __LINE__, ##args)
+
+/* Output debug messages like this: */
+#define stdout_debug(args...) _thread_printf(STDOUT_FILENO, ##args)
+#define stderr_debug(args...) _thread_printf(STDERR_FILENO, ##args)
+
+#ifdef _PTHREADS_INVARIANTS
+#define THR_ASSERT(cond, msg) do { \
+ if (__predict_false(!(cond))) \
+ PANIC(msg); \
+} while (0)
+#else
+#define THR_ASSERT(cond, msg)
+#endif
+
+#ifdef PIC
+# define STATIC_LIB_REQUIRE(name)
+#else
+# define STATIC_LIB_REQUIRE(name) __asm (".globl " #name)
+#endif
+
+#define TIMESPEC_ADD(dst, src, val) \
+ do { \
+ (dst)->tv_sec = (src)->tv_sec + (val)->tv_sec; \
+ (dst)->tv_nsec = (src)->tv_nsec + (val)->tv_nsec; \
+ if ((dst)->tv_nsec >= 1000000000) { \
+ (dst)->tv_sec++; \
+ (dst)->tv_nsec -= 1000000000; \
+ } \
+ } while (0)
+
+#define TIMESPEC_SUB(dst, src, val) \
+ do { \
+ (dst)->tv_sec = (src)->tv_sec - (val)->tv_sec; \
+ (dst)->tv_nsec = (src)->tv_nsec - (val)->tv_nsec; \
+ if ((dst)->tv_nsec < 0) { \
+ (dst)->tv_sec--; \
+ (dst)->tv_nsec += 1000000000; \
+ } \
+ } while (0)
+
+/* Magic cookie set for shared pthread locks and cv's pointers */
+#define THR_PSHARED_PTR \
+ ((void *)(uintptr_t)((1ULL << (NBBY * sizeof(long) - 1)) | 1))
+
+/* XXX These values should be same as those defined in pthread.h */
+#define THR_MUTEX_INITIALIZER ((struct pthread_mutex *)NULL)
+#define THR_ADAPTIVE_MUTEX_INITIALIZER ((struct pthread_mutex *)1)
+#define THR_MUTEX_DESTROYED ((struct pthread_mutex *)2)
+#define THR_COND_INITIALIZER ((struct pthread_cond *)NULL)
+#define THR_COND_DESTROYED ((struct pthread_cond *)1)
+#define THR_RWLOCK_INITIALIZER ((struct pthread_rwlock *)NULL)
+#define THR_RWLOCK_DESTROYED ((struct pthread_rwlock *)1)
+
+#define PMUTEX_FLAG_TYPE_MASK 0x0ff
+#define PMUTEX_FLAG_PRIVATE 0x100
+#define PMUTEX_FLAG_DEFERRED 0x200
+#define PMUTEX_TYPE(mtxflags) ((mtxflags) & PMUTEX_FLAG_TYPE_MASK)
+
+#define PMUTEX_OWNER_ID(m) ((m)->m_lock.m_owner & ~UMUTEX_CONTESTED)
+
+#define MAX_DEFER_WAITERS 50
+
+/*
+ * Values for pthread_mutex m_ps indicator.
+ */
+#define PMUTEX_INITSTAGE_ALLOC 0
+#define PMUTEX_INITSTAGE_BUSY 1
+#define PMUTEX_INITSTAGE_DONE 2
+
+struct pthread_mutex {
+ /*
+ * Lock for accesses to this structure.
+ */
+ struct umutex m_lock;
+ int m_flags;
+ int m_count;
+ int m_spinloops;
+ int m_yieldloops;
+ int m_ps; /* pshared init stage */
+ /*
+ * Link for all mutexes a thread currently owns, of the same
+ * prio type.
+ */
+ TAILQ_ENTRY(pthread_mutex) m_qe;
+ /* Link for all private mutexes a thread currently owns. */
+ TAILQ_ENTRY(pthread_mutex) m_pqe;
+ struct pthread_mutex *m_rb_prev;
+};
+
+struct pthread_mutex_attr {
+ enum pthread_mutextype m_type;
+ int m_protocol;
+ int m_ceiling;
+ int m_pshared;
+ int m_robust;
+};
+
+#define PTHREAD_MUTEXATTR_STATIC_INITIALIZER \
+ { PTHREAD_MUTEX_DEFAULT, PTHREAD_PRIO_NONE, 0, MUTEX_FLAGS_PRIVATE, \
+ PTHREAD_MUTEX_STALLED }
+
+struct pthread_cond {
+ __uint32_t __has_user_waiters;
+ struct ucond kcond;
+};
+
+struct pthread_cond_attr {
+ int c_pshared;
+ int c_clockid;
+};
+
+struct pthread_barrier {
+ struct umutex b_lock;
+ struct ucond b_cv;
+ int64_t b_cycle;
+ int b_count;
+ int b_waiters;
+ int b_refcount;
+ int b_destroying;
+};
+
+struct pthread_barrierattr {
+ int pshared;
+};
+
+struct pthread_spinlock {
+ struct umutex s_lock;
+};
+
+/*
+ * Flags for condition variables.
+ */
+#define COND_FLAGS_PRIVATE 0x01
+#define COND_FLAGS_INITED 0x02
+#define COND_FLAGS_BUSY 0x04
+
+/*
+ * Cleanup definitions.
+ */
+struct pthread_cleanup {
+ struct pthread_cleanup *prev;
+ void (*routine)(void *);
+ void *routine_arg;
+ int onheap;
+};
+
+#define THR_CLEANUP_PUSH(td, func, arg) { \
+ struct pthread_cleanup __cup; \
+ \
+ __cup.routine = func; \
+ __cup.routine_arg = arg; \
+ __cup.onheap = 0; \
+ __cup.prev = (td)->cleanup; \
+ (td)->cleanup = &__cup;
+
+#define THR_CLEANUP_POP(td, exec) \
+ (td)->cleanup = __cup.prev; \
+ if ((exec) != 0) \
+ __cup.routine(__cup.routine_arg); \
+}
+
+struct pthread_atfork {
+ TAILQ_ENTRY(pthread_atfork) qe;
+ void (*prepare)(void);
+ void (*parent)(void);
+ void (*child)(void);
+};
+
+struct pthread_attr {
+#define pthread_attr_start_copy sched_policy
+ int sched_policy;
+ int sched_inherit;
+ int prio;
+ int suspend;
+#define THR_STACK_USER 0x100 /* 0xFF reserved for <pthread.h> */
+ int flags;
+ void *stackaddr_attr;
+ size_t stacksize_attr;
+ size_t guardsize_attr;
+#define pthread_attr_end_copy cpuset
+ cpuset_t *cpuset;
+ size_t cpusetsize;
+};
+
+struct wake_addr {
+ struct wake_addr *link;
+ unsigned int value;
+ char pad[12];
+};
+
+struct sleepqueue {
+ TAILQ_HEAD(, pthread) sq_blocked;
+ SLIST_HEAD(, sleepqueue) sq_freeq;
+ LIST_ENTRY(sleepqueue) sq_hash;
+ SLIST_ENTRY(sleepqueue) sq_flink;
+ void *sq_wchan;
+ int sq_type;
+};
+
+/*
+ * Thread creation state attributes.
+ */
+#define THR_CREATE_RUNNING 0
+#define THR_CREATE_SUSPENDED 1
+
+/*
+ * Miscellaneous definitions.
+ */
+#define THR_STACK_DEFAULT (sizeof(void *) / 4 * 1024 * 1024)
+
+/*
+ * Maximum size of initial thread's stack. This perhaps deserves to be larger
+ * than the stacks of other threads, since many applications are likely to run
+ * almost entirely on this stack.
+ */
+#define THR_STACK_INITIAL (THR_STACK_DEFAULT * 2)
+
+/*
+ * Define priorities returned by kernel.
+ */
+#define THR_MIN_PRIORITY (_thr_priorities[SCHED_OTHER-1].pri_min)
+#define THR_MAX_PRIORITY (_thr_priorities[SCHED_OTHER-1].pri_max)
+#define THR_DEF_PRIORITY (_thr_priorities[SCHED_OTHER-1].pri_default)
+
+#define THR_MIN_RR_PRIORITY (_thr_priorities[SCHED_RR-1].pri_min)
+#define THR_MAX_RR_PRIORITY (_thr_priorities[SCHED_RR-1].pri_max)
+#define THR_DEF_RR_PRIORITY (_thr_priorities[SCHED_RR-1].pri_default)
+
+/* XXX The SCHED_FIFO should have same priority range as SCHED_RR */
+/* Fix: "SCHED_FIFO_1" was a typo for the "SCHED_FIFO-1" index used below. */
+#define THR_MIN_FIFO_PRIORITY (_thr_priorities[SCHED_FIFO-1].pri_min)
+#define THR_MAX_FIFO_PRIORITY (_thr_priorities[SCHED_FIFO-1].pri_max)
+#define THR_DEF_FIFO_PRIORITY (_thr_priorities[SCHED_FIFO-1].pri_default)
+
+struct pthread_prio {
+ int pri_min;
+ int pri_max;
+ int pri_default;
+};
+
+struct pthread_rwlockattr {
+ int pshared;
+};
+
+struct pthread_rwlock {
+ struct urwlock lock;
+ uint32_t owner;
+};
+
+/*
+ * Thread states.
+ */
+enum pthread_state {
+ PS_RUNNING,
+ PS_DEAD
+};
+
+struct pthread_specific_elem {
+ const void *data;
+ int seqno;
+};
+
+struct pthread_key {
+ volatile int allocated;
+ int seqno;
+ void (*destructor)(void *);
+};
+
+/*
+ * lwpid_t is 32bit but kernel thr API exports tid as long type
+ * to preserve the ABI for M:N model in very early date (r131431).
+ */
+#define TID(thread) ((uint32_t) ((thread)->tid))
+
+/*
+ * Thread structure.
+ */
+struct pthread {
+#define _pthread_startzero tid
+ /* Kernel thread id. */
+ long tid;
+#define TID_TERMINATED 1
+
+ /*
+ * Lock for accesses to this thread structure.
+ */
+ struct umutex lock;
+
+ /* Internal condition variable cycle number. */
+ uint32_t cycle;
+
+ /* How many low level locks the thread held. */
+ int locklevel;
+
+ /*
+ * Set to non-zero when this thread has entered a critical
+ * region. We allow for recursive entries into critical regions.
+ */
+ int critical_count;
+
+ /* Signal blocked counter. */
+ int sigblock;
+
+ /* Queue entry for list of all threads. */
+ TAILQ_ENTRY(pthread) tle; /* link for all threads in process */
+
+ /* Queue entry for GC lists. */
+ TAILQ_ENTRY(pthread) gcle;
+
+ /* Hash queue entry. */
+ LIST_ENTRY(pthread) hle;
+
+ /* Sleep queue entry */
+ TAILQ_ENTRY(pthread) wle;
+
+ /* Threads reference count. */
+ int refcount;
+
+ /*
+ * Thread start routine, argument, stack pointer and thread
+ * attributes.
+ */
+ void *(*start_routine)(void *);
+ void *arg;
+ struct pthread_attr attr;
+
+#define SHOULD_CANCEL(thr) \
+ ((thr)->cancel_pending && (thr)->cancel_enable && \
+ (thr)->no_cancel == 0)
+
+ /* Cancellation is enabled */
+ int cancel_enable;
+
+ /* Cancellation request is pending */
+ int cancel_pending;
+
+ /* Thread is at cancellation point */
+ int cancel_point;
+
+ /* Cancellation is temporarily disabled */
+ int no_cancel;
+
+ /* Asynchronous cancellation is enabled */
+ int cancel_async;
+
+ /* Cancellation is in progress */
+ int cancelling;
+
+ /* Thread temporary signal mask. */
+ sigset_t sigmask;
+
+ /* Thread should unblock SIGCANCEL. */
+ int unblock_sigcancel;
+
+ /* In sigsuspend state */
+ int in_sigsuspend;
+
+ /* deferred signal info */
+ siginfo_t deferred_siginfo;
+
+ /* signal mask to restore. */
+ sigset_t deferred_sigmask;
+
+ /* the sigaction should be used for deferred signal. */
+ struct sigaction deferred_sigact;
+
+ /* deferred signal delivery is performed, do not reenter. */
+ int deferred_run;
+
+ /* Force new thread to exit. */
+ int force_exit;
+
+ /* Thread state: */
+ enum pthread_state state;
+
+ /*
+ * Error variable used instead of errno. The function __error()
+ * returns a pointer to this.
+ */
+ int error;
+
+ /*
+ * The joiner is the thread that is joining to this thread. The
+ * join status keeps track of a join operation to another thread.
+ */
+ struct pthread *joiner;
+
+ /* Miscellaneous flags; only set with scheduling lock held. */
+ int flags;
+#define THR_FLAGS_PRIVATE 0x0001
+#define THR_FLAGS_NEED_SUSPEND 0x0002 /* thread should be suspended */
+#define THR_FLAGS_SUSPENDED 0x0004 /* thread is suspended */
+#define THR_FLAGS_DETACHED 0x0008 /* thread is detached */
+
+ /* Thread list flags; only set with thread list lock held. */
+ int tlflags;
+#define TLFLAGS_GC_SAFE 0x0001 /* thread safe for cleaning */
+#define TLFLAGS_IN_TDLIST 0x0002 /* thread in all thread list */
+#define TLFLAGS_IN_GCLIST 0x0004 /* thread in gc list */
+
+ /*
+ * Queues of the owned mutexes. Private queue must have index
+ * + 1 of the corresponding full queue.
+ */
+#define TMQ_NORM 0 /* NORMAL or PRIO_INHERIT normal */
+#define TMQ_NORM_PRIV 1 /* NORMAL or PRIO_INHERIT normal priv */
+#define TMQ_NORM_PP 2 /* PRIO_PROTECT normal mutexes */
+#define TMQ_NORM_PP_PRIV 3 /* PRIO_PROTECT normal priv */
+#define TMQ_ROBUST_PP 4 /* PRIO_PROTECT robust mutexes */
+#define TMQ_ROBUST_PP_PRIV 5 /* PRIO_PROTECT robust priv */
+#define TMQ_NITEMS 6
+ struct mutex_queue mq[TMQ_NITEMS];
+
+ void *ret;
+ struct pthread_specific_elem *specific;
+ int specific_data_count;
+
+ /* Number rwlocks rdlocks held. */
+ int rdlock_count;
+
+ /* Current locks bitmap for rtld. */
+ int rtld_bits;
+
+ /* Thread control block */
+ struct tcb *tcb;
+
+ /* Cleanup handlers Link List */
+ struct pthread_cleanup *cleanup;
+
+#ifdef _PTHREAD_FORCED_UNWIND
+ struct _Unwind_Exception ex;
+ void *unwind_stackend;
+ int unwind_disabled;
+#endif
+
+ /*
+ * Magic value to help recognize a valid thread structure
+ * from an invalid one:
+ */
+#define THR_MAGIC ((u_int32_t) 0xd09ba115)
+ u_int32_t magic;
+
+ /* Enable event reporting */
+ int report_events;
+
+ /* Event mask */
+ int event_mask;
+
+ /* Event */
+ td_event_msg_t event_buf;
+
+ /* Wait channel */
+ void *wchan;
+
+ /* Referenced mutex. */
+ struct pthread_mutex *mutex_obj;
+
+ /* Thread will sleep. */
+ int will_sleep;
+
+ /* Number of threads deferred. */
+ int nwaiter_defer;
+
+ int robust_inited;
+ uintptr_t robust_list;
+ uintptr_t priv_robust_list;
+ uintptr_t inact_mtx;
+
+ /* Deferred threads from pthread_cond_signal. */
+ unsigned int *defer_waiters[MAX_DEFER_WAITERS];
+#define _pthread_endzero wake_addr
+
+ struct wake_addr *wake_addr;
+#define WAKE_ADDR(td) ((td)->wake_addr)
+
+ /* Sleep queue */
+ struct sleepqueue *sleepqueue;
+
+ /* pthread_set/get_name_np */
+ char *name;
+};
+
+/* True when a dead, detached thread holds no references and may be GCed. */
+#define THR_SHOULD_GC(thrd) \
+ ((thrd)->refcount == 0 && (thrd)->state == PS_DEAD && \
+ ((thrd)->flags & THR_FLAGS_DETACHED) != 0)
+
+/* In a critical section while holding low-level locks or while counted. */
+#define THR_IN_CRITICAL(thrd) \
+ (((thrd)->locklevel > 0) || \
+ ((thrd)->critical_count > 0))
+
+#define THR_CRITICAL_ENTER(thrd) \
+ (thrd)->critical_count++
+
+/* Leaving a critical section re-runs deferred work via _thr_ast(). */
+#define THR_CRITICAL_LEAVE(thrd) \
+ do { \
+ (thrd)->critical_count--; \
+ _thr_ast(thrd); \
+ } while (0)
+
+/*
+ * Thin wrappers around the umutex primitives; the owner id is always
+ * the kernel thread id of the acquiring thread.
+ */
+#define THR_UMUTEX_TRYLOCK(thrd, lck) \
+ _thr_umutex_trylock((lck), TID(thrd))
+
+#define THR_UMUTEX_LOCK(thrd, lck) \
+ _thr_umutex_lock((lck), TID(thrd))
+
+#define THR_UMUTEX_TIMEDLOCK(thrd, lck, timo) \
+ _thr_umutex_timedlock((lck), TID(thrd), (timo))
+
+#define THR_UMUTEX_UNLOCK(thrd, lck) \
+ _thr_umutex_unlock((lck), TID(thrd))
+
+/*
+ * Low-level lock acquisition: locklevel is raised BEFORE taking the
+ * lock so the thread already counts as critical while blocking.
+ */
+#define THR_LOCK_ACQUIRE(thrd, lck) \
+do { \
+ (thrd)->locklevel++; \
+ _thr_umutex_lock(lck, TID(thrd)); \
+} while (0)
+
+#define THR_LOCK_ACQUIRE_SPIN(thrd, lck) \
+do { \
+ (thrd)->locklevel++; \
+ _thr_umutex_lock_spin(lck, TID(thrd)); \
+} while (0)
+
+#ifdef _PTHREADS_INVARIANTS
+#define THR_ASSERT_LOCKLEVEL(thrd) \
+do { \
+ if (__predict_false((thrd)->locklevel <= 0)) \
+ _thr_assert_lock_level(); \
+} while (0)
+#else
+#define THR_ASSERT_LOCKLEVEL(thrd)
+#endif
+
+/* Release order mirrors acquisition: unlock, drop locklevel, run AST. */
+#define THR_LOCK_RELEASE(thrd, lck) \
+do { \
+ THR_ASSERT_LOCKLEVEL(thrd); \
+ _thr_umutex_unlock((lck), TID(thrd)); \
+ (thrd)->locklevel--; \
+ _thr_ast(thrd); \
+} while (0)
+
+#define THR_LOCK(curthrd) THR_LOCK_ACQUIRE(curthrd, &(curthrd)->lock)
+#define THR_UNLOCK(curthrd) THR_LOCK_RELEASE(curthrd, &(curthrd)->lock)
+#define THR_THREAD_LOCK(curthrd, thr) THR_LOCK_ACQUIRE(curthrd, &(thr)->lock)
+#define THR_THREAD_UNLOCK(curthrd, thr) THR_LOCK_RELEASE(curthrd, &(thr)->lock)
+
+/* Global thread-list rwlock; same locklevel discipline as above. */
+#define THREAD_LIST_RDLOCK(curthrd) \
+do { \
+ (curthrd)->locklevel++; \
+ _thr_rwl_rdlock(&_thr_list_lock); \
+} while (0)
+
+#define THREAD_LIST_WRLOCK(curthrd) \
+do { \
+ (curthrd)->locklevel++; \
+ _thr_rwl_wrlock(&_thr_list_lock); \
+} while (0)
+
+#define THREAD_LIST_UNLOCK(curthrd) \
+do { \
+ _thr_rwl_unlock(&_thr_list_lock); \
+ (curthrd)->locklevel--; \
+ _thr_ast(curthrd); \
+} while (0)
+
+/*
+ * Macros to insert/remove threads to the all thread list and
+ * the gc list.
+ */
+/* Add to the all-threads list and tid hash; idempotent via TLFLAGS. */
+#define THR_LIST_ADD(thrd) do { \
+ if (((thrd)->tlflags & TLFLAGS_IN_TDLIST) == 0) { \
+ TAILQ_INSERT_HEAD(&_thread_list, thrd, tle); \
+ _thr_hash_add(thrd); \
+ (thrd)->tlflags |= TLFLAGS_IN_TDLIST; \
+ } \
+} while (0)
+#define THR_LIST_REMOVE(thrd) do { \
+ if (((thrd)->tlflags & TLFLAGS_IN_TDLIST) != 0) { \
+ TAILQ_REMOVE(&_thread_list, thrd, tle); \
+ _thr_hash_remove(thrd); \
+ (thrd)->tlflags &= ~TLFLAGS_IN_TDLIST; \
+ } \
+} while (0)
+/* GC-list membership also maintains _gc_count for GC_NEEDED(). */
+#define THR_GCLIST_ADD(thrd) do { \
+ if (((thrd)->tlflags & TLFLAGS_IN_GCLIST) == 0) { \
+ TAILQ_INSERT_HEAD(&_thread_gc_list, thrd, gcle);\
+ (thrd)->tlflags |= TLFLAGS_IN_GCLIST; \
+ _gc_count++; \
+ } \
+} while (0)
+#define THR_GCLIST_REMOVE(thrd) do { \
+ if (((thrd)->tlflags & TLFLAGS_IN_GCLIST) != 0) { \
+ TAILQ_REMOVE(&_thread_gc_list, thrd, gcle); \
+ (thrd)->tlflags &= ~TLFLAGS_IN_GCLIST; \
+ _gc_count--; \
+ } \
+} while (0)
+
+/*
+ * Pin/unpin a reference on another thread.  Entering a critical
+ * section before bumping refcount keeps the target from being
+ * reclaimed.  The bodies are wrapped in do { } while (0) so each
+ * macro expands to exactly one statement; the previous form was a
+ * bare block followed by an empty "while (0)" loop, which misparses
+ * inside an unbraced if/else.
+ */
+#define THR_REF_ADD(curthread, pthread) do { \
+ THR_CRITICAL_ENTER(curthread); \
+ (pthread)->refcount++; \
+} while (0)
+
+#define THR_REF_DEL(curthread, pthread) do { \
+ (pthread)->refcount--; \
+ THR_CRITICAL_LEAVE(curthread); \
+} while (0)
+
+/* Run the garbage collector once enough dead threads accumulate. */
+#define GC_NEEDED() (_gc_count >= 5)
+
+/* Debugger event reporting: per-thread mask OR'ed with the global one. */
+#define SHOULD_REPORT_EVENT(curthr, e) \
+ (curthr->report_events && \
+ (((curthr)->event_mask | _thread_event_mask ) & e) != 0)
+
+#ifndef __LIBC_ISTHREADED_DECLARED
+#define __LIBC_ISTHREADED_DECLARED
+extern int __isthreaded;
+#endif
+
+/*
+ * Global variables for the pthread kernel.
+ */
+
+extern char *_usrstack __hidden;
+
+/* For debugger */
+extern int _libthr_debug;
+extern int _thread_event_mask;
+extern struct pthread *_thread_last_event;
+/* Used in symbol lookup of libthread_db */
+extern struct pthread_key _thread_keytable[];
+
+/* List of all threads: */
+extern pthreadlist _thread_list;
+
+/* List of threads needing GC: */
+extern pthreadlist _thread_gc_list __hidden;
+
+extern int _thread_active_threads;
+extern atfork_head _thr_atfork_list __hidden;
+extern struct urwlock _thr_atfork_lock __hidden;
+
+/* Default thread attributes: */
+extern struct pthread_attr _pthread_attr_default __hidden;
+
+/* Default mutex attributes: */
+extern struct pthread_mutex_attr _pthread_mutexattr_default __hidden;
+extern struct pthread_mutex_attr _pthread_mutexattr_adaptive_default __hidden;
+
+/* Default condition variable attributes: */
+extern struct pthread_cond_attr _pthread_condattr_default __hidden;
+
+extern struct pthread_prio _thr_priorities[] __hidden;
+
+extern int _thr_is_smp __hidden;
+
+extern size_t _thr_guard_default __hidden;
+extern size_t _thr_stack_default __hidden;
+extern size_t _thr_stack_initial __hidden;
+extern int _thr_page_size __hidden;
+extern int _thr_spinloops __hidden;
+extern int _thr_yieldloops __hidden;
+extern int _thr_queuefifo __hidden;
+
+/* Garbage thread count. */
+extern int _gc_count __hidden;
+
+extern struct umutex _mutex_static_lock __hidden;
+extern struct umutex _cond_static_lock __hidden;
+extern struct umutex _rwlock_static_lock __hidden;
+extern struct umutex _keytable_lock __hidden;
+extern struct urwlock _thr_list_lock __hidden;
+extern struct umutex _thr_event_lock __hidden;
+extern struct umutex _suspend_all_lock __hidden;
+extern int _suspend_all_waiters __hidden;
+extern int _suspend_all_cycle __hidden;
+extern struct pthread *_single_thread __hidden;
+
+/*
+ * Function prototype definitions.
+ */
+__BEGIN_DECLS
+int _thr_setthreaded(int) __hidden;
+int _mutex_cv_lock(struct pthread_mutex *, int, bool) __hidden;
+int _mutex_cv_unlock(struct pthread_mutex *, int *, int *) __hidden;
+int _mutex_cv_attach(struct pthread_mutex *, int) __hidden;
+int _mutex_cv_detach(struct pthread_mutex *, int *) __hidden;
+int _mutex_owned(struct pthread *, const struct pthread_mutex *) __hidden;
+int _mutex_reinit(pthread_mutex_t *) __hidden;
+void _mutex_fork(struct pthread *curthread) __hidden;
+int _mutex_enter_robust(struct pthread *curthread, struct pthread_mutex *m)
+ __hidden;
+void _mutex_leave_robust(struct pthread *curthread, struct pthread_mutex *m)
+ __hidden;
+void _libpthread_init(struct pthread *) __hidden;
+struct pthread *_thr_alloc(struct pthread *) __hidden;
+void _thread_exit(const char *, int, const char *) __hidden __dead2;
+void _thread_exitf(const char *, int, const char *, ...) __hidden __dead2
+ __printflike(3, 4);
+int _thr_ref_add(struct pthread *, struct pthread *, int) __hidden;
+void _thr_ref_delete(struct pthread *, struct pthread *) __hidden;
+void _thr_ref_delete_unlocked(struct pthread *, struct pthread *) __hidden;
+int _thr_find_thread(struct pthread *, struct pthread *, int) __hidden;
+void _thr_rtld_init(void) __hidden;
+void _thr_rtld_postfork_child(void) __hidden;
+int _thr_stack_alloc(struct pthread_attr *) __hidden;
+void _thr_stack_free(struct pthread_attr *) __hidden;
+void _thr_free(struct pthread *, struct pthread *) __hidden;
+void _thr_gc(struct pthread *) __hidden;
+void _thread_cleanupspecific(void) __hidden;
+void _thread_printf(int, const char *, ...) __hidden __printflike(2, 3);
+void _thread_vprintf(int, const char *, va_list) __hidden;
+void _thr_spinlock_init(void) __hidden;
+void _thr_cancel_enter(struct pthread *) __hidden;
+void _thr_cancel_enter2(struct pthread *, int) __hidden;
+void _thr_cancel_leave(struct pthread *, int) __hidden;
+void _thr_testcancel(struct pthread *) __hidden;
+void _thr_signal_block(struct pthread *) __hidden;
+void _thr_signal_unblock(struct pthread *) __hidden;
+void _thr_signal_init(int) __hidden;
+void _thr_signal_deinit(void) __hidden;
+int _thr_send_sig(struct pthread *, int sig) __hidden;
+void _thr_list_init(void) __hidden;
+void _thr_hash_add(struct pthread *) __hidden;
+void _thr_hash_remove(struct pthread *) __hidden;
+struct pthread *_thr_hash_find(struct pthread *) __hidden;
+void _thr_link(struct pthread *, struct pthread *) __hidden;
+void _thr_unlink(struct pthread *, struct pthread *) __hidden;
+void _thr_assert_lock_level(void) __hidden __dead2;
+void _thr_ast(struct pthread *) __hidden;
+void _thr_report_creation(struct pthread *curthread,
+ struct pthread *newthread) __hidden;
+void _thr_report_death(struct pthread *curthread) __hidden;
+int _thr_getscheduler(lwpid_t, int *, struct sched_param *) __hidden;
+int _thr_setscheduler(lwpid_t, int, const struct sched_param *) __hidden;
+void _thr_signal_prefork(void) __hidden;
+void _thr_signal_postfork(void) __hidden;
+void _thr_signal_postfork_child(void) __hidden;
+void _thr_suspend_all_lock(struct pthread *) __hidden;
+void _thr_suspend_all_unlock(struct pthread *) __hidden;
+void _thr_try_gc(struct pthread *, struct pthread *) __hidden;
+int _rtp_to_schedparam(const struct rtprio *rtp, int *policy,
+ struct sched_param *param) __hidden;
+int _schedparam_to_rtp(int policy, const struct sched_param *param,
+ struct rtprio *rtp) __hidden;
+void _thread_bp_create(void);
+void _thread_bp_death(void);
+int _sched_yield(void);
+
+void _pthread_cleanup_push(void (*)(void *), void *);
+void _pthread_cleanup_pop(int);
+void _pthread_exit_mask(void *status, sigset_t *mask) __dead2 __hidden;
+#ifndef _LIBC_PRIVATE_H_
+void _pthread_cancel_enter(int maycancel);
+void _pthread_cancel_leave(int maycancel);
+#endif
+int _pthread_mutex_consistent(pthread_mutex_t * _Nonnull);
+int _pthread_mutexattr_getrobust(pthread_mutexattr_t * _Nonnull __restrict,
+ int * _Nonnull __restrict);
+int _pthread_mutexattr_setrobust(pthread_mutexattr_t * _Nonnull, int);
+
+/* #include <fcntl.h> */
+#ifdef _SYS_FCNTL_H_
+#ifndef _LIBC_PRIVATE_H_
+int __sys_fcntl(int, int, ...);
+int __sys_openat(int, const char *, int, ...);
+#endif /* _LIBC_PRIVATE_H_ */
+#endif /* _SYS_FCNTL_H_ */
+
+/* #include <signal.h> */
+#ifdef _SIGNAL_H_
+int __sys_kill(pid_t, int);
+int __sys_sigaltstack(const struct sigaltstack *, struct sigaltstack *);
+int __sys_sigpending(sigset_t *);
+int __sys_sigreturn(const ucontext_t *);
+#ifndef _LIBC_PRIVATE_H_
+int __sys_sigaction(int, const struct sigaction *, struct sigaction *);
+int __sys_sigprocmask(int, const sigset_t *, sigset_t *);
+int __sys_sigsuspend(const sigset_t *);
+int __sys_sigtimedwait(const sigset_t *, siginfo_t *,
+ const struct timespec *);
+int __sys_sigwait(const sigset_t *, int *);
+int __sys_sigwaitinfo(const sigset_t *set, siginfo_t *info);
+#endif /* _LIBC_PRIVATE_H_ */
+#endif /* _SIGNAL_H_ */
+
+/* #include <time.h> */
+#ifdef _TIME_H_
+#ifndef _LIBC_PRIVATE_H_
+int __sys_clock_nanosleep(clockid_t, int, const struct timespec *,
+ struct timespec *);
+int __sys_nanosleep(const struct timespec *, struct timespec *);
+#endif /* _LIBC_PRIVATE_H_ */
+#endif /* _TIME_H_ */
+
+/* #include <sys/ucontext.h> */
+#ifdef _SYS_UCONTEXT_H_
+#ifndef _LIBC_PRIVATE_H_
+int __sys_setcontext(const ucontext_t *ucp);
+int __sys_swapcontext(ucontext_t *oucp, const ucontext_t *ucp);
+#endif /* _LIBC_PRIVATE_H_ */
+#endif /* _SYS_UCONTEXT_H_ */
+
+/* #include <unistd.h> */
+#ifdef _UNISTD_H_
+void __sys_exit(int);
+pid_t __sys_getpid(void);
+#ifndef _LIBC_PRIVATE_H_
+int __sys_close(int);
+int __sys_fork(void);
+ssize_t __sys_read(int, void *, size_t);
+#endif /* _LIBC_PRIVATE_H_ */
+#endif /* _UNISTD_H_ */
+
+/* Report whether libc has been switched into threaded mode. */
+static inline int
+_thr_isthreaded(void)
+{
+ return (__isthreaded ? 1 : 0);
+}
+
+/* Report whether the thread library has finished initialization. */
+static inline int
+_thr_is_inited(void)
+{
+ return (_thr_initial == NULL ? 0 : 1);
+}
+
+/* Lazily initialize the library on first use. */
+static inline void
+_thr_check_init(void)
+{
+ if (_thr_initial == NULL)
+ _libpthread_init(NULL);
+}
+
+struct wake_addr *_thr_alloc_wake_addr(void);
+void _thr_release_wake_addr(struct wake_addr *);
+int _thr_sleep(struct pthread *, int, const struct timespec *);
+
+void _thr_wake_addr_init(void) __hidden;
+
+/* Reset the per-thread wake flag before sleeping on it. */
+static inline void
+_thr_clear_wake(struct pthread *td)
+{
+ td->wake_addr->value = 0;
+}
+
+/*
+ * Test whether the thread's wake flag has been raised by a waker.
+ * Return value parenthesized for consistency with the rest of the
+ * file (style(9)).
+ */
+static inline int
+_thr_is_woken(struct pthread *td)
+{
+ return (td->wake_addr->value != 0);
+}
+
+/* Raise a wake flag and wake all waiters sleeping on that address. */
+static inline void
+_thr_set_wake(unsigned int *waddr)
+{
+ *waddr = 1;
+ _thr_umtx_wake(waddr, INT_MAX, 0);
+}
+
+void _thr_wake_all(unsigned int *waddrs[], int) __hidden;
+
+/*
+ * Head of the queue of threads blocked on this sleepqueue, or NULL.
+ * Return value parenthesized for consistency with the rest of the
+ * file (style(9)).
+ */
+static inline struct pthread *
+_sleepq_first(struct sleepqueue *sq)
+{
+ return (TAILQ_FIRST(&sq->sq_blocked));
+}
+
+void _sleepq_init(void) __hidden;
+struct sleepqueue *_sleepq_alloc(void) __hidden;
+void _sleepq_free(struct sleepqueue *) __hidden;
+void _sleepq_lock(void *) __hidden;
+void _sleepq_unlock(void *) __hidden;
+struct sleepqueue *_sleepq_lookup(void *) __hidden;
+void _sleepq_add(void *, struct pthread *) __hidden;
+int _sleepq_remove(struct sleepqueue *, struct pthread *) __hidden;
+void _sleepq_drop(struct sleepqueue *,
+ void (*cb)(struct pthread *, void *arg), void *) __hidden;
+
+int _pthread_mutex_init_calloc_cb(pthread_mutex_t *mutex,
+ void *(calloc_cb)(size_t, size_t));
+
+struct dl_phdr_info;
+void __pthread_cxa_finalize(struct dl_phdr_info *phdr_info);
+void _thr_tsd_unload(struct dl_phdr_info *phdr_info) __hidden;
+void _thr_sigact_unload(struct dl_phdr_info *phdr_info) __hidden;
+void _thr_stack_fix_protection(struct pthread *thrd);
+
+int *__error_threaded(void) __hidden;
+void __thr_interpose_libc(void) __hidden;
+pid_t __thr_fork(void);
+int __thr_setcontext(const ucontext_t *ucp);
+int __thr_sigaction(int sig, const struct sigaction *act,
+ struct sigaction *oact) __hidden;
+int __thr_sigprocmask(int how, const sigset_t *set, sigset_t *oset);
+int __thr_sigsuspend(const sigset_t * set);
+int __thr_sigtimedwait(const sigset_t *set, siginfo_t *info,
+ const struct timespec * timeout);
+int __thr_sigwait(const sigset_t *set, int *sig);
+int __thr_sigwaitinfo(const sigset_t *set, siginfo_t *info);
+int __thr_swapcontext(ucontext_t *oucp, const ucontext_t *ucp);
+
+void __thr_map_stacks_exec(void);
+
+struct _spinlock;
+void __thr_spinunlock(struct _spinlock *lck);
+void __thr_spinlock(struct _spinlock *lck);
+
+struct tcb *_tcb_ctor(struct pthread *, int);
+void _tcb_dtor(struct tcb *);
+
+void __thr_pshared_init(void) __hidden;
+void *__thr_pshared_offpage(void *key, int doalloc) __hidden;
+void __thr_pshared_destroy(void *key) __hidden;
+void __thr_pshared_atfork_pre(void) __hidden;
+void __thr_pshared_atfork_post(void) __hidden;
+
+__END_DECLS
+__NULLABILITY_PRAGMA_POP
+
+#endif /* !_THR_PRIVATE_H */
diff --git a/lib/libthr/thread/thr_pshared.c b/lib/libthr/thread/thr_pshared.c
new file mode 100644
index 0000000000000..83714785f9b18
--- /dev/null
+++ b/lib/libthr/thread/thr_pshared.c
@@ -0,0 +1,268 @@
+/*-
+ * Copyright (c) 2015 The FreeBSD Foundation
+ *
+ * This software was developed by Konstantin Belousov
+ * under sponsorship from the FreeBSD Foundation.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+
+#include <sys/types.h>
+#include <sys/mman.h>
+#include <sys/queue.h>
+#include "namespace.h"
+#include <stdlib.h>
+#include "un-namespace.h"
+
+#include "thr_private.h"
+
+struct psh {
+ LIST_ENTRY(psh) link;
+ void *key;
+ void *val;
+};
+
+LIST_HEAD(pshared_hash_head, psh);
+#define HASH_SIZE 128
+static struct pshared_hash_head pshared_hash[HASH_SIZE];
+#define PSHARED_KEY_HASH(key) (((unsigned long)(key) >> 8) % HASH_SIZE)
+/* XXXKIB: lock could be split to per-hash chain, if appears contested */
+static struct urwlock pshared_lock = DEFAULT_URWLOCK;
+
+/* Initialize the pshared-object lock and empty all hash chains. */
+void
+__thr_pshared_init(void)
+{
+ int i;
+
+ _thr_urwlock_init(&pshared_lock);
+ for (i = 0; i < HASH_SIZE; i++)
+ LIST_INIT(&pshared_hash[i]);
+}
+
+/*
+ * Hash-lock helpers.  Same discipline as the THREAD_LIST_* macros in
+ * thr_private.h: locklevel is raised before blocking on the lock and
+ * _thr_ast() runs deferred work after release.
+ */
+static void
+pshared_rlock(struct pthread *curthread)
+{
+
+ curthread->locklevel++;
+ _thr_rwl_rdlock(&pshared_lock);
+}
+
+static void
+pshared_wlock(struct pthread *curthread)
+{
+
+ curthread->locklevel++;
+ _thr_rwl_wrlock(&pshared_lock);
+}
+
+static void
+pshared_unlock(struct pthread *curthread)
+{
+
+ _thr_rwl_unlock(&pshared_lock);
+ curthread->locklevel--;
+ _thr_ast(curthread);
+}
+
+/*
+ * Among all processes sharing a lock only one executes
+ * pthread_lock_destroy(). Other processes still have the hash and
+ * mapped off-page.
+ *
+ * Mitigate the problem by checking the liveness of all hashed keys
+ * periodically. Right now this is executed on each
+ * pthread_lock_destroy(), but may be done less often if found to be
+ * too time-consuming.
+ */
+/*
+ * Drop hash entries whose kernel shm object is gone (see the block
+ * comment above): ask the kernel whether each off-page is still
+ * alive and, if not, unhash and unmap it.
+ */
+static void
+pshared_gc(struct pthread *curthread)
+{
+ struct pshared_hash_head *hd;
+ struct psh *h, *h1;
+ int error, i;
+
+ pshared_wlock(curthread);
+ for (i = 0; i < HASH_SIZE; i++) {
+ hd = &pshared_hash[i];
+ LIST_FOREACH_SAFE(h, hd, link, h1) {
+ /* Zero means the backing shm object is still alive. */
+ error = _umtx_op(NULL, UMTX_OP_SHM, UMTX_SHM_ALIVE,
+ h->val, NULL);
+ if (error == 0)
+ continue;
+ LIST_REMOVE(h, link);
+ munmap(h->val, PAGE_SIZE);
+ free(h);
+ }
+ }
+ pshared_unlock(curthread);
+}
+
+/*
+ * Find the off-page mapped for a key, or NULL if the key is not
+ * hashed.  Caller holds pshared_lock.
+ */
+static void *
+pshared_lookup(void *key)
+{
+ struct pshared_hash_head *chain;
+ struct psh *entry;
+
+ chain = &pshared_hash[PSHARED_KEY_HASH(key)];
+ for (entry = LIST_FIRST(chain); entry != NULL;
+     entry = LIST_NEXT(entry, link)) {
+ if (entry->key == key)
+ return (entry->val);
+ }
+ return (NULL);
+}
+
+/*
+ * Insert (key, *val) into the hash; returns 0 only on allocation
+ * failure.  If the key is already hashed, the existing val wins:
+ * *val is unmapped and replaced by the hashed mapping, for the
+ * reasons explained below.  Caller holds the write lock.
+ */
+static int
+pshared_insert(void *key, void **val)
+{
+ struct pshared_hash_head *hd;
+ struct psh *h;
+
+ hd = &pshared_hash[PSHARED_KEY_HASH(key)];
+ LIST_FOREACH(h, hd, link) {
+ /*
+ * When the key already exists in the hash, we should
+ * return either the new (just mapped) or old (hashed)
+ * val, and the other val should be unmapped to avoid
+ * address space leak.
+ *
+ * If two threads perform lock of the same object
+ * which is not yet stored in the pshared_hash, then
+ * the val already inserted by the first thread should
+ * be returned, and the second val freed (order is by
+ * the pshared_lock()). Otherwise, if we unmap the
+ * value obtained from the hash, the first thread
+ * might operate on an unmapped off-page object.
+ *
+ * There is still an issue: if hashed key was unmapped
+ * and then other page is mapped at the same key
+ * address, the hash would return the old val. I
+ * decided to handle the race of simultaneous hash
+ * insertion, leaving the unlikely remap problem
+ * unaddressed.
+ */
+ if (h->key == key) {
+ if (h->val != *val) {
+ munmap(*val, PAGE_SIZE);
+ *val = h->val;
+ }
+ return (1);
+ }
+ }
+
+ h = malloc(sizeof(*h));
+ if (h == NULL)
+ return (0);
+ h->key = key;
+ h->val = *val;
+ LIST_INSERT_HEAD(hd, h, link);
+ return (1);
+}
+
+/*
+ * Unhash a key and return its off-page value, or NULL when the key
+ * was not hashed.  Caller holds the write lock.
+ */
+static void *
+pshared_remove(void *key)
+{
+ struct pshared_hash_head *chain;
+ struct psh *entry;
+ void *offpage;
+
+ chain = &pshared_hash[PSHARED_KEY_HASH(key)];
+ for (entry = LIST_FIRST(chain); entry != NULL;
+     entry = LIST_NEXT(entry, link)) {
+ if (entry->key != key)
+ continue;
+ LIST_REMOVE(entry, link);
+ offpage = entry->val;
+ free(entry);
+ return (offpage);
+ }
+ return (NULL);
+}
+
+/*
+ * Unmap the off-page (when one was mapped) and ask the kernel to
+ * destroy the shared-memory object registered for the key.
+ */
+static void
+pshared_clean(void *key, void *val)
+{
+
+ if (val != NULL)
+ munmap(val, PAGE_SIZE);
+ _umtx_op(NULL, UMTX_OP_SHM, UMTX_SHM_DESTROY, key, NULL);
+}
+
+/*
+ * Resolve a process-shared object key to its mapped off-page.
+ * Fast path: hash lookup under the read lock.  Slow path: create
+ * (when doalloc) or look up the kernel shm object, map it, and try
+ * to hash the mapping; if another thread inserted the same key
+ * first, pshared_insert() unmaps ours and returns the winner's page.
+ * Returns NULL on kernel lookup or mmap failure.
+ */
+void *
+__thr_pshared_offpage(void *key, int doalloc)
+{
+ struct pthread *curthread;
+ void *res;
+ int fd, ins_done;
+
+ curthread = _get_curthread();
+ pshared_rlock(curthread);
+ res = pshared_lookup(key);
+ pshared_unlock(curthread);
+ if (res != NULL)
+ return (res);
+ fd = _umtx_op(NULL, UMTX_OP_SHM, doalloc ? UMTX_SHM_CREAT :
+ UMTX_SHM_LOOKUP, key, NULL);
+ if (fd == -1)
+ return (NULL);
+ res = mmap(NULL, PAGE_SIZE, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
+ close(fd);
+ if (res == MAP_FAILED)
+ return (NULL);
+ pshared_wlock(curthread);
+ ins_done = pshared_insert(key, &res);
+ pshared_unlock(curthread);
+ if (!ins_done) {
+ /* Allocation failure inside insert: undo the mapping. */
+ pshared_clean(key, res);
+ res = NULL;
+ }
+ return (res);
+}
+
+/*
+ * Destroy the pshared object for a key: unhash and unmap its
+ * off-page, destroy the kernel shm object, then opportunistically
+ * GC entries whose objects died in other processes (see comment
+ * above pshared_gc()).
+ */
+void
+__thr_pshared_destroy(void *key)
+{
+ struct pthread *curthread;
+ void *val;
+
+ curthread = _get_curthread();
+ pshared_wlock(curthread);
+ val = pshared_remove(key);
+ pshared_unlock(curthread);
+ pshared_clean(key, val);
+ pshared_gc(curthread);
+}
+
+/*
+ * Fork interlock: hold the pshared lock across fork() so the child
+ * does not inherit it in a locked state.
+ */
+void
+__thr_pshared_atfork_pre(void)
+{
+
+ _thr_rwl_rdlock(&pshared_lock);
+}
+
+void
+__thr_pshared_atfork_post(void)
+{
+
+ _thr_rwl_unlock(&pshared_lock);
+}
diff --git a/lib/libthr/thread/thr_pspinlock.c b/lib/libthr/thread/thr_pspinlock.c
new file mode 100644
index 0000000000000..c71bdbb3f1960
--- /dev/null
+++ b/lib/libthr/thread/thr_pspinlock.c
@@ -0,0 +1,155 @@
+/*-
+ * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
+ *
+ * Copyright (c) 2003 David Xu <davidxu@freebsd.org>
+ * Copyright (c) 2016 The FreeBSD Foundation
+ * All rights reserved.
+ *
+ * Portions of this software were developed by Konstantin Belousov
+ * under sponsorship from the FreeBSD Foundation.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+
+#include "namespace.h"
+#include <errno.h>
+#include <stdlib.h>
+#include <pthread.h>
+#include "un-namespace.h"
+
+#include "thr_private.h"
+
+_Static_assert(sizeof(struct pthread_spinlock) <= PAGE_SIZE,
+ "pthread_spinlock is too large for off-page");
+
+#define SPIN_COUNT 100000
+
+__weak_reference(_pthread_spin_init, pthread_spin_init);
+__weak_reference(_pthread_spin_destroy, pthread_spin_destroy);
+__weak_reference(_pthread_spin_trylock, pthread_spin_trylock);
+__weak_reference(_pthread_spin_lock, pthread_spin_lock);
+__weak_reference(_pthread_spin_unlock, pthread_spin_unlock);
+
+/*
+ * Initialize a spinlock.  Process-private locks live in malloc'ed
+ * memory; process-shared locks live on a kernel-backed off-page and
+ * the user object holds only the THR_PSHARED_PTR marker.
+ * Returns EINVAL on bad arguments, ENOMEM/EFAULT on allocation
+ * failure, 0 on success.
+ */
+int
+_pthread_spin_init(pthread_spinlock_t *lock, int pshared)
+{
+ struct pthread_spinlock *lck;
+
+ if (lock == NULL)
+ return (EINVAL);
+ if (pshared == PTHREAD_PROCESS_PRIVATE) {
+ lck = malloc(sizeof(struct pthread_spinlock));
+ if (lck == NULL)
+ return (ENOMEM);
+ *lock = lck;
+ } else if (pshared == PTHREAD_PROCESS_SHARED) {
+ lck = __thr_pshared_offpage(lock, 1);
+ if (lck == NULL)
+ return (EFAULT);
+ *lock = THR_PSHARED_PTR;
+ } else {
+ return (EINVAL);
+ }
+ _thr_umutex_init(&lck->s_lock);
+ return (0);
+}
+
+/*
+ * Destroy a spinlock.  For process-shared locks the pshared hash is
+ * keyed by the address of the user's lock object (the same key that
+ * _pthread_spin_init() passed to __thr_pshared_offpage()), so the
+ * key -- not the off-page address -- must be handed to
+ * __thr_pshared_destroy(); passing the off-page pointer left the
+ * hash entry, the mapping, and the kernel shm object leaked.
+ */
+int
+_pthread_spin_destroy(pthread_spinlock_t *lock)
+{
+ void *l;
+ int ret;
+
+ if (lock == NULL || *lock == NULL) {
+ ret = EINVAL;
+ } else if (*lock == THR_PSHARED_PTR) {
+ l = __thr_pshared_offpage(lock, 0);
+ if (l != NULL)
+ __thr_pshared_destroy(lock);
+ ret = 0;
+ } else {
+ free(*lock);
+ *lock = NULL;
+ ret = 0;
+ }
+ return (ret);
+}
+
+/*
+ * Try to take the lock without blocking; resolves the off-page for
+ * process-shared locks first.  Returns the umutex trylock status.
+ */
+int
+_pthread_spin_trylock(pthread_spinlock_t *lock)
+{
+ struct pthread_spinlock *lck;
+
+ if (lock == NULL || *lock == NULL)
+ return (EINVAL);
+ lck = *lock == THR_PSHARED_PTR ? __thr_pshared_offpage(lock, 0) : *lock;
+ if (lck == NULL)
+ return (EINVAL);
+ return (THR_UMUTEX_TRYLOCK(_get_curthread(), &lck->s_lock));
+}
+
+/*
+ * Acquire the lock, spinning in userspace.  On uniprocessors spinning
+ * is pointless, so yield immediately; on SMP spin SPIN_COUNT times
+ * with CPU_SPINWAIT between polls before yielding the CPU.
+ */
+int
+_pthread_spin_lock(pthread_spinlock_t *lock)
+{
+ struct pthread *curthread;
+ struct pthread_spinlock *lck;
+ int count;
+
+ if (lock == NULL)
+ return (EINVAL);
+ lck = *lock == THR_PSHARED_PTR ? __thr_pshared_offpage(lock, 0) : *lock;
+ if (lck == NULL)
+ return (EINVAL);
+
+ curthread = _get_curthread();
+ count = SPIN_COUNT;
+ while (THR_UMUTEX_TRYLOCK(curthread, &lck->s_lock) != 0) {
+ /* Poll the owner word until it looks free, then retry. */
+ while (lck->s_lock.m_owner) {
+ if (!_thr_is_smp) {
+ _pthread_yield();
+ } else {
+ CPU_SPINWAIT;
+ if (--count <= 0) {
+ count = SPIN_COUNT;
+ _pthread_yield();
+ }
+ }
+ }
+ }
+ return (0);
+}
+
+/*
+ * Release the lock; resolves the off-page for process-shared locks.
+ * Returns the umutex unlock status.
+ */
+int
+_pthread_spin_unlock(pthread_spinlock_t *lock)
+{
+ struct pthread_spinlock *lck;
+
+ if (lock == NULL)
+ return (EINVAL);
+ lck = *lock == THR_PSHARED_PTR ? __thr_pshared_offpage(lock, 0) : *lock;
+ if (lck == NULL)
+ return (EINVAL);
+ return (THR_UMUTEX_UNLOCK(_get_curthread(), &lck->s_lock));
+}
diff --git a/lib/libthr/thread/thr_resume_np.c b/lib/libthr/thread/thr_resume_np.c
new file mode 100644
index 0000000000000..125d70a595e90
--- /dev/null
+++ b/lib/libthr/thread/thr_resume_np.c
@@ -0,0 +1,99 @@
+/*-
+ * SPDX-License-Identifier: BSD-3-Clause
+ *
+ * Copyright (c) 1995 John Birrell <jb@cimlogic.com.au>.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. Neither the name of the author nor the names of any co-contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY JOHN BIRRELL AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+
+#include "namespace.h"
+#include <errno.h>
+#include <pthread.h>
+#include <pthread_np.h>
+#include "un-namespace.h"
+
+#include "thr_private.h"
+
+__weak_reference(_pthread_resume_np, pthread_resume_np);
+__weak_reference(_pthread_resume_all_np, pthread_resume_all_np);
+
+static void resume_common(struct pthread *thread);
+
+/* Resume a single suspended thread. */
+int
+_pthread_resume_np(pthread_t thread)
+{
+ struct pthread *curthread = _get_curthread();
+ int ret;
+
+ /*
+ * On success _thr_find_thread() returns with the target
+ * thread's lock held (hence the unlock below).
+ */
+ if ((ret = _thr_find_thread(curthread, thread, /*include dead*/0)) == 0) {
+ resume_common(thread);
+ THR_THREAD_UNLOCK(curthread, thread);
+ }
+ return (ret);
+}
+
+/*
+ * Resume every suspended thread except the caller.  Cancellation is
+ * disabled while locks are held and re-tested on the way out.
+ */
+void
+_pthread_resume_all_np(void)
+{
+ struct pthread *curthread = _get_curthread();
+ struct pthread *thread;
+ int old_nocancel;
+
+ old_nocancel = curthread->no_cancel;
+ curthread->no_cancel = 1;
+ _thr_suspend_all_lock(curthread);
+ /* Take the thread list lock: */
+ THREAD_LIST_RDLOCK(curthread);
+
+ TAILQ_FOREACH(thread, &_thread_list, tle) {
+ if (thread != curthread) {
+ THR_THREAD_LOCK(curthread, thread);
+ resume_common(thread);
+ THR_THREAD_UNLOCK(curthread, thread);
+ }
+ }
+
+ /* Release the thread list lock: */
+ THREAD_LIST_UNLOCK(curthread);
+ _thr_suspend_all_unlock(curthread);
+ curthread->no_cancel = old_nocancel;
+ _thr_testcancel(curthread);
+}
+
+/*
+ * Clear the suspend flags, then bump the cycle counter and wake any
+ * sleeper on it; the target's lock is held by the caller.
+ */
+static void
+resume_common(struct pthread *thread)
+{
+ /* Clear the suspend flag: */
+ thread->flags &= ~(THR_FLAGS_NEED_SUSPEND | THR_FLAGS_SUSPENDED);
+ thread->cycle++;
+ _thr_umtx_wake(&thread->cycle, 1, 0);
+}
diff --git a/lib/libthr/thread/thr_rtld.c b/lib/libthr/thread/thr_rtld.c
new file mode 100644
index 0000000000000..3239a9dcfb18f
--- /dev/null
+++ b/lib/libthr/thread/thr_rtld.c
@@ -0,0 +1,244 @@
+/*-
+ * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
+ *
+ * Copyright (c) 2006, David Xu <davidxu@freebsd.org>
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice unmodified, this list of conditions, and the following
+ * disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+
+ /*
+ * A lockless rwlock for rtld.
+ */
+#include <sys/cdefs.h>
+#include <sys/mman.h>
+#include <sys/syscall.h>
+#include <link.h>
+#include <stdlib.h>
+#include <string.h>
+
+#include "libc_private.h"
+#include "rtld_lock.h"
+#include "thr_private.h"
+
+#undef errno
+extern int errno;
+
+static int _thr_rtld_clr_flag(int);
+static void *_thr_rtld_lock_create(void);
+static void _thr_rtld_lock_destroy(void *);
+static void _thr_rtld_lock_release(void *);
+static void _thr_rtld_rlock_acquire(void *);
+static int _thr_rtld_set_flag(int);
+static void _thr_rtld_wlock_acquire(void *);
+
+struct rtld_lock {
+ struct urwlock lock;
+ /* Pad to a full cache line to keep locks from sharing one. */
+ char _pad[CACHE_LINE_SIZE - sizeof(struct urwlock)];
+};
+
+/* Static pool of rtld locks; busy_places is a bitmap of used slots. */
+static struct rtld_lock lock_place[MAX_RTLD_LOCKS] __aligned(CACHE_LINE_SIZE);
+static int busy_places;
+
+/*
+ * Hand out a free slot from the static pool (no allocation is
+ * possible here).  Returns NULL when the pool is exhausted, after
+ * writing a diagnostic directly to fd 2.
+ */
+static void *
+_thr_rtld_lock_create(void)
+{
+ int locki;
+ struct rtld_lock *l;
+ static const char fail[] = "_thr_rtld_lock_create failed\n";
+
+ for (locki = 0; locki < MAX_RTLD_LOCKS; locki++) {
+ if ((busy_places & (1 << locki)) == 0)
+ break;
+ }
+ if (locki == MAX_RTLD_LOCKS) {
+ write(2, fail, sizeof(fail) - 1);
+ return (NULL);
+ }
+ busy_places |= (1 << locki);
+
+ l = &lock_place[locki];
+ l->lock.rw_flags = URWLOCK_PREFER_READER;
+ return (l);
+}
+
+/*
+ * Return a slot to the pool.  The byte-by-byte zeroing avoids a
+ * library call here -- presumably to keep this path free of PLT
+ * resolution (cf. the pre-resolution dance in _thr_rtld_init());
+ * confirm before replacing with memset.
+ */
+static void
+_thr_rtld_lock_destroy(void *lock)
+{
+ int locki;
+ size_t i;
+
+ locki = (struct rtld_lock *)lock - &lock_place[0];
+ for (i = 0; i < sizeof(struct rtld_lock); ++i)
+ ((char *)lock)[i] = 0;
+ busy_places &= ~(1 << locki);
+}
+
+/*
+ * Preserve the caller's error state across lock operations: the
+ * initial thread uses the global errno, other threads their
+ * per-thread error field.  Brace-block macros; each use site ends
+ * with a semicolon, so they expand to a block statement.
+ */
+#define SAVE_ERRNO() { \
+ if (curthread != _thr_initial) \
+ errsave = curthread->error; \
+ else \
+ errsave = errno; \
+}
+
+#define RESTORE_ERRNO() { \
+ if (curthread != _thr_initial) \
+ curthread->error = errsave; \
+ else \
+ errno = errsave; \
+}
+
+/*
+ * Read-lock an rtld lock.  The thread enters a critical section for
+ * the lock's lifetime and errno is preserved so lock traffic cannot
+ * clobber the caller's error state.
+ */
+static void
+_thr_rtld_rlock_acquire(void *lock)
+{
+ struct pthread *curthread;
+ struct rtld_lock *l;
+ int errsave;
+
+ curthread = _get_curthread();
+ SAVE_ERRNO();
+ l = (struct rtld_lock *)lock;
+
+ THR_CRITICAL_ENTER(curthread);
+ /* Retry until the rdlock is obtained. */
+ while (_thr_rwlock_rdlock(&l->lock, 0, NULL) != 0)
+ ;
+ curthread->rdlock_count++;
+ RESTORE_ERRNO();
+}
+
+/*
+ * Write-lock an rtld lock; same critical-section and errno
+ * preservation discipline as the read path.
+ */
+static void
+_thr_rtld_wlock_acquire(void *lock)
+{
+ struct pthread *curthread;
+ struct rtld_lock *l;
+ int errsave;
+
+ curthread = _get_curthread();
+ SAVE_ERRNO();
+ l = (struct rtld_lock *)lock;
+
+ THR_CRITICAL_ENTER(curthread);
+ while (_thr_rwlock_wrlock(&l->lock, NULL) != 0)
+ ;
+ RESTORE_ERRNO();
+}
+
+/*
+ * Release an rtld lock taken by either acquire path.  The rw_state
+ * snapshot taken before unlocking tells whether this was a read
+ * acquisition (no write owner), so rdlock_count is only decremented
+ * in that case; the critical section is left on successful unlock.
+ */
+static void
+_thr_rtld_lock_release(void *lock)
+{
+ struct pthread *curthread;
+ struct rtld_lock *l;
+ int32_t state;
+ int errsave;
+
+ curthread = _get_curthread();
+ SAVE_ERRNO();
+ l = (struct rtld_lock *)lock;
+
+ state = l->lock.rw_state;
+ if (_thr_rwlock_unlock(&l->lock) == 0) {
+ if ((state & URWLOCK_WRITE_OWNER) == 0)
+ curthread->rdlock_count--;
+ THR_CRITICAL_LEAVE(curthread);
+ }
+ RESTORE_ERRNO();
+}
+
+/* Deliberate no-op stubs; see the comment below for why. */
+static int
+_thr_rtld_set_flag(int mask __unused)
+{
+ /*
+ * The caller's code in rtld-elf is broken, it is not signal safe,
+ * just return zero to fool it.
+ */
+ return (0);
+}
+
+static int
+_thr_rtld_clr_flag(int mask __unused)
+{
+ return (0);
+}
+
+/*
+ * Install the libthr lock implementation into rtld.  Before handing
+ * over the lock table, every function that may later run while rtld
+ * locks are held is called once so its PLT entry (or other lazy
+ * state) is resolved now -- resolving it later would recurse into
+ * rtld under its own locks.  The statement order here is load-
+ * bearing; do not reorder.
+ */
+void
+_thr_rtld_init(void)
+{
+ struct RtldLockInfo li;
+ struct pthread *curthread;
+ ucontext_t *uc;
+ long dummy = -1;
+ int uc_len;
+
+ curthread = _get_curthread();
+
+ /* force to resolve _umtx_op PLT */
+ _umtx_op_err((struct umtx *)&dummy, UMTX_OP_WAKE, 1, 0, 0);
+
+ /* force to resolve errno() PLT */
+ __error();
+
+ /* force to resolve memcpy PLT */
+ memcpy(&dummy, &dummy, sizeof(dummy));
+
+ /* Pre-resolve mprotect and the stack-protection query. */
+ mprotect(NULL, 0, 0);
+ _rtld_get_stack_prot();
+
+ li.lock_create = _thr_rtld_lock_create;
+ li.lock_destroy = _thr_rtld_lock_destroy;
+ li.rlock_acquire = _thr_rtld_rlock_acquire;
+ li.wlock_acquire = _thr_rtld_wlock_acquire;
+ li.lock_release = _thr_rtld_lock_release;
+ li.thread_set_flag = _thr_rtld_set_flag;
+ li.thread_clr_flag = _thr_rtld_clr_flag;
+ li.at_fork = NULL;
+
+ /*
+ * Preresolve the symbols needed for the fork interposer. We
+ * call _rtld_atfork_pre() and _rtld_atfork_post() with NULL
+ * argument to indicate that no actual locking inside the
+ * functions should happen. Neither rtld compat locks nor
+ * libthr rtld locks cannot work there:
+ * - compat locks do not handle the case of two locks taken
+ * in write mode (the signal mask for the thread is corrupted);
+ * - libthr locks would work, but locked rtld_bind_lock prevents
+ * symbol resolution for _rtld_atfork_post.
+ */
+ _rtld_atfork_pre(NULL);
+ _rtld_atfork_post(NULL);
+ _malloc_prefork();
+ _malloc_postfork();
+ getpid();
+ syscall(SYS_getpid);
+
+ /* mask signals, also force to resolve __sys_sigprocmask PLT */
+ _thr_signal_block(curthread);
+ _rtld_thread_init(&li);
+ _thr_signal_unblock(curthread);
+
+ /* Pre-resolve the extended-context helpers used at fork/signal time. */
+ uc_len = __getcontextx_size();
+ uc = alloca(uc_len);
+ getcontext(uc);
+ __fillcontextx2((char *)uc);
+}
diff --git a/lib/libthr/thread/thr_rwlock.c b/lib/libthr/thread/thr_rwlock.c
new file mode 100644
index 0000000000000..3eb837c0ff352
--- /dev/null
+++ b/lib/libthr/thread/thr_rwlock.c
@@ -0,0 +1,377 @@
+/*-
+ * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
+ *
+ * Copyright (c) 1998 Alex Nash
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+
+#include <errno.h>
+#include <limits.h>
+#include <stdlib.h>
+
+#include "namespace.h"
+#include <pthread.h>
+#include "un-namespace.h"
+#include "thr_private.h"
+
+_Static_assert(sizeof(struct pthread_rwlock) <= PAGE_SIZE,
+ "pthread_rwlock is too large for off-page");
+
+__weak_reference(_pthread_rwlock_destroy, pthread_rwlock_destroy);
+__weak_reference(_pthread_rwlock_init, pthread_rwlock_init);
+__weak_reference(_pthread_rwlock_rdlock, pthread_rwlock_rdlock);
+__weak_reference(_pthread_rwlock_timedrdlock, pthread_rwlock_timedrdlock);
+__weak_reference(_pthread_rwlock_tryrdlock, pthread_rwlock_tryrdlock);
+__weak_reference(_pthread_rwlock_trywrlock, pthread_rwlock_trywrlock);
+__weak_reference(_pthread_rwlock_unlock, pthread_rwlock_unlock);
+__weak_reference(_pthread_rwlock_wrlock, pthread_rwlock_wrlock);
+__weak_reference(_pthread_rwlock_timedwrlock, pthread_rwlock_timedwrlock);
+
+static int init_static(struct pthread *thread, pthread_rwlock_t *rwlock);
+static int init_rwlock(pthread_rwlock_t *rwlock, pthread_rwlock_t *rwlock_out);
+
+static int __always_inline
+check_and_init_rwlock(pthread_rwlock_t *rwlock, pthread_rwlock_t *rwlock_out)
+{
+ /*
+ * Fast path: a fully initialized, process-private lock is simply
+ * passed back through rwlock_out.  Pshared handles and the
+ * static-initializer/destroyed sentinels take the out-of-line
+ * slow path in init_rwlock().
+ */
+ if (__predict_true(*rwlock != THR_PSHARED_PTR &&
+ *rwlock > THR_RWLOCK_DESTROYED)) {
+ *rwlock_out = *rwlock;
+ return (0);
+ }
+ return (init_rwlock(rwlock, rwlock_out));
+}
+
+static int __noinline
+init_rwlock(pthread_rwlock_t *rwlock, pthread_rwlock_t *rwlock_out)
+{
+ pthread_rwlock_t prw;
+ int error;
+
+ /*
+ * Slow path for check_and_init_rwlock(): resolve a pshared
+ * handle to its off-page object, or perform first-use
+ * initialization of a statically initialized lock.
+ */
+ prw = *rwlock;
+ if (prw == THR_PSHARED_PTR) {
+ prw = __thr_pshared_offpage(rwlock, 0);
+ if (prw == NULL)
+ return (EINVAL);
+ } else if (prw <= THR_RWLOCK_DESTROYED) {
+ /* Using a destroyed lock is an application error. */
+ if (prw == THR_RWLOCK_DESTROYED)
+ return (EINVAL);
+ /* Static initializer: allocate the lock on first use. */
+ error = init_static(_get_curthread(), rwlock);
+ if (error != 0)
+ return (error);
+ prw = *rwlock;
+ }
+ *rwlock_out = prw;
+ return (0);
+}
+
+static int
+rwlock_init(pthread_rwlock_t *rwlock, const pthread_rwlockattr_t *attr)
+{
+ pthread_rwlock_t prw;
+
+ /*
+ * No attribute, or an explicitly process-private one: back the
+ * lock with an ordinary zeroed heap allocation.
+ */
+ if (attr == NULL || *attr == NULL ||
+ (*attr)->pshared == PTHREAD_PROCESS_PRIVATE) {
+ prw = calloc(1, sizeof(*prw));
+ if (prw == NULL)
+ return (ENOMEM);
+ *rwlock = prw;
+ return (0);
+ }
+ /* Process-shared: the lock lives on a shared off-page. */
+ prw = __thr_pshared_offpage(rwlock, 1);
+ if (prw == NULL)
+ return (EFAULT);
+ prw->lock.rw_flags |= USYNC_PROCESS_SHARED;
+ *rwlock = THR_PSHARED_PTR;
+ return (0);
+}
+
+int
+_pthread_rwlock_destroy(pthread_rwlock_t *rwlock)
+{
+ pthread_rwlock_t prw;
+
+ prw = *rwlock;
+ /* A never-used static initializer needs no teardown. */
+ if (prw == THR_RWLOCK_INITIALIZER)
+ return (0);
+ /* Destroying twice is an application error. */
+ if (prw == THR_RWLOCK_DESTROYED)
+ return (EINVAL);
+ /* Mark destroyed first, then release the backing storage. */
+ *rwlock = THR_RWLOCK_DESTROYED;
+ if (prw == THR_PSHARED_PTR)
+ __thr_pshared_destroy(rwlock);
+ else
+ free(prw);
+ return (0);
+}
+
+static int
+init_static(struct pthread *thread, pthread_rwlock_t *rwlock)
+{
+ int error;
+
+ /*
+ * Serialize first-use initialization of statically initialized
+ * rwlocks; a competing thread may already have finished the
+ * job, in which case there is nothing left to do.
+ */
+ error = 0;
+ THR_LOCK_ACQUIRE(thread, &_rwlock_static_lock);
+ if (*rwlock == THR_RWLOCK_INITIALIZER)
+ error = rwlock_init(rwlock, NULL);
+ THR_LOCK_RELEASE(thread, &_rwlock_static_lock);
+ return (error);
+}
+
+/*
+ * Initialize a rwlock according to the attribute (NULL selects the
+ * default, process-private behavior).
+ */
+int
+_pthread_rwlock_init(pthread_rwlock_t *rwlock, const pthread_rwlockattr_t *attr)
+{
+
+ /* Clear any stale handle so rwlock_init() starts from a clean slate. */
+ *rwlock = NULL;
+ return (rwlock_init(rwlock, attr));
+}
+
+/*
+ * Common path for rdlock and timedrdlock; abstime == NULL means block
+ * with no timeout.  The userland fast path is tried first, then the
+ * thread waits in the kernel, retrying the fast path after every
+ * EINTR wakeup.
+ */
+static int
+rwlock_rdlock_common(pthread_rwlock_t *rwlock, const struct timespec *abstime)
+{
+ struct pthread *curthread = _get_curthread();
+ pthread_rwlock_t prwlock;
+ int flags;
+ int ret;
+
+ ret = check_and_init_rwlock(rwlock, &prwlock);
+ if (ret != 0)
+ return (ret);
+
+ if (curthread->rdlock_count) {
+ /*
+ * To avoid having to track all the rdlocks held by
+ * a thread or all of the threads that hold a rdlock,
+ * we keep a simple count of all the rdlocks held by
+ * a thread. If a thread holds any rdlocks it is
+ * possible that it is attempting to take a recursive
+ * rdlock. If there are blocked writers and precedence
+ * is given to them, then that would result in the thread
+ * deadlocking. So allowing a thread to take the rdlock
+ * when it already has one or more rdlocks avoids the
+ * deadlock. I hope the reader can follow that logic ;-)
+ */
+ flags = URWLOCK_PREFER_READER;
+ } else {
+ flags = 0;
+ }
+
+ /*
+ * POSIX said the validity of the abstimeout parameter need
+ * not be checked if the lock can be immediately acquired.
+ */
+ ret = _thr_rwlock_tryrdlock(&prwlock->lock, flags);
+ if (ret == 0) {
+ curthread->rdlock_count++;
+ return (ret);
+ }
+
+ /* Fast path failed; now the timeout must be validated. */
+ if (__predict_false(abstime &&
+ (abstime->tv_nsec >= 1000000000 || abstime->tv_nsec < 0)))
+ return (EINVAL);
+
+ for (;;) {
+ /* goto kernel and lock it */
+ ret = __thr_rwlock_rdlock(&prwlock->lock, flags, abstime);
+ if (ret != EINTR)
+ break;
+
+ /* if interrupted, try to lock it in userland again. */
+ if (_thr_rwlock_tryrdlock(&prwlock->lock, flags) == 0) {
+ ret = 0;
+ break;
+ }
+ }
+ /* Count every read hold for the reader-preference heuristic above. */
+ if (ret == 0)
+ curthread->rdlock_count++;
+ return (ret);
+}
+
+int
+_pthread_rwlock_rdlock(pthread_rwlock_t *rwlock)
+{
+
+ /* Block with no timeout. */
+ return (rwlock_rdlock_common(rwlock, NULL));
+}
+
+int
+_pthread_rwlock_timedrdlock(pthread_rwlock_t * __restrict rwlock,
+ const struct timespec * __restrict abstime)
+{
+
+ /* Wait no later than the given absolute time. */
+ return (rwlock_rdlock_common(rwlock, abstime));
+}
+
+int
+_pthread_rwlock_tryrdlock(pthread_rwlock_t *rwlock)
+{
+ struct pthread *self;
+ pthread_rwlock_t prw;
+ int error, flags;
+
+ error = check_and_init_rwlock(rwlock, &prw);
+ if (error != 0)
+ return (error);
+
+ /*
+ * A thread that already holds read locks is granted reader
+ * preference so that a recursive rdlock cannot deadlock against
+ * queued writers; see the rationale in rwlock_rdlock_common().
+ */
+ self = _get_curthread();
+ flags = self->rdlock_count != 0 ? URWLOCK_PREFER_READER : 0;
+
+ error = _thr_rwlock_tryrdlock(&prw->lock, flags);
+ if (error == 0)
+ self->rdlock_count++;
+ return (error);
+}
+
+int
+_pthread_rwlock_trywrlock(pthread_rwlock_t *rwlock)
+{
+ pthread_rwlock_t prw;
+ int error;
+
+ error = check_and_init_rwlock(rwlock, &prw);
+ if (error != 0)
+ return (error);
+
+ error = _thr_rwlock_trywrlock(&prw->lock);
+ if (error == 0) {
+ /* Record ownership so unlock can validate the caller. */
+ prw->owner = TID(_get_curthread());
+ }
+ return (error);
+}
+
+/*
+ * Common path for wrlock and timedwrlock; abstime == NULL means block
+ * with no timeout.  On success the owner TID is recorded so that
+ * _pthread_rwlock_unlock() can verify the caller really holds the
+ * write lock.
+ */
+static int
+rwlock_wrlock_common (pthread_rwlock_t *rwlock, const struct timespec *abstime)
+{
+ struct pthread *curthread = _get_curthread();
+ pthread_rwlock_t prwlock;
+ int ret;
+
+ ret = check_and_init_rwlock(rwlock, &prwlock);
+ if (ret != 0)
+ return (ret);
+
+ /*
+ * POSIX said the validity of the abstimeout parameter need
+ * not be checked if the lock can be immediately acquired.
+ */
+ ret = _thr_rwlock_trywrlock(&prwlock->lock);
+ if (ret == 0) {
+ prwlock->owner = TID(curthread);
+ return (ret);
+ }
+
+ /* Fast path failed; now the timeout must be validated. */
+ if (__predict_false(abstime &&
+ (abstime->tv_nsec >= 1000000000 || abstime->tv_nsec < 0)))
+ return (EINVAL);
+
+ for (;;) {
+ /* goto kernel and lock it */
+ ret = __thr_rwlock_wrlock(&prwlock->lock, abstime);
+ if (ret == 0) {
+ prwlock->owner = TID(curthread);
+ break;
+ }
+
+ if (ret != EINTR)
+ break;
+
+ /* if interrupted, try to lock it in userland again. */
+ if (_thr_rwlock_trywrlock(&prwlock->lock) == 0) {
+ ret = 0;
+ prwlock->owner = TID(curthread);
+ break;
+ }
+ }
+ return (ret);
+}
+
+int
+_pthread_rwlock_wrlock(pthread_rwlock_t *rwlock)
+{
+
+ /* Block with no timeout. */
+ return (rwlock_wrlock_common(rwlock, NULL));
+}
+
+int
+_pthread_rwlock_timedwrlock(pthread_rwlock_t * __restrict rwlock,
+ const struct timespec * __restrict abstime)
+{
+
+ /* Wait no later than the given absolute time. */
+ return (rwlock_wrlock_common(rwlock, abstime));
+}
+
+/*
+ * Release either a read or a write hold.  The rw_state word is
+ * sampled before unlocking to decide which kind of hold is being
+ * released; only the recorded write owner may release a write hold.
+ */
+int
+_pthread_rwlock_unlock(pthread_rwlock_t *rwlock)
+{
+ struct pthread *curthread = _get_curthread();
+ pthread_rwlock_t prwlock;
+ int ret;
+ int32_t state;
+
+ if (*rwlock == THR_PSHARED_PTR) {
+ prwlock = __thr_pshared_offpage(rwlock, 0);
+ if (prwlock == NULL)
+ return (EINVAL);
+ } else {
+ prwlock = *rwlock;
+ }
+
+ if (__predict_false(prwlock <= THR_RWLOCK_DESTROYED))
+ return (EINVAL);
+
+ /* Snapshot the state while the lock is still held by the caller. */
+ state = prwlock->lock.rw_state;
+ if (state & URWLOCK_WRITE_OWNER) {
+ /* Only the thread that took the write lock may drop it. */
+ if (__predict_false(prwlock->owner != TID(curthread)))
+ return (EPERM);
+ prwlock->owner = 0;
+ }
+
+ ret = _thr_rwlock_unlock(&prwlock->lock);
+ /* A successful non-write unlock releases one of our read holds. */
+ if (ret == 0 && (state & URWLOCK_WRITE_OWNER) == 0)
+ curthread->rdlock_count--;
+
+ return (ret);
+}
diff --git a/lib/libthr/thread/thr_rwlockattr.c b/lib/libthr/thread/thr_rwlockattr.c
new file mode 100644
index 0000000000000..b0b8e70a44c4b
--- /dev/null
+++ b/lib/libthr/thread/thr_rwlockattr.c
@@ -0,0 +1,95 @@
+/*-
+ * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
+ *
+ * Copyright (c) 1998 Alex Nash
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+
+#include "namespace.h"
+#include <errno.h>
+#include <stdlib.h>
+#include <pthread.h>
+#include "un-namespace.h"
+
+#include "thr_private.h"
+
+__weak_reference(_pthread_rwlockattr_destroy, pthread_rwlockattr_destroy);
+__weak_reference(_pthread_rwlockattr_getpshared, pthread_rwlockattr_getpshared);
+__weak_reference(_pthread_rwlockattr_init, pthread_rwlockattr_init);
+__weak_reference(_pthread_rwlockattr_setpshared, pthread_rwlockattr_setpshared);
+
+int
+_pthread_rwlockattr_destroy(pthread_rwlockattr_t *rwlockattr)
+{
+
+ /* Reject a null handle or an attribute that was never created. */
+ if (rwlockattr == NULL || *rwlockattr == NULL)
+ return (EINVAL);
+ free(*rwlockattr);
+ return (0);
+}
+
+int
+_pthread_rwlockattr_getpshared(
+ const pthread_rwlockattr_t * __restrict rwlockattr,
+ int * __restrict pshared)
+{
+
+ /*
+ * Report the process-shared mode of the attribute.  Validate the
+ * handle the same way _pthread_rwlockattr_destroy() does rather
+ * than dereferencing a null pointer; POSIX allows EINVAL here.
+ */
+ if (rwlockattr == NULL || *rwlockattr == NULL || pshared == NULL)
+ return (EINVAL);
+ *pshared = (*rwlockattr)->pshared;
+ return (0);
+}
+
+int
+_pthread_rwlockattr_init(pthread_rwlockattr_t *rwlockattr)
+{
+ pthread_rwlockattr_t attr;
+
+ if (rwlockattr == NULL)
+ return (EINVAL);
+
+ attr = malloc(sizeof(*attr));
+ if (attr == NULL)
+ return (ENOMEM);
+
+ /* New attributes default to process-private sharing. */
+ attr->pshared = PTHREAD_PROCESS_PRIVATE;
+ *rwlockattr = attr;
+ return (0);
+}
+
+int
+_pthread_rwlockattr_setpshared(pthread_rwlockattr_t *rwlockattr, int pshared)
+{
+
+ /* Only the two POSIX sharing modes are valid. */
+ if (pshared != PTHREAD_PROCESS_PRIVATE &&
+ pshared != PTHREAD_PROCESS_SHARED)
+ return (EINVAL);
+ /*
+ * Validate the handle the same way _pthread_rwlockattr_destroy()
+ * does instead of dereferencing a null pointer.
+ */
+ if (rwlockattr == NULL || *rwlockattr == NULL)
+ return (EINVAL);
+ (*rwlockattr)->pshared = pshared;
+ return (0);
+}
diff --git a/lib/libthr/thread/thr_self.c b/lib/libthr/thread/thr_self.c
new file mode 100644
index 0000000000000..eb7c197e4f291
--- /dev/null
+++ b/lib/libthr/thread/thr_self.c
@@ -0,0 +1,50 @@
+/*-
+ * SPDX-License-Identifier: BSD-3-Clause
+ *
+ * Copyright (c) 1995 John Birrell <jb@cimlogic.com.au>.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. Neither the name of the author nor the names of any co-contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY JOHN BIRRELL AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+
+#include "namespace.h"
+#include <pthread.h>
+#include "un-namespace.h"
+
+#include "thr_private.h"
+
+__weak_reference(_pthread_self, pthread_self);
+
+pthread_t
+_pthread_self(void)
+{
+ struct pthread *curthread;
+
+ /* Make sure the library is initialized before touching TLS. */
+ _thr_check_init();
+
+ /* The thread handle is the per-thread structure itself. */
+ curthread = _get_curthread();
+ return (curthread);
+}
diff --git a/lib/libthr/thread/thr_sem.c b/lib/libthr/thread/thr_sem.c
new file mode 100644
index 0000000000000..66386764666a4
--- /dev/null
+++ b/lib/libthr/thread/thr_sem.c
@@ -0,0 +1,118 @@
+/*-
+ * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
+ *
+ * Copyright (C) 2005 David Xu <davidxu@freebsd.org>.
+ * Copyright (C) 2000 Jason Evans <jasone@freebsd.org>.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice(s), this list of conditions and the following disclaimer as
+ * the first lines of this file unmodified other than the possible
+ * addition of one or more copyright notices.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice(s), this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDER(S) ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
+ * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
+ * OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
+ * EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+
+#include "namespace.h"
+#include <sys/types.h>
+#include <sys/queue.h>
+#include <errno.h>
+#include <fcntl.h>
+#include <pthread.h>
+#include <stdlib.h>
+#include <time.h>
+#include <_semaphore.h>
+#include "un-namespace.h"
+
+#include "thr_private.h"
+
+FB10_COMPAT(_sem_init_compat, sem_init);
+FB10_COMPAT(_sem_destroy_compat, sem_destroy);
+FB10_COMPAT(_sem_getvalue_compat, sem_getvalue);
+FB10_COMPAT(_sem_trywait_compat, sem_trywait);
+FB10_COMPAT(_sem_wait_compat, sem_wait);
+FB10_COMPAT(_sem_timedwait_compat, sem_timedwait);
+FB10_COMPAT(_sem_post_compat, sem_post);
+
+typedef struct sem *sem_t;
+
+extern int _libc_sem_init_compat(sem_t *sem, int pshared, unsigned int value);
+extern int _libc_sem_destroy_compat(sem_t *sem);
+extern int _libc_sem_getvalue_compat(sem_t * __restrict sem, int * __restrict sval);
+extern int _libc_sem_trywait_compat(sem_t *sem);
+extern int _libc_sem_wait_compat(sem_t *sem);
+extern int _libc_sem_timedwait_compat(sem_t * __restrict sem,
+ const struct timespec * __restrict abstime);
+extern int _libc_sem_post_compat(sem_t *sem);
+
+int _sem_init_compat(sem_t *sem, int pshared, unsigned int value);
+int _sem_destroy_compat(sem_t *sem);
+int _sem_getvalue_compat(sem_t * __restrict sem, int * __restrict sval);
+int _sem_trywait_compat(sem_t *sem);
+int _sem_wait_compat(sem_t *sem);
+int _sem_timedwait_compat(sem_t * __restrict sem,
+ const struct timespec * __restrict abstime);
+int _sem_post_compat(sem_t *sem);
+
+int
+_sem_init_compat(sem_t *sem, int pshared, unsigned int value)
+{
+
+ /* Forward to the libc compat implementation. */
+ return (_libc_sem_init_compat(sem, pshared, value));
+}
+
+int
+_sem_destroy_compat(sem_t *sem)
+{
+
+ /* Forward to the libc compat implementation. */
+ return (_libc_sem_destroy_compat(sem));
+}
+
+int
+_sem_getvalue_compat(sem_t * __restrict sem, int * __restrict sval)
+{
+
+ /* Forward to the libc compat implementation. */
+ return (_libc_sem_getvalue_compat(sem, sval));
+}
+
+int
+_sem_trywait_compat(sem_t *sem)
+{
+
+ /* Forward to the libc compat implementation. */
+ return (_libc_sem_trywait_compat(sem));
+}
+
+int
+_sem_wait_compat(sem_t *sem)
+{
+
+ /* Forward to the libc compat implementation. */
+ return (_libc_sem_wait_compat(sem));
+}
+
+int
+_sem_timedwait_compat(sem_t * __restrict sem,
+ const struct timespec * __restrict abstime)
+{
+
+ /* Forward to the libc compat implementation. */
+ return (_libc_sem_timedwait_compat(sem, abstime));
+}
+
+int
+_sem_post_compat(sem_t *sem)
+{
+
+ /* Forward to the libc compat implementation. */
+ return (_libc_sem_post_compat(sem));
+}
diff --git a/lib/libthr/thread/thr_setprio.c b/lib/libthr/thread/thr_setprio.c
new file mode 100644
index 0000000000000..537b9b89f2012
--- /dev/null
+++ b/lib/libthr/thread/thr_setprio.c
@@ -0,0 +1,69 @@
+/*-
+ * SPDX-License-Identifier: BSD-3-Clause
+ *
+ * Copyright (c) 1995 John Birrell <jb@cimlogic.com.au>.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. Neither the name of the author nor the names of any co-contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY JOHN BIRRELL AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+
+#include "namespace.h"
+#include <pthread.h>
+#include "un-namespace.h"
+
+#include "thr_private.h"
+
+__weak_reference(_pthread_setprio, pthread_setprio);
+
+/*
+ * Set a thread's scheduling priority, keeping its current policy.
+ * The target thread is locked either directly (when it is the caller)
+ * or via _thr_find_thread(), which returns with the thread lock held
+ * on success; both paths end at THR_THREAD_UNLOCK below.
+ */
+int
+_pthread_setprio(pthread_t pthread, int prio)
+{
+ struct pthread *curthread = _get_curthread();
+ struct sched_param param;
+ int ret;
+
+ param.sched_priority = prio;
+ if (pthread == curthread)
+ THR_LOCK(curthread);
+ else if ((ret = _thr_find_thread(curthread, pthread, /*include dead*/0)))
+ return (ret);
+ /*
+ * For SCHED_OTHER the kernel priority is not used, and an
+ * unchanged priority needs no syscall; just record the value.
+ */
+ if (pthread->attr.sched_policy == SCHED_OTHER ||
+ pthread->attr.prio == prio) {
+ pthread->attr.prio = prio;
+ ret = 0;
+ } else {
+ ret = _thr_setscheduler(pthread->tid,
+ pthread->attr.sched_policy, &param);
+ if (ret == -1)
+ ret = errno;
+ else
+ pthread->attr.prio = prio;
+ }
+ THR_THREAD_UNLOCK(curthread, pthread);
+ return (ret);
+}
diff --git a/lib/libthr/thread/thr_setschedparam.c b/lib/libthr/thread/thr_setschedparam.c
new file mode 100644
index 0000000000000..cf2c1f919e124
--- /dev/null
+++ b/lib/libthr/thread/thr_setschedparam.c
@@ -0,0 +1,80 @@
+/*-
+ * SPDX-License-Identifier: BSD-4-Clause
+ *
+ * Copyright (c) 1998 Daniel Eischen <eischen@vigrid.com>.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. All advertising materials mentioning features or use of this software
+ * must display the following acknowledgement:
+ * This product includes software developed by Daniel Eischen.
+ * 4. Neither the name of the author nor the names of any co-contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY DANIEL EISCHEN AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+
+#include "namespace.h"
+#include <sys/param.h>
+#include <errno.h>
+#include <pthread.h>
+#include "un-namespace.h"
+
+#include "thr_private.h"
+
+__weak_reference(_pthread_setschedparam, pthread_setschedparam);
+
+/*
+ * Set a thread's scheduling parameters, this should be done
+ * in kernel, doing it in userland is no-op.
+ */
+int
+_pthread_setschedparam(pthread_t pthread, int policy,
+ const struct sched_param *param)
+{
+ struct pthread *curthread = _get_curthread();
+ int ret;
+
+ if (pthread == curthread)
+ THR_LOCK(curthread);
+ else if ((ret = _thr_find_thread(curthread, pthread,
+ /*include dead*/0)) != 0)
+ return (ret);
+ if (pthread->attr.sched_policy == policy &&
+ (policy == SCHED_OTHER ||
+ pthread->attr.prio == param->sched_priority)) {
+ pthread->attr.prio = param->sched_priority;
+ THR_THREAD_UNLOCK(curthread, pthread);
+ return (0);
+ }
+ ret = _thr_setscheduler(pthread->tid, policy, param);
+ if (ret == -1)
+ ret = errno;
+ else {
+ pthread->attr.sched_policy = policy;
+ pthread->attr.prio = param->sched_priority;
+ }
+ THR_THREAD_UNLOCK(curthread, pthread);
+ return (ret);
+}
diff --git a/lib/libthr/thread/thr_sig.c b/lib/libthr/thread/thr_sig.c
new file mode 100644
index 0000000000000..576bc79de7951
--- /dev/null
+++ b/lib/libthr/thread/thr_sig.c
@@ -0,0 +1,763 @@
+/*-
+ * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
+ *
+ * Copyright (c) 2005, David Xu <davidxu@freebsd.org>
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice unmodified, this list of conditions, and the following
+ * disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+
+#include "namespace.h"
+#include <sys/param.h>
+#include <sys/types.h>
+#include <sys/signalvar.h>
+#include <sys/syscall.h>
+#include <signal.h>
+#include <errno.h>
+#include <stdlib.h>
+#include <string.h>
+#include <pthread.h>
+#include "un-namespace.h"
+#include "libc_private.h"
+
+#include "libc_private.h"
+#include "thr_private.h"
+
+/* #define DEBUG_SIGNAL */
+#ifdef DEBUG_SIGNAL
+#define DBG_MSG stdout_debug
+#else
+#define DBG_MSG(x...)
+#endif
+
+/* Per-signal record: the user's sigaction plus a lock guarding it. */
+struct usigaction {
+ struct sigaction sigact; /* action installed by the application */
+ struct urwlock lock; /* serializes readers vs. sigaction() */
+};
+
+/* One slot per signal (1.._SIG_MAXSIG), indexed via __libc_sigaction_slot(). */
+static struct usigaction _thr_sigact[_SIG_MAXSIG];
+
+static inline struct usigaction *
+__libc_sigaction_slot(int signo)
+{
+
+ /* Signal numbers are 1-based; the table is 0-based. */
+ return (&_thr_sigact[signo - 1]);
+}
+
+static void thr_sighandler(int, siginfo_t *, void *);
+static void handle_signal(struct sigaction *, int, siginfo_t *, ucontext_t *);
+static void check_deferred_signal(struct pthread *);
+static void check_suspend(struct pthread *);
+static void check_cancel(struct pthread *curthread, ucontext_t *ucp);
+
+int _sigtimedwait(const sigset_t *set, siginfo_t *info,
+ const struct timespec * timeout);
+int _sigwaitinfo(const sigset_t *set, siginfo_t *info);
+int _sigwait(const sigset_t *set, int *sig);
+int _setcontext(const ucontext_t *);
+int _swapcontext(ucontext_t *, const ucontext_t *);
+
+/*
+ * Signals whose handling may be deferred while a thread is inside a
+ * libthr critical region: everything except the synchronous fault
+ * signals (SIGBUS, SIGILL, SIGFPE, SIGSEGV, SIGTRAP, SIGSYS), which
+ * must run immediately.
+ */
+static const sigset_t _thr_deferset={{
+ 0xffffffff & ~(_SIG_BIT(SIGBUS)|_SIG_BIT(SIGILL)|_SIG_BIT(SIGFPE)|
+ _SIG_BIT(SIGSEGV)|_SIG_BIT(SIGTRAP)|_SIG_BIT(SIGSYS)),
+ 0xffffffff,
+ 0xffffffff,
+ 0xffffffff}};
+
+/* Full mask: used to block every signal. */
+static const sigset_t _thr_maskset={{
+ 0xffffffff,
+ 0xffffffff,
+ 0xffffffff,
+ 0xffffffff}};
+
+void
+_thr_signal_block(struct pthread *curthread)
+{
+
+ /*
+ * Block all signals for this thread; the previous mask is saved
+ * on the outermost call only, nested calls just bump the counter.
+ */
+ if (curthread->sigblock > 0) {
+ curthread->sigblock++;
+ return;
+ }
+ __sys_sigprocmask(SIG_BLOCK, &_thr_maskset, &curthread->sigmask);
+ curthread->sigblock++;
+}
+
+void
+_thr_signal_unblock(struct pthread *curthread)
+{
+ /* Restore the saved mask when the outermost block is released. */
+ if (--curthread->sigblock == 0)
+ __sys_sigprocmask(SIG_SETMASK, &curthread->sigmask, NULL);
+}
+
+/*
+ * Deliver signal "sig" to the given thread using the thr_kill(2)
+ * system call; returns the syscall's result.
+ */
+int
+_thr_send_sig(struct pthread *thread, int sig)
+{
+
+ return (thr_kill(thread->tid, sig));
+}
+
+static inline void
+remove_thr_signals(sigset_t *set)
+{
+ /* Strip the signal libthr reserves for cancellation/suspension. */
+ if (SIGISMEMBER(*set, SIGCANCEL))
+ SIGDELSET(*set, SIGCANCEL);
+}
+
+static const sigset_t *
+thr_remove_thr_signals(const sigset_t *set, sigset_t *newset)
+{
+ /* Copy *set into *newset with SIGCANCEL removed; returns newset. */
+ *newset = *set;
+ remove_thr_signals(newset);
+ return (newset);
+}
+
+/*
+ * SIGCANCEL handler: drives thread suspension and deferred
+ * cancellation.  Ignored while the thread is inside a libthr
+ * critical region; the request is re-examined later via _thr_ast().
+ */
+static void
+sigcancel_handler(int sig __unused,
+ siginfo_t *info __unused, ucontext_t *ucp)
+{
+ struct pthread *curthread = _get_curthread();
+ int err;
+
+ if (THR_IN_CRITICAL(curthread))
+ return;
+ /* Runs in signal context: preserve errno around the checks. */
+ err = errno;
+ check_suspend(curthread);
+ check_cancel(curthread, ucp);
+ errno = err;
+}
+
+typedef void (*ohandler)(int sig, int code, struct sigcontext *scp,
+ char *addr, __sighandler_t *catcher);
+
+/*
+ * The signal handler wrapper is entered with all signal masked.
+ */
+static void
+thr_sighandler(int sig, siginfo_t *info, void *_ucp)
+{
+ struct pthread *curthread;
+ ucontext_t *ucp;
+ struct sigaction act;
+ struct usigaction *usa;
+ int err;
+
+ err = errno;
+ curthread = _get_curthread();
+ ucp = _ucp;
+ /*
+ * Snapshot the user's sigaction under the per-signal read lock so
+ * a concurrent sigaction() cannot change it underneath us.
+ */
+ usa = __libc_sigaction_slot(sig);
+ _thr_rwl_rdlock(&usa->lock);
+ act = usa->sigact;
+ _thr_rwl_unlock(&usa->lock);
+ errno = err;
+ curthread->deferred_run = 0;
+
+ /*
+ * if a thread is in critical region, for example it holds low level locks,
+ * try to defer the signal processing, however if the signal is synchronous
+ * signal, it means a bad thing has happened, this is a programming error,
+ * resuming fault point can not help anything (normally causes deadloop),
+ * so here we let user code handle it immediately.
+ */
+ if (THR_IN_CRITICAL(curthread) && SIGISMEMBER(_thr_deferset, sig)) {
+ memcpy(&curthread->deferred_sigact, &act, sizeof(struct sigaction));
+ memcpy(&curthread->deferred_siginfo, info, sizeof(siginfo_t));
+ curthread->deferred_sigmask = ucp->uc_sigmask;
+ /* mask all signals, we will restore it later. */
+ ucp->uc_sigmask = _thr_deferset;
+ return;
+ }
+
+ handle_signal(&act, sig, info, ucp);
+}
+
+/*
+ * Invoke the user's handler "actp" for signal "sig".  Reached from
+ * thr_sighandler() (entered with all signals masked) or from
+ * check_deferred_signal().  Restores the proper signal mask before
+ * calling the handler, shields deferred-mode threads from
+ * cancellation inside the handler, and returns to the interrupted
+ * context through sigreturn(2).
+ */
+static void
+handle_signal(struct sigaction *actp, int sig, siginfo_t *info, ucontext_t *ucp)
+{
+ struct pthread *curthread = _get_curthread();
+ ucontext_t uc2;
+ __siginfohandler_t *sigfunc;
+ int cancel_point;
+ int cancel_async;
+ int cancel_enable;
+ int in_sigsuspend;
+ int err;
+
+ /* add previous level mask */
+ SIGSETOR(actp->sa_mask, ucp->uc_sigmask);
+
+ /* add this signal's mask */
+ if (!(actp->sa_flags & SA_NODEFER))
+ SIGADDSET(actp->sa_mask, sig);
+
+ in_sigsuspend = curthread->in_sigsuspend;
+ curthread->in_sigsuspend = 0;
+
+ /*
+ * If thread is in deferred cancellation mode, disable cancellation
+ * in signal handler.
+ * If user signal handler calls a cancellation point function, e.g,
+ * it calls write() to write data to file, because write() is a
+ * cancellation point, the thread is immediately cancelled if
+ * cancellation is pending, to avoid this problem while thread is in
+ * deferring mode, cancellation is temporarily disabled.
+ */
+ cancel_point = curthread->cancel_point;
+ cancel_async = curthread->cancel_async;
+ cancel_enable = curthread->cancel_enable;
+ curthread->cancel_point = 0;
+ if (!cancel_async)
+ curthread->cancel_enable = 0;
+
+ /* restore correct mask before calling user handler */
+ __sys_sigprocmask(SIG_SETMASK, &actp->sa_mask, NULL);
+
+ sigfunc = actp->sa_sigaction;
+
+ /*
+ * We have already reset cancellation point flags, so if user's code
+ * longjmp()s out of its signal handler, wish its jmpbuf was set
+ * outside of a cancellation point, in most cases, this would be
+ * true. However, there is no way to save cancel_enable in jmpbuf,
+ * so after setjmps() returns once more, the user code may need to
+ * re-set cancel_enable flag by calling pthread_setcancelstate().
+ */
+ if ((actp->sa_flags & SA_SIGINFO) != 0) {
+ sigfunc(sig, info, ucp);
+ } else {
+ /* Old-style handler: call with the historic BSD signature. */
+ ((ohandler)sigfunc)(sig, info->si_code,
+ (struct sigcontext *)ucp, info->si_addr,
+ (__sighandler_t *)sigfunc);
+ }
+ err = errno;
+
+ curthread->in_sigsuspend = in_sigsuspend;
+ curthread->cancel_point = cancel_point;
+ curthread->cancel_enable = cancel_enable;
+
+ /* Resume the interrupted context with SIGCANCEL kept unblocked. */
+ memcpy(&uc2, ucp, sizeof(uc2));
+ SIGDELSET(uc2.uc_sigmask, SIGCANCEL);
+
+ /* reschedule cancellation */
+ check_cancel(curthread, &uc2);
+ errno = err;
+ syscall(SYS_sigreturn, &uc2);
+}
+
+void
+_thr_ast(struct pthread *curthread)
+{
+
+ /*
+ * Asynchronous-software-trap work: run deferred signal handlers
+ * and pending suspension/cancellation requests, but only when the
+ * thread is outside libthr critical regions.
+ */
+ if (!THR_IN_CRITICAL(curthread)) {
+ check_deferred_signal(curthread);
+ check_suspend(curthread);
+ check_cancel(curthread, NULL);
+ }
+}
+
+/* reschedule cancellation */
+static void
+check_cancel(struct pthread *curthread, ucontext_t *ucp)
+{
+
+ /*
+ * Fast path: nothing to do unless a cancel is pending, enabled,
+ * and not temporarily suppressed (no_cancel).
+ */
+ if (__predict_true(!curthread->cancel_pending ||
+ !curthread->cancel_enable || curthread->no_cancel))
+ return;
+
+ /*
+ * Otherwise, we are in defer mode, and we are at
+ * cancel point, tell kernel to not block the current
+ * thread on next cancelable system call.
+ *
+ * There are three cases we should call thr_wake() to
+ * turn on TDP_WAKEUP or send SIGCANCEL in kernel:
+ * 1) we are going to call a cancelable system call,
+ * non-zero cancel_point means we are already in
+ * cancelable state, next system call is cancelable.
+ * 2) because _thr_ast() may be called by
+ * THR_CRITICAL_LEAVE() which is used by rtld rwlock
+ * and any libthr internal locks, when rtld rwlock
+ * is used, it is mostly caused by an unresolved PLT.
+ * Those routines may clear the TDP_WAKEUP flag by
+ * invoking some system calls, in those cases, we
+ * also should reenable the flag.
+ * 3) thread is in sigsuspend(), and the syscall insists
+ * on getting a signal before it agrees to return.
+ */
+ if (curthread->cancel_point) {
+ if (curthread->in_sigsuspend && ucp) {
+ SIGADDSET(ucp->uc_sigmask, SIGCANCEL);
+ curthread->unblock_sigcancel = 1;
+ _thr_send_sig(curthread, SIGCANCEL);
+ } else
+ thr_wake(curthread->tid);
+ } else if (curthread->cancel_async) {
+ /*
+ * asynchronous cancellation mode, act upon
+ * immediately.
+ */
+ _pthread_exit_mask(PTHREAD_CANCELED,
+ ucp? &ucp->uc_sigmask : NULL);
+ }
+}
+
+/*
+ * Run a signal handler whose delivery was postponed by
+ * thr_sighandler() because the thread was inside a critical region.
+ */
+static void
+check_deferred_signal(struct pthread *curthread)
+{
+ ucontext_t *uc;
+ struct sigaction act;
+ siginfo_t info;
+ int uc_len;
+
+ if (__predict_true(curthread->deferred_siginfo.si_signo == 0 ||
+ curthread->deferred_run))
+ return;
+
+ curthread->deferred_run = 1;
+ uc_len = __getcontextx_size();
+ uc = alloca(uc_len);
+ /*
+ * Capture the current context.  handle_signal() exits through
+ * sigreturn(2) on this context, so execution comes back here a
+ * second time with si_signo already cleared, and we return.
+ */
+ getcontext(uc);
+ if (curthread->deferred_siginfo.si_signo == 0) {
+ curthread->deferred_run = 0;
+ return;
+ }
+ __fillcontextx2((char *)uc);
+ act = curthread->deferred_sigact;
+ uc->uc_sigmask = curthread->deferred_sigmask;
+ memcpy(&info, &curthread->deferred_siginfo, sizeof(siginfo_t));
+ /* remove signal */
+ curthread->deferred_siginfo.si_signo = 0;
+ handle_signal(&act, info.si_signo, &info, uc);
+}
+
+/*
+ * Park the current thread while another thread has set
+ * THR_FLAGS_NEED_SUSPEND on it.  Runs with SIGCANCEL blocked and an
+ * elevated critical_count; the cycle counter is the umtx wait
+ * channel on which suspender and resumer synchronize.
+ */
+static void
+check_suspend(struct pthread *curthread)
+{
+ uint32_t cycle;
+
+ if (__predict_true((curthread->flags &
+ (THR_FLAGS_NEED_SUSPEND | THR_FLAGS_SUSPENDED))
+ != THR_FLAGS_NEED_SUSPEND))
+ return;
+ if (curthread == _single_thread)
+ return;
+ if (curthread->force_exit)
+ return;
+
+ /*
+ * Blocks SIGCANCEL which other threads must send.
+ */
+ _thr_signal_block(curthread);
+
+ /*
+ * Increase critical_count, here we don't use THR_LOCK/UNLOCK
+ * because we are leaf code, we don't want to recursively call
+ * ourself.
+ */
+ curthread->critical_count++;
+ THR_UMUTEX_LOCK(curthread, &(curthread)->lock);
+ while ((curthread->flags & THR_FLAGS_NEED_SUSPEND) != 0) {
+ curthread->cycle++;
+ cycle = curthread->cycle;
+
+ /* Wake the thread suspending us. */
+ _thr_umtx_wake(&curthread->cycle, INT_MAX, 0);
+
+ /*
+ * if we are from pthread_exit, we don't want to
+ * suspend, just go and die.
+ */
+ if (curthread->state == PS_DEAD)
+ break;
+ curthread->flags |= THR_FLAGS_SUSPENDED;
+ THR_UMUTEX_UNLOCK(curthread, &(curthread)->lock);
+ _thr_umtx_wait_uint(&curthread->cycle, cycle, NULL, 0);
+ THR_UMUTEX_LOCK(curthread, &(curthread)->lock);
+ }
+ THR_UMUTEX_UNLOCK(curthread, &(curthread)->lock);
+ curthread->critical_count--;
+
+ _thr_signal_unblock(curthread);
+}
+
+void
+_thr_signal_init(int dlopened)
+{
+ struct sigaction act, nact, oact;
+ struct usigaction *usa;
+ sigset_t oldset;
+ int sig, error;
+
+ if (dlopened) {
+ /*
+ * libthr was loaded after the process may already have
+ * installed handlers: wrap every existing user handler
+ * with thr_sighandler() so libthr observes delivery.
+ */
+ __sys_sigprocmask(SIG_SETMASK, &_thr_maskset, &oldset);
+ for (sig = 1; sig <= _SIG_MAXSIG; sig++) {
+ if (sig == SIGCANCEL)
+ continue;
+ error = __sys_sigaction(sig, NULL, &oact);
+ if (error == -1 || oact.sa_handler == SIG_DFL ||
+ oact.sa_handler == SIG_IGN)
+ continue;
+ usa = __libc_sigaction_slot(sig);
+ usa->sigact = oact;
+ nact = oact;
+ remove_thr_signals(&usa->sigact.sa_mask);
+ nact.sa_flags &= ~SA_NODEFER;
+ nact.sa_flags |= SA_SIGINFO;
+ nact.sa_sigaction = thr_sighandler;
+ nact.sa_mask = _thr_maskset;
+ (void)__sys_sigaction(sig, &nact, NULL);
+ }
+ __sys_sigprocmask(SIG_SETMASK, &oldset, NULL);
+ }
+
+ /* Install SIGCANCEL handler. */
+ SIGFILLSET(act.sa_mask);
+ act.sa_flags = SA_SIGINFO;
+ act.sa_sigaction = (__siginfohandler_t *)&sigcancel_handler;
+ __sys_sigaction(SIGCANCEL, &act, NULL);
+
+ /* Unblock SIGCANCEL */
+ SIGEMPTYSET(act.sa_mask);
+ SIGADDSET(act.sa_mask, SIGCANCEL);
+ __sys_sigprocmask(SIG_UNBLOCK, &act.sa_mask, NULL);
+}
+
+void
+_thr_sigact_unload(struct dl_phdr_info *phdr_info __unused)
+{
+/*
+ * NOTE: intentionally compiled out.  The disabled code would reset
+ * to SIG_DFL any handler whose code lives inside the DSO being
+ * unloaded, so no dangling handler pointer could be invoked.
+ */
+#if 0
+ struct pthread *curthread = _get_curthread();
+ struct urwlock *rwlp;
+ struct sigaction *actp;
+ struct usigaction *usa;
+ struct sigaction kact;
+ void (*handler)(int);
+ int sig;
+
+ _thr_signal_block(curthread);
+ for (sig = 1; sig <= _SIG_MAXSIG; sig++) {
+ usa = __libc_sigaction_slot(sig);
+ actp = &usa->sigact;
+retry:
+ handler = actp->sa_handler;
+ if (handler != SIG_DFL && handler != SIG_IGN &&
+ __elf_phdr_match_addr(phdr_info, handler)) {
+ rwlp = &usa->lock;
+ _thr_rwl_wrlock(rwlp);
+ if (handler != actp->sa_handler) {
+ _thr_rwl_unlock(rwlp);
+ goto retry;
+ }
+ actp->sa_handler = SIG_DFL;
+ actp->sa_flags = SA_SIGINFO;
+ SIGEMPTYSET(actp->sa_mask);
+ if (__sys_sigaction(sig, NULL, &kact) == 0 &&
+ kact.sa_handler != SIG_DFL &&
+ kact.sa_handler != SIG_IGN)
+ __sys_sigaction(sig, actp, NULL);
+ _thr_rwl_unlock(rwlp);
+ }
+ }
+ _thr_signal_unblock(curthread);
+#endif
+}
+
+/*
+ * Take every per-signal sigaction read lock before fork(2) so the
+ * table cannot change while the child is created.
+ */
+void
+_thr_signal_prefork(void)
+{
+ int sig;
+
+ for (sig = 1; sig <= _SIG_MAXSIG; sig++)
+ _thr_rwl_rdlock(&__libc_sigaction_slot(sig)->lock);
+}
+
+/*
+ * Parent side after fork(2): release the read locks taken by
+ * _thr_signal_prefork().
+ */
+void
+_thr_signal_postfork(void)
+{
+ int sig;
+
+ for (sig = 1; sig <= _SIG_MAXSIG; sig++)
+ _thr_rwl_unlock(&__libc_sigaction_slot(sig)->lock);
+}
+
+/*
+ * Child side after fork(2): the child is single-threaded, so the
+ * locks taken by _thr_signal_prefork() have no other holders; reset
+ * each lock to its initial (zeroed) state instead of unlocking.
+ */
+void
+_thr_signal_postfork_child(void)
+{
+ int sig;
+
+ for (sig = 1; sig <= _SIG_MAXSIG; sig++)
+ memset(&__libc_sigaction_slot(sig)->lock, 0,
+ sizeof(struct urwlock));
+}
+
+void
+_thr_signal_deinit(void)
+{
+ /* Nothing to tear down; kept for symmetry with _thr_signal_init(). */
+}
+
+/*
+ * sigaction() interposer: record the user's action in _thr_sigact[]
+ * and install thr_sighandler() as the real kernel handler so libthr
+ * can defer signals and strip SIGCANCEL from handler masks.
+ */
+int
+__thr_sigaction(int sig, const struct sigaction *act, struct sigaction *oact)
+{
+ struct sigaction newact, oldact, oldact2;
+ sigset_t oldset;
+ struct usigaction *usa;
+ int ret, err;
+
+ if (!_SIG_VALID(sig) || sig == SIGCANCEL) {
+ errno = EINVAL;
+ return (-1);
+ }
+
+ ret = 0;
+ err = 0;
+ usa = __libc_sigaction_slot(sig);
+
+ /* Serialize against signal delivery and other sigaction() calls. */
+ __sys_sigprocmask(SIG_SETMASK, &_thr_maskset, &oldset);
+ _thr_rwl_wrlock(&usa->lock);
+
+ if (act != NULL) {
+ oldact2 = usa->sigact;
+ newact = *act;
+
+ /*
+ * if a new sig handler is SIG_DFL or SIG_IGN,
+ * don't remove old handler from __libc_sigact[],
+ * so deferred signals still can use the handlers,
+ * multiple threads invoking sigaction itself is
+ * a race condition, so it is not a problem.
+ */
+ if (newact.sa_handler != SIG_DFL &&
+ newact.sa_handler != SIG_IGN) {
+ usa->sigact = newact;
+ remove_thr_signals(&usa->sigact.sa_mask);
+ newact.sa_flags &= ~SA_NODEFER;
+ newact.sa_flags |= SA_SIGINFO;
+ newact.sa_sigaction = thr_sighandler;
+ newact.sa_mask = _thr_maskset; /* mask all signals */
+ }
+ ret = __sys_sigaction(sig, &newact, &oldact);
+ if (ret == -1) {
+ err = errno;
+ usa->sigact = oldact2;
+ }
+ } else if (oact != NULL) {
+ ret = __sys_sigaction(sig, NULL, &oldact);
+ err = errno;
+ }
+
+ /*
+ * Report the user's handler rather than our thr_sighandler()
+ * wrapper.  Only inspect oldact when one of the __sys_sigaction()
+ * calls above actually filled it in; previously it was read even
+ * when both act and oact were NULL, or when the syscall failed,
+ * which accessed an uninitialized variable (undefined behavior).
+ */
+ if (ret == 0 && (act != NULL || oact != NULL) &&
+ oldact.sa_handler != SIG_DFL && oldact.sa_handler != SIG_IGN) {
+ if (act != NULL)
+ oldact = oldact2;
+ else if (oact != NULL)
+ oldact = usa->sigact;
+ }
+
+ _thr_rwl_unlock(&usa->lock);
+ __sys_sigprocmask(SIG_SETMASK, &oldset, NULL);
+
+ if (ret == 0) {
+ if (oact != NULL)
+ *oact = oldact;
+ } else {
+ errno = err;
+ }
+ return (ret);
+}
+
+/*
+ * sigprocmask() wrapper: never let the application block SIGCANCEL,
+ * which libthr needs for cancellation and suspension.  Unblocking
+ * (SIG_UNBLOCK) may pass through untouched.
+ */
+int
+__thr_sigprocmask(int how, const sigset_t *set, sigset_t *oset)
+{
+ sigset_t filtered;
+ const sigset_t *inset;
+
+ inset = set;
+ if (set != NULL && how != SIG_UNBLOCK) {
+ filtered = *set;
+ SIGDELSET(filtered, SIGCANCEL);
+ inset = &filtered;
+ }
+ return (__sys_sigprocmask(how, inset, oset));
+}
+
+__weak_reference(_pthread_sigmask, pthread_sigmask);
+
+int
+_pthread_sigmask(int how, const sigset_t *set, sigset_t *oset)
+{
+
+ /*
+ * POSIX pthread_sigmask() reports failure as a returned error
+ * number instead of -1/errno as sigprocmask() does.
+ */
+ if (__thr_sigprocmask(how, set, oset))
+ return (errno);
+ return (0);
+}
+
+int
+_sigsuspend(const sigset_t * set)
+{
+ sigset_t newset;
+
+ /* Non-cancellation-point variant; SIGCANCEL stays unblocked. */
+ return (__sys_sigsuspend(thr_remove_thr_signals(set, &newset)));
+}
+
+int
+__thr_sigsuspend(const sigset_t * set)
+{
+ struct pthread *curthread;
+ sigset_t newset;
+ int ret, old;
+
+ curthread = _get_curthread();
+
+ /*
+ * Flag that we are inside sigsuspend() so check_cancel() knows
+ * it must send SIGCANCEL (rather than thr_wake()) to get the
+ * syscall to return.
+ */
+ old = curthread->in_sigsuspend;
+ curthread->in_sigsuspend = 1;
+ _thr_cancel_enter(curthread);
+ ret = __sys_sigsuspend(thr_remove_thr_signals(set, &newset));
+ _thr_cancel_leave(curthread, 1);
+ curthread->in_sigsuspend = old;
+ /* Undo the temporary SIGCANCEL block installed by check_cancel(). */
+ if (curthread->unblock_sigcancel) {
+ curthread->unblock_sigcancel = 0;
+ SIGEMPTYSET(newset);
+ SIGADDSET(newset, SIGCANCEL);
+ __sys_sigprocmask(SIG_UNBLOCK, &newset, NULL);
+ }
+
+ return (ret);
+}
+
+int
+_sigtimedwait(const sigset_t *set, siginfo_t *info,
+ const struct timespec * timeout)
+{
+ sigset_t newset;
+
+ /* Non-cancellation-point variant; SIGCANCEL is never waited for. */
+ return (__sys_sigtimedwait(thr_remove_thr_signals(set, &newset), info,
+ timeout));
+}
+
+/*
+ * Cancellation behavior:
+ * Thread may be canceled at start, if thread got signal,
+ * it is not canceled.
+ */
+int
+__thr_sigtimedwait(const sigset_t *set, siginfo_t *info,
+ const struct timespec * timeout)
+{
+ struct pthread *curthread = _get_curthread();
+ sigset_t newset;
+ int ret;
+
+ /* Cancellation point; not canceled once a signal was received. */
+ _thr_cancel_enter(curthread);
+ ret = __sys_sigtimedwait(thr_remove_thr_signals(set, &newset), info,
+ timeout);
+ _thr_cancel_leave(curthread, (ret == -1));
+ return (ret);
+}
+
+int
+_sigwaitinfo(const sigset_t *set, siginfo_t *info)
+{
+ sigset_t newset;
+
+ /* Non-cancellation-point variant; SIGCANCEL is never waited for. */
+ return (__sys_sigwaitinfo(thr_remove_thr_signals(set, &newset), info));
+}
+
+/*
+ * Cancellation behavior:
+ * Thread may be canceled at start, if thread got signal,
+ * it is not canceled.
+ */
+int
+__thr_sigwaitinfo(const sigset_t *set, siginfo_t *info)
+{
+ struct pthread *curthread = _get_curthread();
+ sigset_t newset;
+ int ret;
+
+ /* Cancellation point; not canceled once a signal was received. */
+ _thr_cancel_enter(curthread);
+ ret = __sys_sigwaitinfo(thr_remove_thr_signals(set, &newset), info);
+ _thr_cancel_leave(curthread, ret == -1);
+ return (ret);
+}
+
+int
+_sigwait(const sigset_t *set, int *sig)
+{
+ sigset_t newset;
+
+ /* Non-cancellation-point variant; SIGCANCEL is never waited for. */
+ return (__sys_sigwait(thr_remove_thr_signals(set, &newset), sig));
+}
+
+/*
+ * Cancellation behavior:
+ * Thread may be canceled at start, if thread got signal,
+ * it is not canceled.
+ */
+int
+__thr_sigwait(const sigset_t *set, int *sig)
+{
+ struct pthread *curthread = _get_curthread();
+ sigset_t newset;
+ int ret;
+
+ /*
+ * sigwait(3) returns an error number, not -1/errno; retry on
+ * EINTR so interruptions are hidden from the caller.
+ */
+ do {
+ _thr_cancel_enter(curthread);
+ ret = __sys_sigwait(thr_remove_thr_signals(set, &newset), sig);
+ _thr_cancel_leave(curthread, (ret != 0));
+ } while (ret == EINTR);
+ return (ret);
+}
+
+/*
+ * setcontext() wrapper: reject a NULL context and make sure the
+ * restored signal mask never blocks SIGCANCEL.
+ */
+int
+__thr_setcontext(const ucontext_t *ucp)
+{
+ ucontext_t copy;
+
+ if (ucp == NULL) {
+ errno = EINVAL;
+ return (-1);
+ }
+ if (SIGISMEMBER(ucp->uc_sigmask, SIGCANCEL)) {
+ copy = *ucp;
+ SIGDELSET(copy.uc_sigmask, SIGCANCEL);
+ ucp = &copy;
+ }
+ return (__sys_setcontext(ucp));
+}
+
+int
+__thr_swapcontext(ucontext_t *oucp, const ucontext_t *ucp)
+{
+ ucontext_t uc;
+
+ if (oucp == NULL || ucp == NULL) {
+ errno = EINVAL;
+ return (-1);
+ }
+ /* Never allow the incoming context to block SIGCANCEL. */
+ if (SIGISMEMBER(ucp->uc_sigmask, SIGCANCEL)) {
+ (void) memcpy(&uc, ucp, sizeof(uc));
+ SIGDELSET(uc.uc_sigmask, SIGCANCEL);
+ ucp = &uc;
+ }
+ return (__sys_swapcontext(oucp, ucp));
+}
diff --git a/lib/libthr/thread/thr_single_np.c b/lib/libthr/thread/thr_single_np.c
new file mode 100644
index 0000000000000..7aecfc93b3e34
--- /dev/null
+++ b/lib/libthr/thread/thr_single_np.c
@@ -0,0 +1,53 @@
+/*-
+ * SPDX-License-Identifier: BSD-3-Clause
+ *
+ * Copyright (c) 1996 John Birrell <jb@cimlogic.com.au>.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. Neither the name of the author nor the names of any co-contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY JOHN BIRRELL AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+
+#include "namespace.h"
+#include <pthread.h>
+#include <pthread_np.h>
+#include "un-namespace.h"
+
+__weak_reference(_pthread_single_np, pthread_single_np);
+
+/*
+ * Non-portable: make the process effectively single-threaded by
+ * suspending all other threads.  Always returns 0.
+ */
+int
+_pthread_single_np(void)
+{
+
+ /* Enter single-threaded (non-POSIX) scheduling mode: */
+ _pthread_suspend_all_np();
+ /*
+ * XXX - Do we want to do this?
+ * __is_threaded = 0;
+ */
+ return (0);
+}
diff --git a/lib/libthr/thread/thr_sleepq.c b/lib/libthr/thread/thr_sleepq.c
new file mode 100644
index 0000000000000..05145a739681a
--- /dev/null
+++ b/lib/libthr/thread/thr_sleepq.c
@@ -0,0 +1,186 @@
+/*-
+ * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
+ *
+ * Copyright (c) 2010 David Xu <davidxu@freebsd.org>
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice unmodified, this list of conditions, and the following
+ * disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+
+#include <stdlib.h>
+#include "thr_private.h"
+
+#define HASHSHIFT 9
+#define HASHSIZE (1 << HASHSHIFT)
+#define SC_HASH(wchan) ((unsigned) \
+ ((((uintptr_t)(wchan) >> 3) \
+ ^ ((uintptr_t)(wchan) >> (HASHSHIFT + 3))) \
+ & (HASHSIZE - 1)))
+#define SC_LOOKUP(wc) &sc_table[SC_HASH(wc)]
+
+/* One hash bucket of sleepqueues, guarded by its own lock. */
+struct sleepqueue_chain {
+ struct umutex sc_lock; /* protects this chain */
+ int sc_enqcnt; /* enqueue counter, drives LIFO/FIFO mix */
+ LIST_HEAD(, sleepqueue) sc_queues; /* sleepqueues hashed to this chain */
+ int sc_type; /* NOTE(review): appears unused here */
+};
+
+static struct sleepqueue_chain sc_table[HASHSIZE];
+
+/*
+ * Initialize every sleepqueue hash chain: an empty queue list plus
+ * a fresh chain lock.
+ */
+void
+_sleepq_init(void)
+{
+ struct sleepqueue_chain *sc;
+
+ for (sc = sc_table; sc < sc_table + HASHSIZE; sc++) {
+ LIST_INIT(&sc->sc_queues);
+ _thr_umutex_init(&sc->sc_lock);
+ }
+}
+
+/*
+ * Allocate a zeroed sleepqueue and initialize its lists.  Returns
+ * NULL when memory is exhausted; previously a failed calloc() was
+ * dereferenced immediately (TAILQ_INIT on a NULL pointer), crashing
+ * instead of reporting the failure to the caller.
+ */
+struct sleepqueue *
+_sleepq_alloc(void)
+{
+ struct sleepqueue *sq;
+
+ sq = calloc(1, sizeof(struct sleepqueue));
+ if (sq == NULL)
+ return (NULL);
+ TAILQ_INIT(&sq->sq_blocked);
+ SLIST_INIT(&sq->sq_freeq);
+ return (sq);
+}
+
+void
+_sleepq_free(struct sleepqueue *sq)
+{
+ /* Release a sleepqueue obtained from _sleepq_alloc(). */
+ free(sq);
+}
+
+void
+_sleepq_lock(void *wchan)
+{
+ struct pthread *curthread = _get_curthread();
+ struct sleepqueue_chain *sc;
+
+ /* Hash the wait channel to its chain and spin-acquire the lock. */
+ sc = SC_LOOKUP(wchan);
+ THR_LOCK_ACQUIRE_SPIN(curthread, &sc->sc_lock);
+}
+
+void
+_sleepq_unlock(void *wchan)
+{
+ struct sleepqueue_chain *sc;
+ struct pthread *curthread = _get_curthread();
+
+ /* Release the chain lock taken by _sleepq_lock(wchan). */
+ sc = SC_LOOKUP(wchan);
+ THR_LOCK_RELEASE(curthread, &sc->sc_lock);
+}
+
+/*
+ * Scan one hash chain for the sleepqueue bound to "wchan"; NULL when
+ * the channel has no queue on this chain.
+ */
+static inline struct sleepqueue *
+lookup(struct sleepqueue_chain *sc, void *wchan)
+{
+ struct sleepqueue *sq;
+
+ LIST_FOREACH(sq, &sc->sc_queues, sq_hash) {
+ if (sq->sq_wchan == wchan)
+ break;
+ }
+ return (sq);
+}
+
+struct sleepqueue *
+_sleepq_lookup(void *wchan)
+{
+ /* Find the sleepqueue for wchan, or NULL if none exists. */
+ return (lookup(SC_LOOKUP(wchan), wchan));
+}
+
+void
+_sleepq_add(void *wchan, struct pthread *td)
+{
+ struct sleepqueue_chain *sc;
+ struct sleepqueue *sq;
+
+ sc = SC_LOOKUP(wchan);
+ sq = lookup(sc, wchan);
+ if (sq != NULL) {
+ /* Channel already has a queue: donate td's spare to it. */
+ SLIST_INSERT_HEAD(&sq->sq_freeq, td->sleepqueue, sq_flink);
+ } else {
+ /* First waiter: td's own queue becomes the channel queue. */
+ sq = td->sleepqueue;
+ LIST_INSERT_HEAD(&sc->sc_queues, sq, sq_hash);
+ sq->sq_wchan = wchan;
+ /* sq->sq_type = type; */
+ }
+ td->sleepqueue = NULL;
+ td->wchan = wchan;
+ /*
+ * Mostly LIFO (head) insertion, with occasional FIFO (tail)
+ * inserts at a rate controlled by _thr_queuefifo.
+ * NOTE(review): confirm the intended tail-insert frequency.
+ */
+ if (((++sc->sc_enqcnt << _thr_queuefifo) & 0xff) != 0)
+ TAILQ_INSERT_HEAD(&sq->sq_blocked, td, wle);
+ else
+ TAILQ_INSERT_TAIL(&sq->sq_blocked, td, wle);
+}
+
+int
+_sleepq_remove(struct sleepqueue *sq, struct pthread *td)
+{
+ int rc;
+
+ TAILQ_REMOVE(&sq->sq_blocked, td, wle);
+ if (TAILQ_EMPTY(&sq->sq_blocked)) {
+ /* Last waiter: it takes the channel queue itself back. */
+ LIST_REMOVE(sq, sq_hash);
+ td->sleepqueue = sq;
+ rc = 0;
+ } else {
+ /* Otherwise the thread takes a spare from the free list. */
+ td->sleepqueue = SLIST_FIRST(&sq->sq_freeq);
+ SLIST_REMOVE_HEAD(&sq->sq_freeq, sq_flink);
+ rc = 1;
+ }
+ td->wchan = NULL;
+ /* Non-zero result: more waiters remain on the channel. */
+ return (rc);
+}
+
+/*
+ * Remove every thread from "sq", invoking "cb" on each, and hand a
+ * sleepqueue back to each thread: the channel queue itself goes to
+ * the first waiter, the spares on sq_freeq to the rest.  Leaves sq
+ * empty.
+ */
+void
+_sleepq_drop(struct sleepqueue *sq,
+ void (*cb)(struct pthread *, void *arg), void *arg)
+{
+ struct pthread *td;
+ struct sleepqueue *sq2;
+
+ td = TAILQ_FIRST(&sq->sq_blocked);
+ if (td == NULL)
+ return;
+ LIST_REMOVE(sq, sq_hash);
+ TAILQ_REMOVE(&sq->sq_blocked, td, wle);
+ if (cb != NULL)
+ cb(td, arg);
+ td->sleepqueue = sq;
+ td->wchan = NULL;
+ sq2 = SLIST_FIRST(&sq->sq_freeq);
+ TAILQ_FOREACH(td, &sq->sq_blocked, wle) {
+ if (cb != NULL)
+ cb(td, arg);
+ td->sleepqueue = sq2;
+ td->wchan = NULL;
+ sq2 = SLIST_NEXT(sq2, sq_flink);
+ }
+ TAILQ_INIT(&sq->sq_blocked);
+ SLIST_INIT(&sq->sq_freeq);
+}
diff --git a/lib/libthr/thread/thr_spec.c b/lib/libthr/thread/thr_spec.c
new file mode 100644
index 0000000000000..faa88e2fc8bac
--- /dev/null
+++ b/lib/libthr/thread/thr_spec.c
@@ -0,0 +1,246 @@
+/*-
+ * SPDX-License-Identifier: BSD-3-Clause
+ *
+ * Copyright (c) 1995 John Birrell <jb@cimlogic.com.au>.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. Neither the name of the author nor the names of any co-contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY JOHN BIRRELL AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+
+#include "namespace.h"
+#include <sys/mman.h>
+#include <signal.h>
+#include <stdlib.h>
+#include <string.h>
+#include <errno.h>
+#include <pthread.h>
+#include "un-namespace.h"
+#include "libc_private.h"
+
+#include "thr_private.h"
+
+/* Used in symbol lookup of libthread_db */
+struct pthread_key _thread_keytable[PTHREAD_KEYS_MAX];
+
+__weak_reference(_pthread_key_create, pthread_key_create);
+__weak_reference(_pthread_key_delete, pthread_key_delete);
+__weak_reference(_pthread_getspecific, pthread_getspecific);
+__weak_reference(_pthread_setspecific, pthread_setspecific);
+
+
+int
+_pthread_key_create(pthread_key_t *key, void (*destructor)(void *))
+{
+ struct pthread *curthread;
+ int i;
+
+ _thr_check_init();
+
+ curthread = _get_curthread();
+
+ /* Hand out the first free slot; user-visible keys are 1-based. */
+ THR_LOCK_ACQUIRE(curthread, &_keytable_lock);
+ for (i = 0; i < PTHREAD_KEYS_MAX; i++) {
+
+ if (_thread_keytable[i].allocated == 0) {
+ _thread_keytable[i].allocated = 1;
+ _thread_keytable[i].destructor = destructor;
+ /* Bump seqno so stale per-thread values are ignored. */
+ _thread_keytable[i].seqno++;
+
+ THR_LOCK_RELEASE(curthread, &_keytable_lock);
+ *key = i + 1;
+ return (0);
+ }
+
+ }
+ THR_LOCK_RELEASE(curthread, &_keytable_lock);
+ return (EAGAIN);
+}
+
+/*
+ * Delete a TSD key.  Values still stored under the key in any thread
+ * are left untouched; cleaning them up is the application's job.
+ */
+int
+_pthread_key_delete(pthread_key_t userkey)
+{
+ struct pthread *self;
+ int idx, error;
+
+ idx = userkey - 1;
+ if ((unsigned int)idx >= PTHREAD_KEYS_MAX)
+ return (EINVAL);
+ self = _get_curthread();
+ error = EINVAL;
+ THR_LOCK_ACQUIRE(self, &_keytable_lock);
+ if (_thread_keytable[idx].allocated) {
+ _thread_keytable[idx].allocated = 0;
+ error = 0;
+ }
+ THR_LOCK_RELEASE(self, &_keytable_lock);
+ return (error);
+}
+
+/*
+ * Called at thread exit: run TSD destructors for this thread's
+ * non-NULL values, making at most PTHREAD_DESTRUCTOR_ITERATIONS
+ * passes (destructors may store new values), then unmap the
+ * per-thread value array.
+ */
+void
+_thread_cleanupspecific(void)
+{
+ struct pthread *curthread;
+ void (*destructor)(void *);
+ const void *data;
+ int i, key;
+
+ curthread = _get_curthread();
+ if (curthread->specific == NULL)
+ return;
+ THR_LOCK_ACQUIRE(curthread, &_keytable_lock);
+ for (i = 0; i < PTHREAD_DESTRUCTOR_ITERATIONS &&
+ curthread->specific_data_count > 0; i++) {
+ for (key = 0; key < PTHREAD_KEYS_MAX &&
+ curthread->specific_data_count > 0; key++) {
+ destructor = NULL;
+
+ if (_thread_keytable[key].allocated &&
+ (curthread->specific[key].data != NULL)) {
+ if (curthread->specific[key].seqno ==
+ _thread_keytable[key].seqno) {
+ data = curthread->specific[key].data;
+ destructor = _thread_keytable[key].
+ destructor;
+ }
+ curthread->specific[key].data = NULL;
+ curthread->specific_data_count--;
+ } else if (curthread->specific[key].data != NULL) {
+ /*
+ * This can happen if the key is
+ * deleted via pthread_key_delete
+ * without first setting the value to
+ * NULL in all threads. POSIX says
+ * that the destructor is not invoked
+ * in this case.
+ */
+ curthread->specific[key].data = NULL;
+ curthread->specific_data_count--;
+ }
+
+ /*
+ * If there is a destructor, call it with the
+ * key table entry unlocked.
+ */
+ if (destructor != NULL) {
+ THR_LOCK_RELEASE(curthread, &_keytable_lock);
+ destructor(__DECONST(void *, data));
+ THR_LOCK_ACQUIRE(curthread, &_keytable_lock);
+ }
+ }
+ }
+ THR_LOCK_RELEASE(curthread, &_keytable_lock);
+ munmap(curthread->specific, PTHREAD_KEYS_MAX * sizeof(struct
+ pthread_specific_elem));
+ curthread->specific = NULL;
+ if (curthread->specific_data_count > 0) {
+ stderr_debug("Thread %p has exited with leftover "
+ "thread-specific data after %d destructor iterations\n",
+ curthread, PTHREAD_DESTRUCTOR_ITERATIONS);
+ }
+}
+
+int
+_pthread_setspecific(pthread_key_t userkey, const void *value)
+{
+ struct pthread *pthread;
+ void *tmp;
+ pthread_key_t key;
+
+ /* Keys are 1-based externally; reject invalid/unallocated keys. */
+ key = userkey - 1;
+ if ((unsigned int)key >= PTHREAD_KEYS_MAX ||
+ !_thread_keytable[key].allocated)
+ return (EINVAL);
+
+ pthread = _get_curthread();
+ if (pthread->specific == NULL) {
+ /*
+ * Lazily create the per-thread value array.
+ * NOTE(review): mmap() rather than malloc() -- presumably
+ * to avoid re-entering the allocator; confirm before
+ * changing.
+ */
+ tmp = mmap(NULL, PTHREAD_KEYS_MAX *
+ sizeof(struct pthread_specific_elem),
+ PROT_READ | PROT_WRITE, MAP_PRIVATE | MAP_ANON, -1, 0);
+ if (tmp == MAP_FAILED)
+ return (ENOMEM);
+ pthread->specific = tmp;
+ }
+ /* Track how many non-NULL values this thread currently holds. */
+ if (pthread->specific[key].data == NULL) {
+ if (value != NULL)
+ pthread->specific_data_count++;
+ } else if (value == NULL)
+ pthread->specific_data_count--;
+ pthread->specific[key].data = value;
+ /* Record the key generation so stale values can be detected. */
+ pthread->specific[key].seqno = _thread_keytable[key].seqno;
+ return (0);
+}
+
+/*
+ * Return the calling thread's value for "userkey", or NULL when the
+ * key is out of range, unused by this thread, or stale (the key was
+ * deleted and reallocated since the value was stored).
+ */
+void *
+_pthread_getspecific(pthread_key_t userkey)
+{
+ struct pthread *self;
+ pthread_key_t idx;
+
+ idx = userkey - 1;
+ if ((unsigned int)idx >= PTHREAD_KEYS_MAX)
+ return (NULL);
+
+ self = _get_curthread();
+ if (!_thread_keytable[idx].allocated || self->specific == NULL ||
+ self->specific[idx].seqno != _thread_keytable[idx].seqno)
+ return (NULL);
+ return (__DECONST(void *, self->specific[idx].data));
+}
+
+/*
+ * Called when a DSO is unloaded: null out any TSD destructor whose
+ * code lives inside the departing object so it can never be invoked
+ * through a dangling pointer.
+ */
+void
+_thr_tsd_unload(struct dl_phdr_info *phdr_info)
+{
+ struct pthread *curthread;
+ void (*destructor)(void *);
+ int key;
+
+ curthread = _get_curthread();
+ THR_LOCK_ACQUIRE(curthread, &_keytable_lock);
+ for (key = 0; key < PTHREAD_KEYS_MAX; key++) {
+ if (!_thread_keytable[key].allocated)
+ continue;
+ destructor = _thread_keytable[key].destructor;
+ if (destructor == NULL)
+ continue;
+ if (__elf_phdr_match_addr(phdr_info, destructor))
+ _thread_keytable[key].destructor = NULL;
+ }
+ THR_LOCK_RELEASE(curthread, &_keytable_lock);
+}
diff --git a/lib/libthr/thread/thr_spinlock.c b/lib/libthr/thread/thr_spinlock.c
new file mode 100644
index 0000000000000..8680a7229018c
--- /dev/null
+++ b/lib/libthr/thread/thr_spinlock.c
@@ -0,0 +1,126 @@
+/*-
+ * SPDX-License-Identifier: BSD-3-Clause
+ *
+ * Copyright (c) 1997 John Birrell <jb@cimlogic.com.au>.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. Neither the name of the author nor the names of any co-contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY JOHN BIRRELL AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+
+#include <sys/types.h>
+#include <pthread.h>
+#include <libc_private.h>
+#include <spinlock.h>
+
+#include "thr_private.h"
+
+#define MAX_SPINLOCKS 72
+
+/*
+ * These data structures are used to trace all spinlocks
+ * in libc.  Each libc spinlock is lazily bound to one
+ * spinlock_extra slot, whose umutex provides the actual
+ * locking.
+ */
+struct spinlock_extra {
+	spinlock_t *owner;	/* libc spinlock bound to this slot */
+	struct umutex lock;	/* backing lock for the slot */
+};
+
+/* Serializes slot allocation in init_spinlock(). */
+static struct umutex spinlock_static_lock = DEFAULT_UMUTEX;
+static struct spinlock_extra extra[MAX_SPINLOCKS];
+static int spinlock_count;	/* number of slots handed out so far */
+static int initialized;		/* set once _thr_spinlock_init() has run */
+
+static void init_spinlock(spinlock_t *lck);
+
+/*
+ * These are for compatibility only.  Spinlocks of this type
+ * are deprecated.
+ */
+
+void
+__thr_spinunlock(spinlock_t *lck)
+{
+	struct spinlock_extra *sp;
+
+	/* Release the umutex backing this legacy libc spinlock. */
+	sp = lck->thr_extra;
+	THR_UMUTEX_UNLOCK(_get_curthread(), &sp->lock);
+}
+
+void
+__thr_spinlock(spinlock_t *lck)
+{
+	struct spinlock_extra *sp;
+
+	/* Legacy spinlocks are only meaningful once threading is active. */
+	if (!__isthreaded)
+		PANIC("Spinlock called when not threaded.");
+	if (!initialized)
+		PANIC("Spinlocks not initialized.");
+	/* Bind the libc spinlock to a umutex slot on first use. */
+	if (lck->thr_extra == NULL)
+		init_spinlock(lck);
+	sp = lck->thr_extra;
+	THR_UMUTEX_LOCK(_get_curthread(), &sp->lock);
+}
+
+static void
+init_spinlock(spinlock_t *lck)
+{
+	struct pthread *curthread;
+
+	curthread = _get_curthread();
+	/*
+	 * Serialize slot allocation, and re-check thr_extra under the
+	 * lock: another thread may have bound this spinlock already.
+	 */
+	THR_UMUTEX_LOCK(curthread, &spinlock_static_lock);
+	if (lck->thr_extra == NULL && spinlock_count < MAX_SPINLOCKS) {
+		lck->thr_extra = &extra[spinlock_count];
+		_thr_umutex_init(&extra[spinlock_count].lock);
+		extra[spinlock_count].owner = lck;
+		spinlock_count++;
+	}
+	THR_UMUTEX_UNLOCK(curthread, &spinlock_static_lock);
+	/* Running out of slots is fatal; the caller cannot proceed. */
+	if (lck->thr_extra == NULL)
+		PANIC("Warning: exceeded max spinlocks");
+}
+
+void
+_thr_spinlock_init(void)
+{
+	int i;
+
+	_thr_umutex_init(&spinlock_static_lock);
+	if (initialized == 0) {
+		/* First call: nothing to reset yet. */
+		initialized = 1;
+		return;
+	}
+	/*
+	 * Called again after fork() to reset the state of libc spin
+	 * locks.  This is not quite right since libc may be in an
+	 * inconsistent state, and resetting the locks so the current
+	 * thread can take them may not help much, but we do our best.
+	 * It would be better to do pthread_atfork in libc.
+	 */
+	for (i = 0; i < spinlock_count; i++)
+		_thr_umutex_init(&extra[i].lock);
+}
diff --git a/lib/libthr/thread/thr_stack.c b/lib/libthr/thread/thr_stack.c
new file mode 100644
index 0000000000000..b08bafdd94176
--- /dev/null
+++ b/lib/libthr/thread/thr_stack.c
@@ -0,0 +1,320 @@
+/*-
+ * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
+ *
+ * Copyright (c) 2001 Daniel Eischen <deischen@freebsd.org>
+ * Copyright (c) 2000-2001 Jason Evans <jasone@freebsd.org>
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHORS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHORS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+
+#include <sys/types.h>
+#include <sys/mman.h>
+#include <sys/queue.h>
+#include <sys/resource.h>
+#include <sys/sysctl.h>
+#include <stdlib.h>
+#include <pthread.h>
+#include <link.h>
+
+#include "thr_private.h"
+
+/* Spare thread stack. */
+struct stack {
+ LIST_ENTRY(stack) qe; /* Stack queue linkage. */
+ size_t stacksize; /* Stack size (rounded up). */
+ size_t guardsize; /* Guard size. */
+ void *stackaddr; /* Stack address. */
+};
+
+/*
+ * Default sized (stack and guard) spare stack queue. Stacks are cached
+ * to avoid additional complexity managing mmap()ed stack regions. Spare
+ * stacks are used in LIFO order to increase cache locality.
+ */
+static LIST_HEAD(, stack) dstackq = LIST_HEAD_INITIALIZER(dstackq);
+
+/*
+ * Miscellaneous sized (non-default stack and/or guard) spare stack queue.
+ * Stacks are cached to avoid additional complexity managing mmap()ed
+ * stack regions. This list is unordered, since ordering on both stack
+ * size and guard size would be more trouble than it's worth. Stacks are
+ * allocated from this cache on a first size match basis.
+ */
+static LIST_HEAD(, stack) mstackq = LIST_HEAD_INITIALIZER(mstackq);
+
+/**
+ * Base address of the last stack allocated (including its red zone, if
+ * there is one). Stacks are allocated contiguously, starting beyond the
+ * top of the main stack. When a new stack is created, a red zone is
+ * typically created (actually, the red zone is mapped with PROT_NONE) above
+ * the top of the stack, such that the stack will not be able to grow all
+ * the way to the bottom of the next stack. This isn't fool-proof. It is
+ * possible for a stack to grow by a large amount, such that it grows into
+ * the next stack, and as long as the memory within the red zone is never
+ * accessed, nothing will prevent one thread stack from trouncing all over
+ * the next.
+ *
+ * low memory
+ * . . . . . . . . . . . . . . . . . .
+ * | |
+ * | stack 3 | start of 3rd thread stack
+ * +-----------------------------------+
+ * | |
+ * | Red Zone (guard page) | red zone for 2nd thread
+ * | |
+ * +-----------------------------------+
+ * | stack 2 - _thr_stack_default | top of 2nd thread stack
+ * | |
+ * | |
+ * | |
+ * | |
+ * | stack 2 |
+ * +-----------------------------------+ <-- start of 2nd thread stack
+ * | |
+ * | Red Zone | red zone for 1st thread
+ * | |
+ * +-----------------------------------+
+ * | stack 1 - _thr_stack_default | top of 1st thread stack
+ * | |
+ * | |
+ * | |
+ * | |
+ * | stack 1 |
+ * +-----------------------------------+ <-- start of 1st thread stack
+ * | | (initial value of last_stack)
+ * | Red Zone |
+ * | | red zone for main thread
+ * +-----------------------------------+
+ * | USRSTACK - _thr_stack_initial | top of main thread stack
+ * | | ^
+ * | | |
+ * | | |
+ * | | | stack growth
+ * | |
+ * +-----------------------------------+ <-- start of main thread stack
+ * (USRSTACK)
+ * high memory
+ *
+ */
+static char *last_stack = NULL;
+
+/*
+ * Round size up to the nearest multiple of _thr_page_size.
+ * The divide/multiply form makes no power-of-two assumption about
+ * the page size; it only requires _thr_page_size to be non-zero.
+ */
+static inline size_t
+round_up(size_t size)
+{
+	if (size % _thr_page_size != 0)
+		size = ((size / _thr_page_size) + 1) *
+		    _thr_page_size;
+	/* Parenthesized return per style(9), matching the rest of the file. */
+	return (size);
+}
+
+void
+_thr_stack_fix_protection(struct pthread *thrd)
+{
+	char *stk;
+
+	/*
+	 * Re-apply the current RTLD stack protection to this thread's
+	 * stack, skipping the guard region below it.  The mprotect()
+	 * return value is not checked; this is best effort.
+	 */
+	stk = (char *)thrd->attr.stackaddr_attr +
+	    round_up(thrd->attr.guardsize_attr);
+	mprotect(stk, round_up(thrd->attr.stacksize_attr),
+	    _rtld_get_stack_prot());
+}
+
+static void
+singlethread_map_stacks_exec(void)
+{
+	struct rlimit rlim;
+	u_long usrstack;
+	size_t len;
+	int mib[2];
+
+	/* Locate the top of the main-thread stack via sysctl. */
+	mib[0] = CTL_KERN;
+	mib[1] = KERN_USRSTACK;
+	len = sizeof(usrstack);
+	if (sysctl(mib, sizeof(mib) / sizeof(mib[0]), &usrstack, &len,
+	    NULL, 0) == -1)
+		return;
+	if (getrlimit(RLIMIT_STACK, &rlim) == -1)
+		return;
+	/* Apply the RTLD-requested protection over the whole stack range. */
+	mprotect((void *)(uintptr_t)(usrstack - rlim.rlim_cur),
+	    rlim.rlim_cur, _rtld_get_stack_prot());
+}
+
+/*
+ * Re-apply the RTLD stack protection to every stack the library knows
+ * about: spare stacks cached on both queues, stacks of threads on the
+ * GC list, and stacks of live threads.  Presumably invoked when the
+ * required stack protection changes (e.g. an object needing executable
+ * stacks is loaded) -- TODO confirm against the rtld caller.
+ */
+void
+__thr_map_stacks_exec(void)
+{
+	struct pthread *curthread, *thrd;
+	struct stack *st;
+
+	/* Before threading is initialized only the main stack exists. */
+	if (!_thr_is_inited()) {
+		singlethread_map_stacks_exec();
+		return;
+	}
+	curthread = _get_curthread();
+	/* Read lock is enough: the lists are only traversed, not changed. */
+	THREAD_LIST_RDLOCK(curthread);
+	/* Cached stacks: skip the guard region below each stack. */
+	LIST_FOREACH(st, &mstackq, qe)
+		mprotect((char *)st->stackaddr + st->guardsize, st->stacksize,
+		    _rtld_get_stack_prot());
+	LIST_FOREACH(st, &dstackq, qe)
+		mprotect((char *)st->stackaddr + st->guardsize, st->stacksize,
+		    _rtld_get_stack_prot());
+	TAILQ_FOREACH(thrd, &_thread_gc_list, gcle)
+		_thr_stack_fix_protection(thrd);
+	TAILQ_FOREACH(thrd, &_thread_list, tle)
+		_thr_stack_fix_protection(thrd);
+	THREAD_LIST_UNLOCK(curthread);
+}
+
+/*
+ * Allocate a stack (and guard) for a new thread, preferring a cached
+ * spare stack over a fresh mmap().  On success attr->stackaddr_attr is
+ * set (to the usable stack base, above the guard) and 0 is returned;
+ * on failure it is left NULL and -1 is returned.
+ */
+int
+_thr_stack_alloc(struct pthread_attr *attr)
+{
+	struct pthread *curthread = _get_curthread();
+	struct stack *spare_stack;
+	size_t stacksize;
+	size_t guardsize;
+	char *stackaddr;
+
+	/*
+	 * Round up stack size to nearest multiple of _thr_page_size so
+	 * that mmap() will work. If the stack size is not an even
+	 * multiple, we end up initializing things such that there is
+	 * unused space above the beginning of the stack, so the stack
+	 * sits snugly against its guard.
+	 */
+	stacksize = round_up(attr->stacksize_attr);
+	guardsize = round_up(attr->guardsize_attr);
+
+	attr->stackaddr_attr = NULL;
+	attr->flags &= ~THR_STACK_USER;
+
+	/*
+	 * Use the garbage collector lock for synchronization of the
+	 * spare stack lists and allocations from usrstack.
+	 */
+	THREAD_LIST_WRLOCK(curthread);
+	/*
+	 * If the stack and guard sizes are default, try to allocate a stack
+	 * from the default-size stack cache:
+	 */
+	if ((stacksize == THR_STACK_DEFAULT) &&
+	    (guardsize == _thr_guard_default)) {
+		if ((spare_stack = LIST_FIRST(&dstackq)) != NULL) {
+			/* Use the spare stack. */
+			LIST_REMOVE(spare_stack, qe);
+			attr->stackaddr_attr = spare_stack->stackaddr;
+		}
+	}
+	/*
+	 * The user specified a non-default stack and/or guard size, so try to
+	 * allocate a stack from the non-default size stack cache, using the
+	 * rounded up stack size (stack_size) in the search:
+	 */
+	else {
+		LIST_FOREACH(spare_stack, &mstackq, qe) {
+			/* Both sizes must match exactly for reuse. */
+			if (spare_stack->stacksize == stacksize &&
+			    spare_stack->guardsize == guardsize) {
+				LIST_REMOVE(spare_stack, qe);
+				attr->stackaddr_attr = spare_stack->stackaddr;
+				break;
+			}
+		}
+	}
+	if (attr->stackaddr_attr != NULL) {
+		/* A cached stack was found. Release the lock. */
+		THREAD_LIST_UNLOCK(curthread);
+	}
+	else {
+		/*
+		 * Allocate a stack from or below usrstack, depending
+		 * on the LIBPTHREAD_BIGSTACK_MAIN env variable.
+		 */
+		if (last_stack == NULL)
+			last_stack = _usrstack - _thr_stack_initial -
+			    _thr_guard_default;
+
+		/* Allocate a new stack. */
+		stackaddr = last_stack - stacksize - guardsize;
+
+		/*
+		 * Even if stack allocation fails, we don't want to try to
+		 * use this location again, so unconditionally decrement
+		 * last_stack. Under normal operating conditions, the most
+		 * likely reason for an mmap() error is a stack overflow of
+		 * the adjacent thread stack.
+		 */
+		last_stack -= (stacksize + guardsize);
+
+		/* Release the lock before mmap'ing it. */
+		THREAD_LIST_UNLOCK(curthread);
+
+		/* Map the stack and guard page together, and split guard
+		   page from allocated space: */
+		if ((stackaddr = mmap(stackaddr, stacksize + guardsize,
+		     _rtld_get_stack_prot(), MAP_STACK,
+		     -1, 0)) != MAP_FAILED &&
+		    (guardsize == 0 ||
+		     mprotect(stackaddr, guardsize, PROT_NONE) == 0)) {
+			/* Usable stack begins just above the guard. */
+			stackaddr += guardsize;
+		} else {
+			/* Undo a mapping whose guard could not be set. */
+			if (stackaddr != MAP_FAILED)
+				munmap(stackaddr, stacksize + guardsize);
+			stackaddr = NULL;
+		}
+		attr->stackaddr_attr = stackaddr;
+	}
+	if (attr->stackaddr_attr != NULL)
+		return (0);
+	else
+		return (-1);
+}
+
+/* This function must be called with _thread_list_lock held. */
+void
+_thr_stack_free(struct pthread_attr *attr)
+{
+	struct stack *spare;
+
+	/* User-supplied stacks and empty attrs are not ours to cache. */
+	if (attr == NULL || (attr->flags & THR_STACK_USER) != 0 ||
+	    attr->stackaddr_attr == NULL)
+		return;
+
+	/*
+	 * Recycle the stack: the bookkeeping record is placed at the
+	 * very top of the stack region itself.
+	 */
+	spare = (struct stack *)((char *)attr->stackaddr_attr +
+	    attr->stacksize_attr - sizeof(struct stack));
+	spare->stacksize = round_up(attr->stacksize_attr);
+	spare->guardsize = round_up(attr->guardsize_attr);
+	spare->stackaddr = attr->stackaddr_attr;
+
+	if (spare->stacksize == THR_STACK_DEFAULT &&
+	    spare->guardsize == _thr_guard_default) {
+		/* Default stack/guard size: default-size cache. */
+		LIST_INSERT_HEAD(&dstackq, spare, qe);
+	} else {
+		/* Non-default stack/guard size: miscellaneous cache. */
+		LIST_INSERT_HEAD(&mstackq, spare, qe);
+	}
+	attr->stackaddr_attr = NULL;
+}
diff --git a/lib/libthr/thread/thr_suspend_np.c b/lib/libthr/thread/thr_suspend_np.c
new file mode 100644
index 0000000000000..2d68582623dd5
--- /dev/null
+++ b/lib/libthr/thread/thr_suspend_np.c
@@ -0,0 +1,186 @@
+/*-
+ * SPDX-License-Identifier: BSD-3-Clause
+ *
+ * Copyright (c) 1995-1998 John Birrell <jb@cimlogic.com.au>.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. Neither the name of the author nor the names of any co-contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY JOHN BIRRELL AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+
+#include "namespace.h"
+#include <errno.h>
+#include <pthread.h>
+#include <pthread_np.h>
+#include "un-namespace.h"
+
+#include "thr_private.h"
+
+static int suspend_common(struct pthread *, struct pthread *,
+ int);
+
+__weak_reference(_pthread_suspend_np, pthread_suspend_np);
+__weak_reference(_pthread_suspend_all_np, pthread_suspend_all_np);
+
+/* Suspend a thread: */
+int
+_pthread_suspend_np(pthread_t thread)
+{
+	struct pthread *curthread;
+	int ret;
+
+	curthread = _get_curthread();
+
+	/* Suspending the current thread doesn't make sense. */
+	if (thread == curthread) {
+		ret = EDEADLK;
+	} else if ((ret = _thr_ref_add(curthread, thread,
+	    /*include dead*/0)) == 0) {
+		/*
+		 * Hold a reference and the thread lock across the
+		 * suspension so the target cannot be reclaimed while
+		 * we operate on it.
+		 */
+		THR_THREAD_LOCK(curthread, thread);
+		suspend_common(curthread, thread, 1);
+		THR_THREAD_UNLOCK(curthread, thread);
+		_thr_ref_delete(curthread, thread);
+	}
+	return (ret);
+}
+
+/*
+ * Acquire the "single running thread" role.  If another thread holds
+ * it, sleep on the cycle counter until that thread releases the role
+ * in _thr_suspend_all_unlock().  The lock is dropped around the umtx
+ * wait so it is never held while blocked.
+ */
+void
+_thr_suspend_all_lock(struct pthread *curthread)
+{
+	int old;
+
+	THR_LOCK_ACQUIRE(curthread, &_suspend_all_lock);
+	while (_single_thread != NULL) {
+		/* Sample the cycle before dropping the lock to avoid
+		   missing a wakeup between release and wait. */
+		old = _suspend_all_cycle;
+		_suspend_all_waiters++;
+		THR_LOCK_RELEASE(curthread, &_suspend_all_lock);
+		_thr_umtx_wait_uint(&_suspend_all_cycle, old, NULL, 0);
+		THR_LOCK_ACQUIRE(curthread, &_suspend_all_lock);
+		_suspend_all_waiters--;
+	}
+	_single_thread = curthread;
+	THR_LOCK_RELEASE(curthread, &_suspend_all_lock);
+}
+
+void
+_thr_suspend_all_unlock(struct pthread *curthread)
+{
+
+	THR_LOCK_ACQUIRE(curthread, &_suspend_all_lock);
+	/* Give up the single-thread role and wake anyone waiting for it. */
+	_single_thread = NULL;
+	if (_suspend_all_waiters != 0) {
+		/* Bump the cycle so waiters see a changed value. */
+		_suspend_all_cycle++;
+		_thr_umtx_wake(&_suspend_all_cycle, INT_MAX, 0);
+	}
+	THR_LOCK_RELEASE(curthread, &_suspend_all_lock);
+}
+
+/*
+ * Suspend every thread except the caller.  The caller first takes the
+ * single-thread role, marks all other live threads as needing
+ * suspension, broadcasts SIGCANCEL, and then confirms each thread is
+ * actually suspended, blocking (with the list lock dropped) on any
+ * straggler.  Cancellation is disabled for the duration.
+ */
+void
+_pthread_suspend_all_np(void)
+{
+	struct pthread *curthread = _get_curthread();
+	struct pthread *thread;
+	int old_nocancel;
+	int ret;
+
+	old_nocancel = curthread->no_cancel;
+	curthread->no_cancel = 1;
+	_thr_suspend_all_lock(curthread);
+	THREAD_LIST_RDLOCK(curthread);
+	/* Pass 1: flag every other live thread for suspension. */
+	TAILQ_FOREACH(thread, &_thread_list, tle) {
+		if (thread != curthread) {
+			THR_THREAD_LOCK(curthread, thread);
+			if (thread->state != PS_DEAD &&
+			   !(thread->flags & THR_FLAGS_SUSPENDED))
+				thread->flags |= THR_FLAGS_NEED_SUSPEND;
+			THR_THREAD_UNLOCK(curthread, thread);
+		}
+	}
+	/* Broadcast SIGCANCEL to the whole process at once. */
+	thr_kill(-1, SIGCANCEL);
+
+restart:
+	/* Pass 2: verify each thread really suspended. */
+	TAILQ_FOREACH(thread, &_thread_list, tle) {
+		if (thread != curthread) {
+			/* First try to suspend the thread without waiting */
+			THR_THREAD_LOCK(curthread, thread);
+			ret = suspend_common(curthread, thread, 0);
+			if (ret == 0) {
+				/* Can not suspend without blocking; drop
+				   the list lock and wait on this thread. */
+				THREAD_LIST_UNLOCK(curthread);
+				/* A reference keeps the target alive while
+				   we block without the list lock. */
+				THR_REF_ADD(curthread, thread);
+				suspend_common(curthread, thread, 1);
+				THR_REF_DEL(curthread, thread);
+				_thr_try_gc(curthread, thread);
+				/* thread lock released */
+
+				THREAD_LIST_RDLOCK(curthread);
+				/*
+				 * Because we were blocked, things may have
+				 * been changed, we have to restart the
+				 * process.
+				 */
+				goto restart;
+			}
+			THR_THREAD_UNLOCK(curthread, thread);
+		}
+	}
+	THREAD_LIST_UNLOCK(curthread);
+	_thr_suspend_all_unlock(curthread);
+	curthread->no_cancel = old_nocancel;
+	_thr_testcancel(curthread);
+}
+
+/*
+ * Drive one thread to the suspended state.  Called with the target's
+ * thread lock held; the lock is held again on return.  Returns 1 when
+ * the thread is suspended, dead, or still being created; returns 0
+ * only when waitok is 0 and the suspension has not completed yet.
+ */
+static int
+suspend_common(struct pthread *curthread, struct pthread *thread,
+	int waitok)
+{
+	uint32_t tmp;
+
+	while (thread->state != PS_DEAD &&
+	      !(thread->flags & THR_FLAGS_SUSPENDED)) {
+		thread->flags |= THR_FLAGS_NEED_SUSPEND;
+		/* Thread is in creation (tid not yet assigned by the
+		   kernel, so it still reads TID_TERMINATED) -- it will
+		   honor the NEED_SUSPEND flag itself. */
+		if (thread->tid == TID_TERMINATED)
+			return (1);
+		/* Sample the cycle counter before signalling so a
+		   wakeup between unlock and wait is not lost. */
+		tmp = thread->cycle;
+		_thr_send_sig(thread, SIGCANCEL);
+		THR_THREAD_UNLOCK(curthread, thread);
+		if (waitok) {
+			_thr_umtx_wait_uint(&thread->cycle, tmp, NULL, 0);
+			THR_THREAD_LOCK(curthread, thread);
+		} else {
+			THR_THREAD_LOCK(curthread, thread);
+			return (0);
+		}
+	}
+
+	return (1);
+}
diff --git a/lib/libthr/thread/thr_switch_np.c b/lib/libthr/thread/thr_switch_np.c
new file mode 100644
index 0000000000000..aad8641e48060
--- /dev/null
+++ b/lib/libthr/thread/thr_switch_np.c
@@ -0,0 +1,60 @@
+/*-
+ * SPDX-License-Identifier: BSD-4-Clause
+ *
+ * Copyright (c) 1998 Daniel Eischen <eischen@vigrid.com>.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. All advertising materials mentioning features or use of this software
+ * must display the following acknowledgement:
+ * This product includes software developed by Daniel Eischen.
+ * 4. Neither the name of the author nor the names of any co-contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY DANIEL EISCHEN AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+
+#include "namespace.h"
+#include <errno.h>
+#include <pthread.h>
+#include <pthread_np.h>
+#include "un-namespace.h"
+
+#include "thr_private.h"
+
+
+__weak_reference(_pthread_switch_add_np, pthread_switch_add_np);
+__weak_reference(_pthread_switch_delete_np, pthread_switch_delete_np);
+
+int
+_pthread_switch_add_np(pthread_switch_routine_t routine __unused)
+{
+	/* Thread-switch hooks are not supported by this implementation. */
+	return (ENOTSUP);
+}
+
+int
+_pthread_switch_delete_np(pthread_switch_routine_t routine __unused)
+{
+	/* Nothing to delete; switch hooks are never registered. */
+	return (ENOTSUP);
+}
diff --git a/lib/libthr/thread/thr_symbols.c b/lib/libthr/thread/thr_symbols.c
new file mode 100644
index 0000000000000..cee263c56826f
--- /dev/null
+++ b/lib/libthr/thread/thr_symbols.c
@@ -0,0 +1,61 @@
+/*-
+ * SPDX-License-Identifier: BSD-3-Clause
+ *
+ * Copyright (c) 2004 David Xu <davidxu@freebsd.org>
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. Neither the name of the author nor the names of any co-contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY JOHN BIRRELL AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+
+#include <sys/types.h>
+#include <stddef.h>
+#include <pthread.h>
+#include <rtld.h>
+
+#include "thr_private.h"
+
+/* A collection of symbols needed by debugger */
+
+/*
+ * Structure-field offsets and constants exported as plain ints so a
+ * debugger (via libthread_db) can locate libthr's private thread state
+ * without compile-time knowledge of the structure layouts.
+ */
+
+/* int _libthr_debug */
+int _thread_off_tcb = offsetof(struct pthread, tcb);
+int _thread_off_tid = offsetof(struct pthread, tid);
+int _thread_off_next = offsetof(struct pthread, tle.tqe_next);
+int _thread_off_attr_flags = offsetof(struct pthread, attr.flags);
+int _thread_off_linkmap = offsetof(Obj_Entry, linkmap);
+int _thread_off_tlsindex = offsetof(Obj_Entry, tlsindex);
+int _thread_off_report_events = offsetof(struct pthread, report_events);
+int _thread_off_event_mask = offsetof(struct pthread, event_mask);
+int _thread_off_event_buf = offsetof(struct pthread, event_buf);
+int _thread_size_key = sizeof(struct pthread_key);
+int _thread_off_key_allocated = offsetof(struct pthread_key, allocated);
+int _thread_off_key_destructor = offsetof(struct pthread_key, destructor);
+int _thread_max_keys = PTHREAD_KEYS_MAX;
+int _thread_off_dtv = DTV_OFFSET;
+int _thread_off_state = offsetof(struct pthread, state);
+int _thread_state_running = PS_RUNNING;
+/* NOTE(review): "zoombie" is presumably the historical exported symbol
+   name that debuggers reference -- confirm against libthread_db before
+   ever renaming. */
+int _thread_state_zoombie = PS_DEAD;
diff --git a/lib/libthr/thread/thr_syscalls.c b/lib/libthr/thread/thr_syscalls.c
new file mode 100644
index 0000000000000..025dfc75fab6e
--- /dev/null
+++ b/lib/libthr/thread/thr_syscalls.c
@@ -0,0 +1,694 @@
+/*
+ * Copyright (c) 2014 The FreeBSD Foundation.
+ * Copyright (C) 2005 David Xu <davidxu@freebsd.org>.
+ * Copyright (c) 2003 Daniel Eischen <deischen@freebsd.org>.
+ * Copyright (C) 2000 Jason Evans <jasone@freebsd.org>.
+ * All rights reserved.
+ *
+ * Portions of this software were developed by Konstantin Belousov
+ * under sponsorship from the FreeBSD Foundation.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice(s), this list of conditions and the following disclaimer as
+ * the first lines of this file unmodified other than the possible
+ * addition of one or more copyright notices.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice(s), this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDER(S) ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
+ * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
+ * OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
+ * EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/*-
+ * SPDX-License-Identifier: BSD-3-Clause
+ *
+ * Copyright (c) 1995-1998 John Birrell <jb@cimlogic.com.au>
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. Neither the name of the author nor the names of any co-contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY JOHN BIRRELL AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ */
+
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+
+#include "namespace.h"
+#include <sys/types.h>
+#include <sys/mman.h>
+#include <sys/param.h>
+#include <sys/select.h>
+#include <sys/signalvar.h>
+#include <sys/socket.h>
+#include <sys/stat.h>
+#include <sys/time.h>
+#include <sys/uio.h>
+#include <sys/wait.h>
+#include <aio.h>
+#include <dirent.h>
+#include <errno.h>
+#include <fcntl.h>
+#include <poll.h>
+#include <signal.h>
+#include <stdarg.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <termios.h>
+#include <unistd.h>
+#include <pthread.h>
+#include "un-namespace.h"
+
+#include "libc_private.h"
+#include "thr_private.h"
+
+/*
+ * Cancellation behavior:
+ *   If the thread is canceled, no connection was accepted: a pending
+ *   cancel is only acted on when accept() failed.
+ */
+static int
+__thr_accept(int s, struct sockaddr *addr, socklen_t *addrlen)
+{
+	struct pthread *curthread;
+	int ret;
+
+	curthread = _get_curthread();
+	_thr_cancel_enter(curthread);
+	ret = __sys_accept(s, addr, addrlen);
+	_thr_cancel_leave(curthread, ret == -1);
+
+	return (ret);
+}
+
+/*
+ * Cancellation behavior:
+ * If thread is canceled, no socket is created.
+ */
+static int
+__thr_accept4(int s, struct sockaddr *addr, socklen_t *addrlen, int flags)
+{
+	struct pthread *curthread;
+	int ret;
+
+	curthread = _get_curthread();
+	_thr_cancel_enter(curthread);
+	ret = __sys_accept4(s, addr, addrlen, flags);
+	/* Act on a pending cancel only if no socket was created. */
+	_thr_cancel_leave(curthread, ret == -1);
+
+	return (ret);
+}
+
+/*
+ * Cancellation behavior:
+ *   A pending cancel is always acted on after the wait returns,
+ *   whether or not the call succeeded.
+ */
+static int
+__thr_aio_suspend(const struct aiocb * const iocbs[], int niocb, const struct
+	timespec *timeout)
+{
+	struct pthread *curthread;
+	int ret;
+
+	curthread = _get_curthread();
+	_thr_cancel_enter(curthread);
+	ret = __sys_aio_suspend(iocbs, niocb, timeout);
+	_thr_cancel_leave(curthread, 1);
+
+	return (ret);
+}
+
+/*
+ * Cancellation behavior:
+ * According to manual of close(), the file descriptor is always deleted.
+ * Here, thread is only canceled after the system call, so the file
+ * descriptor is always deleted despite whether the thread is canceled
+ * or not.
+ */
+static int
+__thr_close(int fd)
+{
+	struct pthread *curthread;
+	int ret;
+
+	curthread = _get_curthread();
+	/* Do not test for a pending cancel on the way in (flag 0)... */
+	_thr_cancel_enter2(curthread, 0);
+	ret = __sys_close(fd);
+	/* ...only after the descriptor has been deleted. */
+	_thr_cancel_leave(curthread, 1);
+
+	return (ret);
+}
+
+/*
+ * Cancellation behavior:
+ * If the thread is canceled, connection is not made.
+ */
+static int
+__thr_connect(int fd, const struct sockaddr *name, socklen_t namelen)
+{
+	struct pthread *curthread;
+	int ret;
+
+	curthread = _get_curthread();
+	_thr_cancel_enter(curthread);
+	ret = __sys_connect(fd, name, namelen);
+	/* Canceled only if the connect failed. */
+	_thr_cancel_leave(curthread, ret == -1);
+
+	return (ret);
+}
+
+/*
+ * Cancellation behavior:
+ * According to specification, only F_SETLKW is a cancellation point.
+ * Thread is only canceled at start, or canceled if the system call
+ * is failure, this means the function does not generate side effect
+ * if it is canceled.
+ */
+static int
+__thr_fcntl(int fd, int cmd, ...)
+{
+	struct pthread *curthread;
+	int ret;
+	va_list ap;
+
+	curthread = _get_curthread();
+	va_start(ap, cmd);
+	/* Only blocking lock requests are cancellation points. */
+	if (cmd == F_OSETLKW || cmd == F_SETLKW) {
+		_thr_cancel_enter(curthread);
+		ret = __sys_fcntl(fd, cmd, va_arg(ap, void *));
+		_thr_cancel_leave(curthread, ret == -1);
+	} else {
+		ret = __sys_fcntl(fd, cmd, va_arg(ap, void *));
+	}
+	va_end(ap);
+
+	return (ret);
+}
+
+/*
+ * Cancellation behavior:
+ * Thread may be canceled after system call.
+ */
+static int
+__thr_fsync(int fd)
+{
+	struct pthread *curthread;
+	int ret;
+
+	curthread = _get_curthread();
+	/* Cancel is only checked after the data has been synced. */
+	_thr_cancel_enter2(curthread, 0);
+	ret = __sys_fsync(fd);
+	_thr_cancel_leave(curthread, 1);
+
+	return (ret);
+}
+
+/*
+ * Cancellation behavior:
+ *   Thread may be canceled after system call.
+ */
+static int
+__thr_fdatasync(int fd)
+{
+	struct pthread *curthread;
+	int ret;
+
+	curthread = _get_curthread();
+	_thr_cancel_enter2(curthread, 0);
+	ret = __sys_fdatasync(fd);
+	_thr_cancel_leave(curthread, 1);
+
+	return (ret);
+}
+
+/*
+ * Cancellation behavior:
+ * Thread may be canceled after system call.
+ */
+static int
+__thr_msync(void *addr, size_t len, int flags)
+{
+	struct pthread *curthread;
+	int ret;
+
+	curthread = _get_curthread();
+	/* The sync always completes; cancel is only checked afterwards. */
+	_thr_cancel_enter2(curthread, 0);
+	ret = __sys_msync(addr, len, flags);
+	_thr_cancel_leave(curthread, 1);
+
+	return (ret);
+}
+
+/*
+ * Cancellation behavior:
+ *   A pending cancel is acted on even if the sleep completed.
+ */
+static int
+__thr_clock_nanosleep(clockid_t clock_id, int flags,
+    const struct timespec *time_to_sleep, struct timespec *time_remaining)
+{
+	struct pthread *curthread;
+	int ret;
+
+	curthread = _get_curthread();
+	_thr_cancel_enter(curthread);
+	ret = __sys_clock_nanosleep(clock_id, flags, time_to_sleep,
+	    time_remaining);
+	_thr_cancel_leave(curthread, 1);
+
+	return (ret);
+}
+
+/*
+ * Cancellation behavior:
+ *   A pending cancel is acted on even if the sleep completed.
+ */
+static int
+__thr_nanosleep(const struct timespec *time_to_sleep,
+    struct timespec *time_remaining)
+{
+	struct pthread *curthread;
+	int ret;
+
+	curthread = _get_curthread();
+	_thr_cancel_enter(curthread);
+	ret = __sys_nanosleep(time_to_sleep, time_remaining);
+	_thr_cancel_leave(curthread, 1);
+
+	return (ret);
+}
+
+/*
+ * Cancellation behavior:
+ * If the thread is canceled, file is not opened.
+ */
+static int
+__thr_openat(int fd, const char *path, int flags, ...)
+{
+	struct pthread *curthread;
+	int mode, ret;
+	va_list ap;
+
+
+	/* Check if the file is being created: */
+	if ((flags & O_CREAT) != 0) {
+		/* Get the creation mode: */
+		va_start(ap, flags);
+		mode = va_arg(ap, int);
+		va_end(ap);
+	} else {
+		mode = 0;
+	}
+
+	curthread = _get_curthread();
+	_thr_cancel_enter(curthread);
+	ret = __sys_openat(fd, path, flags, mode);
+	/* Canceled only if no file descriptor was produced. */
+	_thr_cancel_leave(curthread, ret == -1);
+
+	return (ret);
+}
+
+/*
+ * Cancellation behavior:
+ * Thread may be canceled at start, but if the system call returns something,
+ * the thread is not canceled.
+ */
+static int
+__thr_poll(struct pollfd *fds, unsigned int nfds, int timeout)
+{
+	struct pthread *curthread;
+	int ret;
+
+	curthread = _get_curthread();
+	_thr_cancel_enter(curthread);
+	ret = __sys_poll(fds, nfds, timeout);
+	/* Canceled only if the poll failed. */
+	_thr_cancel_leave(curthread, ret == -1);
+
+	return (ret);
+}
+
+/*
+ * Cancellation behavior:
+ * Thread may be canceled at start, but if the system call returns something,
+ * the thread is not canceled.
+ */
+static int
+__thr_ppoll(struct pollfd pfd[], nfds_t nfds, const struct timespec *
+	timeout, const sigset_t *newsigmask)
+{
+	struct pthread *curthread;
+	int ret;
+
+	curthread = _get_curthread();
+	_thr_cancel_enter(curthread);
+	ret = __sys_ppoll(pfd, nfds, timeout, newsigmask);
+	/* Canceled only if the poll failed. */
+	_thr_cancel_leave(curthread, ret == -1);
+
+	return (ret);
+}
+
+/*
+ * Cancellation behavior:
+ * Thread may be canceled at start, but if the system call returns something,
+ * the thread is not canceled.
+ */
+static int
+__thr_pselect(int count, fd_set *rfds, fd_set *wfds, fd_set *efds,
+	const struct timespec *timo, const sigset_t *mask)
+{
+	struct pthread *curthread;
+	int ret;
+
+	curthread = _get_curthread();
+	_thr_cancel_enter(curthread);
+	ret = __sys_pselect(count, rfds, wfds, efds, timo, mask);
+	/* Canceled only if the select failed. */
+	_thr_cancel_leave(curthread, ret == -1);
+
+	return (ret);
+}
+
+/*
+ * Cancellation behavior:
+ *   Only a blocking call (nevents != 0) is a cancellation point, and a
+ *   pending cancel is acted on only if the call failed and no changes
+ *   were submitted (nchanges == 0), so submitted changes are never
+ *   silently discarded.
+ */
+static int
+__thr_kevent(int kq, const struct kevent *changelist, int nchanges,
+    struct kevent *eventlist, int nevents, const struct timespec *timeout)
+{
+	struct pthread *curthread;
+	int ret;
+
+	if (nevents == 0) {
+		/*
+		 * No blocking, do not make the call cancellable.
+		 */
+		return (__sys_kevent(kq, changelist, nchanges, eventlist,
+		    nevents, timeout));
+	}
+	curthread = _get_curthread();
+	_thr_cancel_enter(curthread);
+	ret = __sys_kevent(kq, changelist, nchanges, eventlist, nevents,
+	    timeout);
+	_thr_cancel_leave(curthread, ret == -1 && nchanges == 0);
+
+	return (ret);
+}
+
+/*
+ * Cancellation behavior:
+ * Thread may be canceled at start, but if the system call got some data,
+ * the thread is not canceled.
+ */
+static ssize_t
+__thr_read(int fd, void *buf, size_t nbytes)
+{
+	struct pthread *curthread;
+	ssize_t ret;
+
+	curthread = _get_curthread();
+	_thr_cancel_enter(curthread);
+	ret = __sys_read(fd, buf, nbytes);
+	/* Canceled only if no data was read. */
+	_thr_cancel_leave(curthread, ret == -1);
+
+	return (ret);
+}
+
+/*
+ * Cancellation behavior:
+ * Thread may be canceled at start, but if the system call got some data,
+ * the thread is not canceled.
+ */
+static ssize_t
+__thr_readv(int fd, const struct iovec *iov, int iovcnt)
+{
+	struct pthread *curthread;
+	ssize_t ret;
+
+	curthread = _get_curthread();
+	_thr_cancel_enter(curthread);
+	ret = __sys_readv(fd, iov, iovcnt);
+	/* Canceled only if no data was read. */
+	_thr_cancel_leave(curthread, ret == -1);
+	return (ret);
+}
+
+/*
+ * Cancellation behavior:
+ * Thread may be canceled at start, but if the system call got some data,
+ * the thread is not canceled.
+ */
+static ssize_t
+__thr_recvfrom(int s, void *b, size_t l, int f, struct sockaddr *from,
+    socklen_t *fl)
+{
+	struct pthread *curthread;
+	ssize_t ret;
+
+	curthread = _get_curthread();
+	_thr_cancel_enter(curthread);
+	ret = __sys_recvfrom(s, b, l, f, from, fl);
+	/* Canceled only if nothing was received. */
+	_thr_cancel_leave(curthread, ret == -1);
+	return (ret);
+}
+
+/*
+ * Cancellation behavior:
+ * Thread may be canceled at start, but if the system call got some data,
+ * the thread is not canceled.
+ */
+static ssize_t
+__thr_recvmsg(int s, struct msghdr *m, int f)
+{
+	struct pthread *curthread;
+	ssize_t ret;
+
+	curthread = _get_curthread();
+	_thr_cancel_enter(curthread);
+	ret = __sys_recvmsg(s, m, f);
+	/* Canceled only if nothing was received. */
+	_thr_cancel_leave(curthread, ret == -1);
+	return (ret);
+}
+
+/*
+ * Cancellation behavior:
+ * Thread may be canceled at start, but if the system call returns something,
+ * the thread is not canceled.
+ */
+static int
+__thr_select(int numfds, fd_set *readfds, fd_set *writefds, fd_set *exceptfds,
+	struct timeval *timeout)
+{
+	struct pthread *curthread;
+	int ret;
+
+	curthread = _get_curthread();
+	_thr_cancel_enter(curthread);
+	ret = __sys_select(numfds, readfds, writefds, exceptfds, timeout);
+	/* Canceled only if the select failed. */
+	_thr_cancel_leave(curthread, ret == -1);
+	return (ret);
+}
+
+/*
+ * Cancellation behavior:
+ * Thread may be canceled at start, but if the system call sent
+ * data, the thread is not canceled.
+ */
+static ssize_t
+__thr_sendmsg(int s, const struct msghdr *m, int f)
+{
+	struct pthread *curthread;
+	ssize_t ret;
+
+	curthread = _get_curthread();
+	_thr_cancel_enter(curthread);
+	ret = __sys_sendmsg(s, m, f);
+	/* Canceled only if nothing was sent. */
+	_thr_cancel_leave(curthread, ret <= 0);
+	return (ret);
+}
+
+/*
+ * Cancellation behavior:
+ * Thread may be canceled at start, but if the system call sent some
+ * data, the thread is not canceled.
+ */
+static ssize_t
+__thr_sendto(int s, const void *m, size_t l, int f, const struct sockaddr *t,
+    socklen_t tl)
+{
+	struct pthread *curthread;
+	ssize_t ret;
+
+	curthread = _get_curthread();
+	_thr_cancel_enter(curthread);
+	ret = __sys_sendto(s, m, l, f, t, tl);
+	/* Canceled only if nothing was sent. */
+	_thr_cancel_leave(curthread, ret <= 0);
+	return (ret);
+}
+
+/*
+ * Cancellation behavior:
+ *   A pending cancel is always acted on after __libc_system returns.
+ */
+static int
+__thr_system(const char *string)
+{
+	struct pthread *curthread;
+	int ret;
+
+	curthread = _get_curthread();
+	_thr_cancel_enter(curthread);
+	ret = __libc_system(string);
+	_thr_cancel_leave(curthread, 1);
+	return (ret);
+}
+
+/*
+ * Cancellation behavior:
+ * If thread is canceled, the system call is not completed,
+ * this means not all bytes were drained.
+ */
+static int
+__thr_tcdrain(int fd)
+{
+	struct pthread *curthread;
+	int ret;
+
+	curthread = _get_curthread();
+	_thr_cancel_enter(curthread);
+	ret = __libc_tcdrain(fd);
+	/* Canceled only if the drain failed. */
+	_thr_cancel_leave(curthread, ret == -1);
+	return (ret);
+}
+
+/*
+ * Cancellation behavior:
+ * Thread may be canceled at start, but if the system call returns
+ * a child pid, the thread is not canceled.
+ */
+static pid_t
+__thr_wait4(pid_t pid, int *status, int options, struct rusage *rusage)
+{
+	struct pthread *curthread;
+	pid_t ret;
+
+	curthread = _get_curthread();
+	_thr_cancel_enter(curthread);
+	ret = __sys_wait4(pid, status, options, rusage);
+	/* Not canceled once a child pid has been reaped. */
+	_thr_cancel_leave(curthread, ret <= 0);
+	return (ret);
+}
+
+/*
+ * Cancellation behavior:
+ * Thread may be canceled at start, but if the system call returns
+ * a child pid, the thread is not canceled.
+ */
+static pid_t
+__thr_wait6(idtype_t idtype, id_t id, int *status, int options,
+    struct __wrusage *ru, siginfo_t *infop)
+{
+	struct pthread *curthread;
+	pid_t ret;
+
+	curthread = _get_curthread();
+	_thr_cancel_enter(curthread);
+	ret = __sys_wait6(idtype, id, status, options, ru, infop);
+	/* Not canceled once a child pid has been reaped. */
+	_thr_cancel_leave(curthread, ret <= 0);
+	return (ret);
+}
+
+/*
+ * Cancellation behavior:
+ * Thread may be canceled at start, but if the thread wrote some data,
+ * it is not canceled.
+ */
+static ssize_t
+__thr_write(int fd, const void *buf, size_t nbytes)
+{
+	struct pthread *curthread;
+	ssize_t ret;
+
+	curthread = _get_curthread();
+	_thr_cancel_enter(curthread);
+	ret = __sys_write(fd, buf, nbytes);
+	/* Canceled only if nothing was written. */
+	_thr_cancel_leave(curthread, (ret <= 0));
+	return (ret);
+}
+
+/*
+ * Cancellation behavior:
+ * Thread may be canceled at start, but if the thread wrote some data,
+ * it is not canceled.
+ */
+static ssize_t
+__thr_writev(int fd, const struct iovec *iov, int iovcnt)
+{
+	struct pthread *curthread;
+	ssize_t ret;
+
+	curthread = _get_curthread();
+	_thr_cancel_enter(curthread);
+	ret = __sys_writev(fd, iov, iovcnt);
+	/* Canceled only if nothing was written. */
+	_thr_cancel_leave(curthread, (ret <= 0));
+	return (ret);
+}
+
+/*
+ * Install libthr's cancellation-aware syscall wrappers into libc's
+ * interposing table, replacing the plain syscall stubs.
+ * NOTE(review): callers are not visible here; presumably invoked once
+ * when the process becomes threaded -- confirm against thr_init.c.
+ */
+void
+__thr_interpose_libc(void)
+{
+
+	/* Switch errno to the threaded (per-thread) selector. */
+	__set_error_selector(__error_threaded);
+#define SLOT(name) \
+	*(__libc_interposing_slot(INTERPOS_##name)) = \
+	    (interpos_func_t)__thr_##name;
+	SLOT(accept);
+	SLOT(accept4);
+	SLOT(aio_suspend);
+	SLOT(close);
+	SLOT(connect);
+	SLOT(fcntl);
+	SLOT(fsync);
+	SLOT(fork);
+	SLOT(msync);
+	SLOT(nanosleep);
+	SLOT(openat);
+	SLOT(poll);
+	SLOT(pselect);
+	SLOT(read);
+	SLOT(readv);
+	SLOT(recvfrom);
+	SLOT(recvmsg);
+	SLOT(select);
+	SLOT(sendmsg);
+	SLOT(sendto);
+	SLOT(setcontext);
+	SLOT(sigaction);
+	SLOT(sigprocmask);
+	SLOT(sigsuspend);
+	SLOT(sigwait);
+	SLOT(sigtimedwait);
+	SLOT(sigwaitinfo);
+	SLOT(swapcontext);
+	SLOT(system);
+	SLOT(tcdrain);
+	SLOT(wait4);
+	SLOT(write);
+	SLOT(writev);
+	SLOT(spinlock);
+	SLOT(spinunlock);
+	SLOT(kevent);
+	SLOT(wait6);
+	SLOT(ppoll);
+	SLOT(map_stacks_exec);
+	SLOT(fdatasync);
+	SLOT(clock_nanosleep);
+#undef SLOT
+	*(__libc_interposing_slot(
+	    INTERPOS__pthread_mutex_init_calloc_cb)) =
+	    (interpos_func_t)_pthread_mutex_init_calloc_cb;
+}
diff --git a/lib/libthr/thread/thr_umtx.c b/lib/libthr/thread/thr_umtx.c
new file mode 100644
index 0000000000000..86e5a1f137802
--- /dev/null
+++ b/lib/libthr/thread/thr_umtx.c
@@ -0,0 +1,376 @@
+/*-
+ * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
+ *
+ * Copyright (c) 2005 David Xu <davidxu@freebsd.org>
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice unmodified, this list of conditions, and the following
+ * disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+
+#include "thr_private.h"
+#include "thr_umtx.h"
+
+#ifndef HAS__UMTX_OP_ERR
+/*
+ * Fallback for platforms without a dedicated _umtx_op_err entry point:
+ * convert _umtx_op(2)'s -1/errno convention into a direct error-number
+ * return (0 on success).
+ */
+int _umtx_op_err(void *obj, int op, u_long val, void *uaddr, void *uaddr2)
+{
+
+	if (_umtx_op(obj, op, val, uaddr, uaddr2) == -1)
+		return (errno);
+	return (0);
+}
+#endif
+
+/* Reset a umutex to its unlocked default state. */
+void
+_thr_umutex_init(struct umutex *mtx)
+{
+	static const struct umutex default_mtx = DEFAULT_UMUTEX;
+
+	*mtx = default_mtx;
+}
+
+/* Reset a urwlock to its unlocked default state. */
+void
+_thr_urwlock_init(struct urwlock *rwl)
+{
+	static const struct urwlock default_rwl = DEFAULT_URWLOCK;
+
+	*rwl = default_rwl;
+}
+
+/*
+ * Lock a umutex, sleeping in the kernel when contested.  Returns 0 on
+ * success, or EOWNERDEAD/ENOTRECOVERABLE for a robust mutex whose
+ * previous owner terminated.
+ */
+int
+__thr_umutex_lock(struct umutex *mtx, uint32_t id)
+{
+	uint32_t owner;
+
+	/* Priority-protocol mutexes are handled entirely by the kernel. */
+	if ((mtx->m_flags & (UMUTEX_PRIO_PROTECT | UMUTEX_PRIO_INHERIT)) != 0)
+		return (_umtx_op_err(mtx, UMTX_OP_MUTEX_LOCK, 0, 0, 0));
+
+	for (;;) {
+		/*
+		 * Fast path: try to take an unowned mutex in userspace,
+		 * preserving the CONTESTED bit.
+		 */
+		owner = mtx->m_owner;
+		if ((owner & ~UMUTEX_CONTESTED) == 0 &&
+		    atomic_cmpset_acq_32(&mtx->m_owner, owner, id | owner))
+			return (0);
+		/* Robust mutex whose owner died: take it, report it. */
+		if (owner == UMUTEX_RB_OWNERDEAD &&
+		    atomic_cmpset_acq_32(&mtx->m_owner, owner,
+		    id | UMUTEX_CONTESTED))
+			return (EOWNERDEAD);
+		if (owner == UMUTEX_RB_NOTRECOV)
+			return (ENOTRECOVERABLE);
+
+		/* wait in kernel */
+		_umtx_op_err(mtx, UMTX_OP_MUTEX_WAIT, 0, 0, 0);
+	}
+}
+
+/* Userspace spin iterations before each sleep in the kernel. */
+#define SPINLOOPS 1000
+
+/*
+ * Spinning variant of __thr_umutex_lock for SMP systems: busy-wait
+ * briefly for the owner to release before sleeping in the kernel.
+ */
+int
+__thr_umutex_lock_spin(struct umutex *mtx, uint32_t id)
+{
+	uint32_t owner;
+	int count;
+
+	/* Spinning is pointless on a uniprocessor. */
+	if (!_thr_is_smp)
+		return (__thr_umutex_lock(mtx, id));
+	if ((mtx->m_flags & (UMUTEX_PRIO_PROTECT | UMUTEX_PRIO_INHERIT)) != 0)
+		return (_umtx_op_err(mtx, UMTX_OP_MUTEX_LOCK, 0, 0, 0));
+
+	for (;;) {
+		count = SPINLOOPS;
+		while (count--) {
+			owner = mtx->m_owner;
+			if ((owner & ~UMUTEX_CONTESTED) == 0 &&
+			    atomic_cmpset_acq_32(&mtx->m_owner, owner,
+			    id | owner))
+				return (0);
+			if (__predict_false(owner == UMUTEX_RB_OWNERDEAD) &&
+			    atomic_cmpset_acq_32(&mtx->m_owner, owner,
+			    id | UMUTEX_CONTESTED))
+				return (EOWNERDEAD);
+			if (__predict_false(owner == UMUTEX_RB_NOTRECOV))
+				return (ENOTRECOVERABLE);
+			CPU_SPINWAIT;
+		}
+
+		/* wait in kernel */
+		_umtx_op_err(mtx, UMTX_OP_MUTEX_WAIT, 0, 0, 0);
+	}
+}
+
+/*
+ * Timed lock: abstime == NULL blocks forever.  The timeout is passed
+ * to the kernel as an absolute CLOCK_REALTIME struct _umtx_time, with
+ * its size carried in the uaddr argument.
+ */
+int
+__thr_umutex_timedlock(struct umutex *mtx, uint32_t id,
+    const struct timespec *abstime)
+{
+	struct _umtx_time *tm_p, timeout;
+	size_t tm_size;
+	uint32_t owner;
+	int ret;
+
+	if (abstime == NULL) {
+		tm_p = NULL;
+		tm_size = 0;
+	} else {
+		timeout._clockid = CLOCK_REALTIME;
+		timeout._flags = UMTX_ABSTIME;
+		timeout._timeout = *abstime;
+		tm_p = &timeout;
+		tm_size = sizeof(timeout);
+	}
+
+	for (;;) {
+		if ((mtx->m_flags & (UMUTEX_PRIO_PROTECT |
+		    UMUTEX_PRIO_INHERIT)) == 0) {
+			/* try to lock it */
+			owner = mtx->m_owner;
+			if ((owner & ~UMUTEX_CONTESTED) == 0 &&
+			    atomic_cmpset_acq_32(&mtx->m_owner, owner,
+			    id | owner))
+				return (0);
+			if (__predict_false(owner == UMUTEX_RB_OWNERDEAD) &&
+			    atomic_cmpset_acq_32(&mtx->m_owner, owner,
+			    id | UMUTEX_CONTESTED))
+				return (EOWNERDEAD);
+			if (__predict_false(owner == UMUTEX_RB_NOTRECOV))
+				return (ENOTRECOVERABLE);
+			/* wait in kernel */
+			ret = _umtx_op_err(mtx, UMTX_OP_MUTEX_WAIT, 0,
+			    (void *)tm_size, __DECONST(void *, tm_p));
+		} else {
+			/* Priority-protocol: the kernel does the locking. */
+			ret = _umtx_op_err(mtx, UMTX_OP_MUTEX_LOCK, 0,
+			    (void *)tm_size, __DECONST(void *, tm_p));
+			if (ret == 0 || ret == EOWNERDEAD ||
+			    ret == ENOTRECOVERABLE)
+				break;
+		}
+		/* A wait that was merely woken retries the acquire above. */
+		if (ret == ETIMEDOUT)
+			break;
+	}
+	return (ret);
+}
+
+/* Release a umutex via the kernel (wakes any queued waiter). */
+int
+__thr_umutex_unlock(struct umutex *mtx)
+{
+
+	return (_umtx_op_err(mtx, UMTX_OP_MUTEX_UNLOCK, 0, 0, 0));
+}
+
+/* Non-blocking lock attempt, delegated to the kernel. */
+int
+__thr_umutex_trylock(struct umutex *mtx)
+{
+
+	return (_umtx_op_err(mtx, UMTX_OP_MUTEX_TRYLOCK, 0, 0, 0));
+}
+
+/* Set the priority ceiling; the old value is stored via *oldceiling. */
+int
+__thr_umutex_set_ceiling(struct umutex *mtx, uint32_t ceiling,
+    uint32_t *oldceiling)
+{
+
+	return (_umtx_op_err(mtx, UMTX_OP_SET_CEILING, ceiling, oldceiling, 0));
+}
+
+/*
+ * Sleep while *mtx == id (see _umtx_op(2) UMTX_OP_WAIT).  The timeout
+ * is relative; a zero or negative timeout fails immediately with
+ * ETIMEDOUT without entering the kernel.
+ */
+int
+_thr_umtx_wait(volatile long *mtx, long id, const struct timespec *timeout)
+{
+
+	if (timeout && (timeout->tv_sec < 0 || (timeout->tv_sec == 0 &&
+	    timeout->tv_nsec <= 0)))
+		return (ETIMEDOUT);
+	return (_umtx_op_err(__DEVOLATILE(void *, mtx), UMTX_OP_WAIT, id, 0,
+	    __DECONST(void*, timeout)));
+}
+
+/*
+ * As _thr_umtx_wait, but on a 32-bit word; "shared" selects the
+ * process-shared wait queue instead of the process-private one.
+ */
+int
+_thr_umtx_wait_uint(volatile u_int *mtx, u_int id,
+    const struct timespec *timeout, int shared)
+{
+
+	if (timeout && (timeout->tv_sec < 0 || (timeout->tv_sec == 0 &&
+	    timeout->tv_nsec <= 0)))
+		return (ETIMEDOUT);
+	return (_umtx_op_err(__DEVOLATILE(void *, mtx), shared ?
+	    UMTX_OP_WAIT_UINT : UMTX_OP_WAIT_UINT_PRIVATE, id, 0,
+	    __DECONST(void*, timeout)));
+}
+
+/*
+ * Absolute-deadline variant of _thr_umtx_wait_uint: abstime is an
+ * absolute time on the given clockid, encoded as a struct _umtx_time
+ * whose size travels in the uaddr argument; NULL waits forever.
+ */
+int
+_thr_umtx_timedwait_uint(volatile u_int *mtx, u_int id, int clockid,
+    const struct timespec *abstime, int shared)
+{
+	struct _umtx_time *tm_p, timeout;
+	size_t tm_size;
+
+	if (abstime == NULL) {
+		tm_p = NULL;
+		tm_size = 0;
+	} else {
+		timeout._clockid = clockid;
+		timeout._flags = UMTX_ABSTIME;
+		timeout._timeout = *abstime;
+		tm_p = &timeout;
+		tm_size = sizeof(timeout);
+	}
+
+	return (_umtx_op_err(__DEVOLATILE(void *, mtx), shared ?
+	    UMTX_OP_WAIT_UINT : UMTX_OP_WAIT_UINT_PRIVATE, id,
+	    (void *)tm_size, __DECONST(void *, tm_p)));
+}
+
+/* Wake up to nr_wakeup threads sleeping on mtx. */
+int
+_thr_umtx_wake(volatile void *mtx, int nr_wakeup, int shared)
+{
+
+	return (_umtx_op_err(__DEVOLATILE(void *, mtx), shared ?
+	    UMTX_OP_WAKE : UMTX_OP_WAKE_PRIVATE, nr_wakeup, 0, 0));
+}
+
+/* Initialize a ucond by zero-filling it. */
+void
+_thr_ucond_init(struct ucond *cv)
+{
+
+	bzero(cv, sizeof(struct ucond));
+}
+
+/*
+ * Wait on a kernel condition variable; UMTX_OP_CV_WAIT releases the
+ * umutex m atomically with queuing the waiter (see _umtx_op(2)).
+ * The timeout is relative; an already-expired timeout must still
+ * release m before failing so the caller's "mutex is unlocked on
+ * return" invariant holds.
+ */
+int
+_thr_ucond_wait(struct ucond *cv, struct umutex *m,
+    const struct timespec *timeout, int flags)
+{
+	struct pthread *curthread;
+
+	if (timeout && (timeout->tv_sec < 0 || (timeout->tv_sec == 0 &&
+	    timeout->tv_nsec <= 0))) {
+		curthread = _get_curthread();
+		_thr_umutex_unlock(m, TID(curthread));
+		return (ETIMEDOUT);
+	}
+	return (_umtx_op_err(cv, UMTX_OP_CV_WAIT, flags, m,
+	    __DECONST(void*, timeout)));
+}
+
+/* Wake one waiter; skip the syscall when none is queued. */
+int
+_thr_ucond_signal(struct ucond *cv)
+{
+
+	if (!cv->c_has_waiters)
+		return (0);
+	return (_umtx_op_err(cv, UMTX_OP_CV_SIGNAL, 0, NULL, NULL));
+}
+
+/* Wake all waiters; skip the syscall when none is queued. */
+int
+_thr_ucond_broadcast(struct ucond *cv)
+{
+
+	if (!cv->c_has_waiters)
+		return (0);
+	return (_umtx_op_err(cv, UMTX_OP_CV_BROADCAST, 0, NULL, NULL));
+}
+
+/*
+ * Read-lock a urwlock in the kernel.  tsp, when non-NULL, is an
+ * absolute CLOCK_REALTIME deadline encoded as a struct _umtx_time
+ * whose size is carried in the uaddr argument.
+ */
+int
+__thr_rwlock_rdlock(struct urwlock *rwlock, int flags,
+    const struct timespec *tsp)
+{
+	struct _umtx_time timeout, *tm_p;
+	size_t tm_size;
+
+	if (tsp == NULL) {
+		tm_p = NULL;
+		tm_size = 0;
+	} else {
+		timeout._timeout = *tsp;
+		timeout._flags = UMTX_ABSTIME;
+		timeout._clockid = CLOCK_REALTIME;
+		tm_p = &timeout;
+		tm_size = sizeof(timeout);
+	}
+	return (_umtx_op_err(rwlock, UMTX_OP_RW_RDLOCK, flags,
+	    (void *)tm_size, tm_p));
+}
+
+/*
+ * Write-lock a urwlock in the kernel; timeout handling matches
+ * __thr_rwlock_rdlock (absolute CLOCK_REALTIME deadline or NULL).
+ */
+int
+__thr_rwlock_wrlock(struct urwlock *rwlock, const struct timespec *tsp)
+{
+	struct _umtx_time timeout, *tm_p;
+	size_t tm_size;
+
+	if (tsp == NULL) {
+		tm_p = NULL;
+		tm_size = 0;
+	} else {
+		timeout._timeout = *tsp;
+		timeout._flags = UMTX_ABSTIME;
+		timeout._clockid = CLOCK_REALTIME;
+		tm_p = &timeout;
+		tm_size = sizeof(timeout);
+	}
+	return (_umtx_op_err(rwlock, UMTX_OP_RW_WRLOCK, 0, (void *)tm_size,
+	    tm_p));
+}
+
+/* Release a urwlock (read or write side) via the kernel. */
+int
+__thr_rwlock_unlock(struct urwlock *rwlock)
+{
+
+	return (_umtx_op_err(rwlock, UMTX_OP_RW_UNLOCK, 0, NULL, NULL));
+}
+
+/*
+ * Infallible read lock for the library's internal locks: try the
+ * userspace fast path, retry interrupted kernel waits, and panic on
+ * any other error so callers never need to check.
+ */
+void
+_thr_rwl_rdlock(struct urwlock *rwlock)
+{
+	int ret;
+
+	for (;;) {
+		if (_thr_rwlock_tryrdlock(rwlock, URWLOCK_PREFER_READER) == 0)
+			return;
+		ret = __thr_rwlock_rdlock(rwlock, URWLOCK_PREFER_READER, NULL);
+		if (ret == 0)
+			return;
+		if (ret != EINTR)
+			PANIC("rdlock error");
+	}
+}
+
+/*
+ * Infallible write lock for the library's internal locks; mirrors
+ * _thr_rwl_rdlock (retry EINTR, panic on anything else).
+ */
+void
+_thr_rwl_wrlock(struct urwlock *rwlock)
+{
+	int ret;
+
+	for (;;) {
+		if (_thr_rwlock_trywrlock(rwlock) == 0)
+			return;
+		ret = __thr_rwlock_wrlock(rwlock, NULL);
+		if (ret == 0)
+			return;
+		if (ret != EINTR)
+			PANIC("wrlock error");
+	}
+}
+
+/* Infallible unlock counterpart of _thr_rwl_rdlock/_thr_rwl_wrlock. */
+void
+_thr_rwl_unlock(struct urwlock *rwlock)
+{
+
+	if (_thr_rwlock_unlock(rwlock))
+		PANIC("unlock error");
+}
diff --git a/lib/libthr/thread/thr_umtx.h b/lib/libthr/thread/thr_umtx.h
new file mode 100644
index 0000000000000..1017214adb5c7
--- /dev/null
+++ b/lib/libthr/thread/thr_umtx.h
@@ -0,0 +1,272 @@
+/*-
+ * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
+ *
+ * Copyright (c) 2005 David Xu <davidxu@freebsd.org>
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * $FreeBSD$
+ */
+
+#ifndef _THR_FBSD_UMTX_H_
+#define _THR_FBSD_UMTX_H_
+
+#include <strings.h>
+#include <sys/umtx.h>
+
+#ifdef __LP64__
+#define DEFAULT_UMUTEX {0,0,{0,0},0,{0,0}}
+#else
+#define DEFAULT_UMUTEX {0,0,{0,0},0,0,{0,0}}
+#endif
+#define DEFAULT_URWLOCK {0,0,0,0,{0,0,0,0}}
+
+int _umtx_op_err(void *, int op, u_long, void *, void *) __hidden;
+int __thr_umutex_lock(struct umutex *mtx, uint32_t id) __hidden;
+int __thr_umutex_lock_spin(struct umutex *mtx, uint32_t id) __hidden;
+int __thr_umutex_timedlock(struct umutex *mtx, uint32_t id,
+ const struct timespec *timeout) __hidden;
+int __thr_umutex_unlock(struct umutex *mtx) __hidden;
+int __thr_umutex_trylock(struct umutex *mtx) __hidden;
+int __thr_umutex_set_ceiling(struct umutex *mtx, uint32_t ceiling,
+ uint32_t *oldceiling) __hidden;
+
+void _thr_umutex_init(struct umutex *mtx) __hidden;
+void _thr_urwlock_init(struct urwlock *rwl) __hidden;
+
+int _thr_umtx_wait(volatile long *mtx, long exp,
+ const struct timespec *timeout) __hidden;
+int _thr_umtx_wait_uint(volatile u_int *mtx, u_int exp,
+ const struct timespec *timeout, int shared) __hidden;
+int _thr_umtx_timedwait_uint(volatile u_int *mtx, u_int exp, int clockid,
+ const struct timespec *timeout, int shared) __hidden;
+int _thr_umtx_wake(volatile void *mtx, int count, int shared) __hidden;
+int _thr_ucond_wait(struct ucond *cv, struct umutex *m,
+ const struct timespec *timeout, int flags) __hidden;
+void _thr_ucond_init(struct ucond *cv) __hidden;
+int _thr_ucond_signal(struct ucond *cv) __hidden;
+int _thr_ucond_broadcast(struct ucond *cv) __hidden;
+
+int __thr_rwlock_rdlock(struct urwlock *rwlock, int flags,
+ const struct timespec *tsp) __hidden;
+int __thr_rwlock_wrlock(struct urwlock *rwlock,
+ const struct timespec *tsp) __hidden;
+int __thr_rwlock_unlock(struct urwlock *rwlock) __hidden;
+
+/* Internal use only. */
+void _thr_rwl_rdlock(struct urwlock *rwlock) __hidden;
+void _thr_rwl_wrlock(struct urwlock *rwlock) __hidden;
+void _thr_rwl_unlock(struct urwlock *rwlock) __hidden;
+
+/*
+ * Try to take a umutex without blocking.  Returns 0 on success, EBUSY
+ * if owned, EOWNERDEAD if a robust mutex whose previous owner died was
+ * taken over, or ENOTRECOVERABLE for an unrecoverable robust mutex.
+ * Priority-protected mutexes must go through the kernel even for a
+ * trylock (the ceiling is managed there).
+ */
+static inline int
+_thr_umutex_trylock(struct umutex *mtx, uint32_t id)
+{
+
+ /* Fast path: CAS from unowned to our thread id. */
+ if (atomic_cmpset_acq_32(&mtx->m_owner, UMUTEX_UNOWNED, id))
+ return (0);
+ /* Robust mutex whose owner died: take it over, marked contested. */
+ if (__predict_false((uint32_t)mtx->m_owner == UMUTEX_RB_OWNERDEAD) &&
+ atomic_cmpset_acq_32(&mtx->m_owner, UMUTEX_RB_OWNERDEAD,
+ id | UMUTEX_CONTESTED))
+ return (EOWNERDEAD);
+ if (__predict_false((uint32_t)mtx->m_owner == UMUTEX_RB_NOTRECOV))
+ return (ENOTRECOVERABLE);
+ if ((mtx->m_flags & UMUTEX_PRIO_PROTECT) == 0)
+ return (EBUSY);
+ return (__thr_umutex_trylock(mtx));
+}
+
+/*
+ * Userland-only trylock used on lock fast paths: never enters the
+ * kernel.  Returns 0 on success, EBUSY if the lock is held,
+ * EOWNERDEAD/ENOTRECOVERABLE for robust-mutex states.
+ */
+static inline int
+_thr_umutex_trylock2(struct umutex *mtx, uint32_t id)
+{
+
+ /* Fast path: CAS from unowned to our thread id. */
+ if (atomic_cmpset_acq_32(&mtx->m_owner, UMUTEX_UNOWNED, id) != 0)
+ return (0);
+ /*
+ * A contested-but-unowned word can be grabbed in userland for
+ * plain mutexes; priority protect/inherit mutexes must keep
+ * their contested handoff in the kernel.
+ */
+ if ((uint32_t)mtx->m_owner == UMUTEX_CONTESTED &&
+ __predict_true((mtx->m_flags & (UMUTEX_PRIO_PROTECT |
+ UMUTEX_PRIO_INHERIT)) == 0) &&
+ atomic_cmpset_acq_32(&mtx->m_owner, UMUTEX_CONTESTED,
+ id | UMUTEX_CONTESTED))
+ return (0);
+ /* Robust mutex whose owner died: take it over, marked contested. */
+ if (__predict_false((uint32_t)mtx->m_owner == UMUTEX_RB_OWNERDEAD) &&
+ atomic_cmpset_acq_32(&mtx->m_owner, UMUTEX_RB_OWNERDEAD,
+ id | UMUTEX_CONTESTED))
+ return (EOWNERDEAD);
+ if (__predict_false((uint32_t)mtx->m_owner == UMUTEX_RB_NOTRECOV))
+ return (ENOTRECOVERABLE);
+ return (EBUSY);
+}
+
+/*
+ * Lock a umutex: userland fast path first, then block in the kernel.
+ */
+static inline int
+_thr_umutex_lock(struct umutex *mtx, uint32_t id)
+{
+
+ if (_thr_umutex_trylock2(mtx, id) == 0)
+ return (0);
+ return (__thr_umutex_lock(mtx, id));
+}
+
+/*
+ * Like _thr_umutex_lock(), but the kernel slow path spins before
+ * sleeping (see __thr_umutex_lock_spin()).
+ */
+static inline int
+_thr_umutex_lock_spin(struct umutex *mtx, uint32_t id)
+{
+
+ if (_thr_umutex_trylock2(mtx, id) == 0)
+ return (0);
+ return (__thr_umutex_lock_spin(mtx, id));
+}
+
+/*
+ * Lock a umutex with a timeout: userland fast path first, then the
+ * kernel timed slow path.
+ */
+static inline int
+_thr_umutex_timedlock(struct umutex *mtx, uint32_t id,
+ const struct timespec *timeout)
+{
+
+ if (_thr_umutex_trylock2(mtx, id) == 0)
+ return (0);
+ return (__thr_umutex_timedlock(mtx, id, timeout));
+}
+
+/*
+ * Unlock a umutex held by thread `id'.  For a non-consistent robust
+ * mutex the owner word is set to UMUTEX_RB_NOTRECOV instead of
+ * UMUTEX_UNOWNED.  If `defer' is non-NULL and a wakeup is needed, *defer
+ * is set to 1 and the caller is responsible for waking waiters later;
+ * otherwise waiters are woken here via UMTX_OP_MUTEX_WAKE2.
+ * Returns 0 on success or EPERM if `id' is not the owner.
+ */
+static inline int
+_thr_umutex_unlock2(struct umutex *mtx, uint32_t id, int *defer)
+{
+ uint32_t flags, owner;
+ bool noncst;
+
+ flags = mtx->m_flags;
+ noncst = (flags & UMUTEX_NONCONSISTENT) != 0;
+
+ /* Priority protect/inherit mutexes must be released in the kernel. */
+ if ((flags & (UMUTEX_PRIO_PROTECT | UMUTEX_PRIO_INHERIT)) != 0) {
+ if (atomic_cmpset_rel_32(&mtx->m_owner, id, noncst ?
+ UMUTEX_RB_NOTRECOV : UMUTEX_UNOWNED))
+ return (0);
+ return (__thr_umutex_unlock(mtx));
+ }
+
+ /* Clear the owner word, preserving an ownership check each retry. */
+ do {
+ owner = mtx->m_owner;
+ if (__predict_false((owner & ~UMUTEX_CONTESTED) != id))
+ return (EPERM);
+ } while (__predict_false(!atomic_cmpset_rel_32(&mtx->m_owner, owner,
+ noncst ? UMUTEX_RB_NOTRECOV : UMUTEX_UNOWNED)));
+ if ((owner & UMUTEX_CONTESTED) != 0) {
+ /* Deferred wakeup is not allowed for non-consistent mutexes. */
+ if (defer == NULL || noncst)
+ (void)_umtx_op_err(mtx, UMTX_OP_MUTEX_WAKE2,
+ flags, 0, 0);
+ else
+ *defer = 1;
+ }
+ return (0);
+}
+
+/* Unlock a umutex, waking waiters immediately (no deferral). */
+static inline int
+_thr_umutex_unlock(struct umutex *mtx, uint32_t id)
+{
+
+ return (_thr_umutex_unlock2(mtx, id, NULL));
+}
+
+/*
+ * Userland read-lock fast path.  Returns 0 on success, EBUSY if a
+ * writer owns (or, unless readers are preferred, is waiting for) the
+ * lock, or EAGAIN if the reader count is saturated.
+ */
+static inline int
+_thr_rwlock_tryrdlock(struct urwlock *rwlock, int flags)
+{
+ int32_t state, wrflags;
+
+ /*
+ * With reader preference only an active writer blocks us;
+ * otherwise queued writers also block new readers (writer
+ * starvation avoidance).
+ */
+ if ((flags & URWLOCK_PREFER_READER) != 0 ||
+ (rwlock->rw_flags & URWLOCK_PREFER_READER) != 0)
+ wrflags = URWLOCK_WRITE_OWNER;
+ else
+ wrflags = URWLOCK_WRITE_OWNER | URWLOCK_WRITE_WAITERS;
+ state = rwlock->rw_state;
+ while (!(state & wrflags)) {
+ if (__predict_false(URWLOCK_READER_COUNT(state) ==
+ URWLOCK_MAX_READERS))
+ return (EAGAIN);
+ /* Bump the reader count; retry if the state moved under us. */
+ if (atomic_cmpset_acq_32(&rwlock->rw_state, state, state + 1))
+ return (0);
+ state = rwlock->rw_state;
+ }
+
+ return (EBUSY);
+}
+
+/*
+ * Userland write-lock fast path.  Succeeds only when there is no
+ * writer and no readers; returns EBUSY otherwise.
+ */
+static inline int
+_thr_rwlock_trywrlock(struct urwlock *rwlock)
+{
+ int32_t state;
+
+ state = rwlock->rw_state;
+ while ((state & URWLOCK_WRITE_OWNER) == 0 &&
+ URWLOCK_READER_COUNT(state) == 0) {
+ /* Claim write ownership; retry if the state moved under us. */
+ if (atomic_cmpset_acq_32(&rwlock->rw_state, state,
+ state | URWLOCK_WRITE_OWNER))
+ return (0);
+ state = rwlock->rw_state;
+ }
+
+ return (EBUSY);
+}
+
+/*
+ * Read-lock a urwlock: userland fast path, then the kernel slow path
+ * with optional absolute timeout `tsp'.
+ */
+static inline int
+_thr_rwlock_rdlock(struct urwlock *rwlock, int flags, struct timespec *tsp)
+{
+
+ if (_thr_rwlock_tryrdlock(rwlock, flags) == 0)
+ return (0);
+ return (__thr_rwlock_rdlock(rwlock, flags, tsp));
+}
+
+/*
+ * Write-lock a urwlock: userland fast path, then the kernel slow path
+ * with optional absolute timeout `tsp'.
+ */
+static inline int
+_thr_rwlock_wrlock(struct urwlock *rwlock, struct timespec *tsp)
+{
+
+ if (_thr_rwlock_trywrlock(rwlock) == 0)
+ return (0);
+ return (__thr_rwlock_wrlock(rwlock, tsp));
+}
+
+/*
+ * Unlock a urwlock.  The userland fast path handles the uncontested
+ * cases; whenever waiters may need waking, fall through to the kernel
+ * (__thr_rwlock_unlock()).  Returns EPERM if the lock is not held.
+ */
+static inline int
+_thr_rwlock_unlock(struct urwlock *rwlock)
+{
+ int32_t state;
+
+ state = rwlock->rw_state;
+ if ((state & URWLOCK_WRITE_OWNER) != 0) {
+ /*
+ * The CAS expects exactly URWLOCK_WRITE_OWNER: if any
+ * waiter bits are set it fails and the kernel performs
+ * the release and wakeup.
+ */
+ if (atomic_cmpset_rel_32(&rwlock->rw_state,
+ URWLOCK_WRITE_OWNER, 0))
+ return (0);
+ } else {
+ for (;;) {
+ if (__predict_false(URWLOCK_READER_COUNT(state) == 0))
+ return (EPERM);
+ /*
+ * The last reader leaving with waiters queued
+ * must hand off via the kernel; otherwise just
+ * decrement the reader count in userland.
+ */
+ if (!((state & (URWLOCK_WRITE_WAITERS |
+ URWLOCK_READ_WAITERS)) != 0 &&
+ URWLOCK_READER_COUNT(state) == 1)) {
+ if (atomic_cmpset_rel_32(&rwlock->rw_state,
+ state, state - 1))
+ return (0);
+ state = rwlock->rw_state;
+ } else {
+ break;
+ }
+ }
+ }
+ return (__thr_rwlock_unlock(rwlock));
+}
+#endif
diff --git a/lib/libthr/thread/thr_yield.c b/lib/libthr/thread/thr_yield.c
new file mode 100644
index 0000000000000..0d666d52a5547
--- /dev/null
+++ b/lib/libthr/thread/thr_yield.c
@@ -0,0 +1,48 @@
+/*-
+ * SPDX-License-Identifier: BSD-3-Clause
+ *
+ * Copyright (c) 1995 John Birrell <jb@cimlogic.com.au>.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. Neither the name of the author nor the names of any co-contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY JOHN BIRRELL AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+
+#include "namespace.h"
+#include <pthread.h>
+#include <sched.h>
+#include "un-namespace.h"
+
+__weak_reference(_pthread_yield, pthread_yield);
+
+/*
+ * Draft 4 (pre-standard POSIX threads) pthread_yield(): yield the
+ * processor to another runnable thread.  Thin wrapper around the
+ * standard sched_yield().
+ */
+void
+_pthread_yield(void)
+{
+
+ sched_yield();
+}