summary refs log tree commit diff
path: root/runtime/src/kmp_dispatch.h
diff options
context:
space:
mode:
Diffstat (limited to 'runtime/src/kmp_dispatch.h')
-rw-r--r--  runtime/src/kmp_dispatch.h  30
1 files changed, 11 insertions, 19 deletions
diff --git a/runtime/src/kmp_dispatch.h b/runtime/src/kmp_dispatch.h
index 955807113ef1a..8b3e98435a3f6 100644
--- a/runtime/src/kmp_dispatch.h
+++ b/runtime/src/kmp_dispatch.h
@@ -4,10 +4,9 @@
//===----------------------------------------------------------------------===//
//
-// The LLVM Compiler Infrastructure
-//
-// This file is dual licensed under the MIT and the University of Illinois Open
-// Source Licenses. See LICENSE.txt for details.
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
@@ -172,11 +171,9 @@ template <typename T> struct dispatch_shared_info_template {
dispatch_shared_info64_t s64;
} u;
volatile kmp_uint32 buffer_index;
-#if OMP_45_ENABLED
volatile kmp_int32 doacross_buf_idx; // teamwise index
kmp_uint32 *doacross_flags; // array of iteration flags (0/1)
kmp_int32 doacross_num_done; // count finished threads
-#endif
#if KMP_USE_HIER_SCHED
kmp_hier_t<T> *hier;
#endif
@@ -270,7 +267,7 @@ template <typename T> kmp_uint32 __kmp_eq(T value, T checker) {
}
/*
- Spin wait loop that first does pause, then yield.
+ Spin wait loop that pauses between checks.
Waits until function returns non-zero when called with *spinner and check.
Does NOT put threads to sleep.
Arguments:
@@ -283,15 +280,14 @@ template <typename T> kmp_uint32 __kmp_eq(T value, T checker) {
is used to report locks consistently. For example, if lock is acquired
immediately, its address is reported to ittnotify via
KMP_FSYNC_ACQUIRED(). However, if the lock cannot be acquired immediately
- and lock routine calls to KMP_WAIT_YIELD(), the later should report the
+ and lock routine calls to KMP_WAIT(), the latter should report the
same address, not an address of low-level spinner.
#endif // USE_ITT_BUILD
TODO: make inline function (move to header file for icl)
*/
template <typename UT>
-static UT __kmp_wait_yield(volatile UT *spinner, UT checker,
- kmp_uint32 (*pred)(UT, UT)
- USE_ITT_BUILD_ARG(void *obj)) {
+static UT __kmp_wait(volatile UT *spinner, UT checker,
+ kmp_uint32 (*pred)(UT, UT) USE_ITT_BUILD_ARG(void *obj)) {
// note: we may not belong to a team at this point
volatile UT *spin = spinner;
UT check = checker;
@@ -309,12 +305,8 @@ static UT __kmp_wait_yield(volatile UT *spinner, UT checker,
It causes problems with infinite recursion because of exit lock */
/* if ( TCR_4(__kmp_global.g.g_done) && __kmp_global.g.g_abort)
__kmp_abort_thread(); */
-
- // if we are oversubscribed,
- // or have waited a bit (and KMP_LIBRARY=throughput, then yield
- // pause is in the following code
- KMP_YIELD(TCR_4(__kmp_nth) > __kmp_avail_proc);
- KMP_YIELD_SPIN(spins);
+ // If oversubscribed, or have waited a bit then yield.
+ KMP_YIELD_OVERSUB_ELSE_SPIN(spins);
}
KMP_FSYNC_SPIN_ACQUIRED(obj);
return r;
@@ -380,8 +372,8 @@ void __kmp_dispatch_deo(int *gtid_ref, int *cid_ref, ident_t *loc_ref) {
__kmp_str_free(&buff);
}
#endif
- __kmp_wait_yield<UT>(&sh->u.s.ordered_iteration, lower,
- __kmp_ge<UT> USE_ITT_BUILD_ARG(NULL));
+ __kmp_wait<UT>(&sh->u.s.ordered_iteration, lower,
+ __kmp_ge<UT> USE_ITT_BUILD_ARG(NULL));
KMP_MB(); /* is this necessary? */
#ifdef KMP_DEBUG
{