Diffstat (limited to 'runtime/src/kmp_barrier.cpp')
-rw-r--r--   runtime/src/kmp_barrier.cpp | 312
1 file changed, 203 insertions(+), 109 deletions(-)
diff --git a/runtime/src/kmp_barrier.cpp b/runtime/src/kmp_barrier.cpp
index b8e8d933905a0..e17986b16a95d 100644
--- a/runtime/src/kmp_barrier.cpp
+++ b/runtime/src/kmp_barrier.cpp
@@ -4,10 +4,9 @@
//===----------------------------------------------------------------------===//
//
-// The LLVM Compiler Infrastructure
-//
-// This file is dual licensed under the MIT and the University of Illinois Open
-// Source Licenses. See LICENSE.txt for details.
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
@@ -45,7 +44,8 @@ void __kmp_print_structure(void); // Forward declaration
// ---------------------------- Barrier Algorithms ----------------------------
// Linear Barrier
-static void __kmp_linear_barrier_gather(
+template <bool cancellable = false>
+static bool __kmp_linear_barrier_gather_template(
enum barrier_type bt, kmp_info_t *this_thr, int gtid, int tid,
void (*reduce)(void *, void *) USE_ITT_BUILD_ARG(void *itt_sync_obj)) {
KMP_TIME_DEVELOPER_PARTITIONED_BLOCK(KMP_linear_gather);
@@ -105,7 +105,14 @@ static void __kmp_linear_barrier_gather(
// Wait for worker thread to arrive
kmp_flag_64 flag(&other_threads[i]->th.th_bar[bt].bb.b_arrived,
new_state);
- flag.wait(this_thr, FALSE USE_ITT_BUILD_ARG(itt_sync_obj));
+ if (cancellable) {
+ bool cancelled = flag.wait_cancellable_nosleep(
+ this_thr, FALSE USE_ITT_BUILD_ARG(itt_sync_obj));
+ if (cancelled)
+ return true;
+ } else {
+ flag.wait(this_thr, FALSE USE_ITT_BUILD_ARG(itt_sync_obj));
+ }
ANNOTATE_BARRIER_END(other_threads[i]);
#if USE_ITT_BUILD && USE_ITT_NOTIFY
// Barrier imbalance - write min of the thread time and the other thread
@@ -138,9 +145,11 @@ static void __kmp_linear_barrier_gather(
20,
("__kmp_linear_barrier_gather: T#%d(%d:%d) exit for barrier type %d\n",
gtid, team->t.t_id, tid, bt));
+ return false;
}
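
The gather template above returns true as soon as wait_cancellable_nosleep observes a cancellation request, letting the caller unwind instead of blocking in the barrier. A minimal sketch of a cancellable spin-wait in that spirit, using hypothetical names rather than the real kmp_flag_64 API:

    #include <atomic>
    #include <cstdint>

    // Spin until `flag` reaches `target`, but give up early if a cancellation
    // request becomes visible. Returns true if the wait was cancelled; the
    // caller must then revert its arrival bookkeeping.
    static bool wait_cancellable_sketch(const std::atomic<uint64_t> &flag,
                                        uint64_t target,
                                        const std::atomic<int> &cancel_request) {
      while (flag.load(std::memory_order_acquire) != target) {
        if (cancel_request.load(std::memory_order_relaxed) != 0)
          return true; // cancelled before the barrier completed
      }
      return false; // flag reached target; barrier passed normally
    }
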
-static void __kmp_linear_barrier_release(
+template <bool cancellable = false>
+static bool __kmp_linear_barrier_release_template(
enum barrier_type bt, kmp_info_t *this_thr, int gtid, int tid,
int propagate_icvs USE_ITT_BUILD_ARG(void *itt_sync_obj)) {
KMP_TIME_DEVELOPER_PARTITIONED_BLOCK(KMP_linear_release);
@@ -202,7 +211,15 @@ static void __kmp_linear_barrier_release(
KA_TRACE(20, ("__kmp_linear_barrier_release: T#%d wait go(%p) == %u\n",
gtid, &thr_bar->b_go, KMP_BARRIER_STATE_BUMP));
kmp_flag_64 flag(&thr_bar->b_go, KMP_BARRIER_STATE_BUMP);
- flag.wait(this_thr, TRUE USE_ITT_BUILD_ARG(itt_sync_obj));
+ if (cancellable) {
+ bool cancelled = flag.wait_cancellable_nosleep(
+ this_thr, TRUE USE_ITT_BUILD_ARG(itt_sync_obj));
+ if (cancelled) {
+ return true;
+ }
+ } else {
+ flag.wait(this_thr, TRUE USE_ITT_BUILD_ARG(itt_sync_obj));
+ }
ANNOTATE_BARRIER_END(this_thr);
#if USE_ITT_BUILD && USE_ITT_NOTIFY
if ((__itt_sync_create_ptr && itt_sync_obj == NULL) || KMP_ITT_DEBUG) {
@@ -213,7 +230,7 @@ static void __kmp_linear_barrier_release(
__kmp_itt_task_starting(itt_sync_obj);
if (bt == bs_forkjoin_barrier && TCR_4(__kmp_global.g.g_done))
- return;
+ return false;
itt_sync_obj = __kmp_itt_barrier_object(gtid, bs_forkjoin_barrier);
if (itt_sync_obj != NULL)
@@ -223,7 +240,7 @@ static void __kmp_linear_barrier_release(
#endif /* USE_ITT_BUILD && USE_ITT_NOTIFY */
// Early exit for reaping threads releasing forkjoin barrier
if (bt == bs_forkjoin_barrier && TCR_4(__kmp_global.g.g_done))
- return;
+ return false;
// The worker thread may now assume that the team is valid.
#ifdef KMP_DEBUG
tid = __kmp_tid_from_gtid(gtid);
@@ -240,6 +257,35 @@ static void __kmp_linear_barrier_release(
20,
("__kmp_linear_barrier_release: T#%d(%d:%d) exit for barrier type %d\n",
gtid, team->t.t_id, tid, bt));
+ return false;
+}
+
+static void __kmp_linear_barrier_gather(
+ enum barrier_type bt, kmp_info_t *this_thr, int gtid, int tid,
+ void (*reduce)(void *, void *) USE_ITT_BUILD_ARG(void *itt_sync_obj)) {
+ __kmp_linear_barrier_gather_template<false>(
+ bt, this_thr, gtid, tid, reduce USE_ITT_BUILD_ARG(itt_sync_obj));
+}
+
+static bool __kmp_linear_barrier_gather_cancellable(
+ enum barrier_type bt, kmp_info_t *this_thr, int gtid, int tid,
+ void (*reduce)(void *, void *) USE_ITT_BUILD_ARG(void *itt_sync_obj)) {
+ return __kmp_linear_barrier_gather_template<true>(
+ bt, this_thr, gtid, tid, reduce USE_ITT_BUILD_ARG(itt_sync_obj));
+}
+
+static void __kmp_linear_barrier_release(
+ enum barrier_type bt, kmp_info_t *this_thr, int gtid, int tid,
+ int propagate_icvs USE_ITT_BUILD_ARG(void *itt_sync_obj)) {
+ __kmp_linear_barrier_release_template<false>(
+ bt, this_thr, gtid, tid, propagate_icvs USE_ITT_BUILD_ARG(itt_sync_obj));
+}
+
+static bool __kmp_linear_barrier_release_cancellable(
+ enum barrier_type bt, kmp_info_t *this_thr, int gtid, int tid,
+ int propagate_icvs USE_ITT_BUILD_ARG(void *itt_sync_obj)) {
+ return __kmp_linear_barrier_release_template<true>(
+ bt, this_thr, gtid, tid, propagate_icvs USE_ITT_BUILD_ARG(itt_sync_obj));
}
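
These four thin wrappers pin the template parameter at compile time: the existing barrier-pattern dispatch keeps calling the void, non-cancellable entry points, while the cancellable variants surface the bool result. A compressed sketch of the same shape, with hypothetical names:

    // One template does the work; `cancellable` is a compile-time flag, so
    // the `if (cancellable)` branches inside compile away in each instance.
    template <bool cancellable = false>
    static bool gather_template(int tid) {
      if (cancellable) {
        // cancellable wait; may return true (cancelled)
      } else {
        // ordinary blocking wait
      }
      return false; // not cancelled
    }

    // Non-template wrappers preserve the two distinct public signatures.
    static void gather(int tid) { gather_template<false>(tid); }
    static bool gather_cancellable(int tid) { return gather_template<true>(tid); }
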
// Tree barrier
@@ -823,12 +869,10 @@ static void __kmp_hierarchical_barrier_gather(
kmp_uint64 new_state;
int level = team->t.t_level;
-#if OMP_40_ENABLED
if (other_threads[0]
->th.th_teams_microtask) // are we inside the teams construct?
if (this_thr->th.th_teams_size.nteams > 1)
++level; // level was not increased in teams construct for team_of_masters
-#endif
if (level == 1)
thr_bar->use_oncore_barrier = 1;
else
@@ -1053,7 +1097,6 @@ static void __kmp_hierarchical_barrier_release(
nproc = this_thr->th.th_team_nproc;
int level = team->t.t_level;
-#if OMP_40_ENABLED
if (team->t.t_threads[0]
->th.th_teams_microtask) { // are we inside the teams construct?
if (team->t.t_pkfn != (microtask_t)__kmp_teams_master &&
@@ -1062,7 +1105,6 @@ static void __kmp_hierarchical_barrier_release(
if (this_thr->th.th_teams_size.nteams > 1)
++level; // level was not increased in teams construct for team_of_masters
}
-#endif
if (level == 1)
thr_bar->use_oncore_barrier = 1;
else
@@ -1209,24 +1251,49 @@ static void __kmp_hierarchical_barrier_release(
// End of Barrier Algorithms
+// Type traits for the cancelled value:
+// if cancellable is true, is_cancellable holds a normal run-time boolean;
+// if cancellable is false, is_cancellable is the compile-time constant false.
+template <bool cancellable> struct is_cancellable {};
+template <> struct is_cancellable<true> {
+ bool value;
+ is_cancellable() : value(false) {}
+ is_cancellable(bool b) : value(b) {}
+ is_cancellable &operator=(bool b) {
+ value = b;
+ return *this;
+ }
+ operator bool() const { return value; }
+};
+template <> struct is_cancellable<false> {
+ is_cancellable &operator=(bool b) { return *this; }
+ constexpr operator bool() const { return false; }
+};
+
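With this trait, the shared barrier code below can write `cancelled = ...` and test `!cancelled` unconditionally: in the `<true>` instantiation `cancelled` is a real flag, while in `<false>` the assignment is a no-op and the conversion is a constexpr false, so the compiler can delete every cancellation branch. A self-contained sketch of that behaviour (trait copied from the patch; the demo names are hypothetical):

    template <bool cancellable> struct is_cancellable {};
    template <> struct is_cancellable<true> {
      bool value;
      is_cancellable() : value(false) {}
      is_cancellable(bool b) : value(b) {}
      is_cancellable &operator=(bool b) {
        value = b;
        return *this;
      }
      operator bool() const { return value; }
    };
    template <> struct is_cancellable<false> {
      is_cancellable &operator=(bool) { return *this; }
      constexpr operator bool() const { return false; }
    };

    // <false> is statically false: assignments are discarded at compile time.
    static_assert(!is_cancellable<false>(), "never cancelled");

    // <true> behaves like a plain bool at run time.
    bool demo() {
      is_cancellable<true> cancelled;
      cancelled = true;
      return cancelled; // true
    }
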
// Internal function to do a barrier.
/* If is_split is true, do a split barrier; otherwise, do a plain barrier.
If reduce is non-NULL, do a split reduction barrier; otherwise, do a
regular split barrier.
- Returns 0 if master thread, 1 if worker thread. */
-int __kmp_barrier(enum barrier_type bt, int gtid, int is_split,
- size_t reduce_size, void *reduce_data,
- void (*reduce)(void *, void *)) {
+ When cancellable = false:
+ Returns 0 if master thread, 1 if worker thread.
+ When cancellable = true:
+ Returns 0 if not cancelled, 1 if cancelled. */
+template <bool cancellable = false>
+static int __kmp_barrier_template(enum barrier_type bt, int gtid, int is_split,
+ size_t reduce_size, void *reduce_data,
+ void (*reduce)(void *, void *)) {
KMP_TIME_PARTITIONED_BLOCK(OMP_plain_barrier);
KMP_SET_THREAD_STATE_BLOCK(PLAIN_BARRIER);
int tid = __kmp_tid_from_gtid(gtid);
kmp_info_t *this_thr = __kmp_threads[gtid];
kmp_team_t *team = this_thr->th.th_team;
int status = 0;
+ is_cancellable<cancellable> cancelled;
#if OMPT_SUPPORT && OMPT_OPTIONAL
ompt_data_t *my_task_data;
ompt_data_t *my_parallel_data;
void *return_address;
+ ompt_sync_region_t barrier_kind;
#endif
KA_TRACE(15, ("__kmp_barrier: T#%d(%d:%d) has arrived\n", gtid,
@@ -1239,15 +1306,16 @@ int __kmp_barrier(enum barrier_type bt, int gtid, int is_split,
my_task_data = OMPT_CUR_TASK_DATA(this_thr);
my_parallel_data = OMPT_CUR_TEAM_DATA(this_thr);
return_address = OMPT_LOAD_RETURN_ADDRESS(gtid);
+ barrier_kind = __ompt_get_barrier_kind(bt, this_thr);
if (ompt_enabled.ompt_callback_sync_region) {
ompt_callbacks.ompt_callback(ompt_callback_sync_region)(
- ompt_sync_region_barrier, ompt_scope_begin, my_parallel_data,
- my_task_data, return_address);
+ barrier_kind, ompt_scope_begin, my_parallel_data, my_task_data,
+ return_address);
}
if (ompt_enabled.ompt_callback_sync_region_wait) {
ompt_callbacks.ompt_callback(ompt_callback_sync_region_wait)(
- ompt_sync_region_barrier, ompt_scope_begin, my_parallel_data,
- my_task_data, return_address);
+ barrier_kind, ompt_scope_begin, my_parallel_data, my_task_data,
+ return_address);
}
#endif
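
Rather than hard-coding ompt_sync_region_barrier, the patch computes the kind once per barrier and reuses it for both the sync_region and sync_region_wait begin/end callbacks. A hedged sketch of what such a mapping could look like; the real __ompt_get_barrier_kind is defined elsewhere in the runtime and may also consult thread state:

    // Hypothetical mapping, for illustration only: fork/join barriers are
    // implicit; anything else falls back to the generic barrier kind.
    static ompt_sync_region_t barrier_kind_sketch(enum barrier_type bt) {
      switch (bt) {
      case bs_forkjoin_barrier:
        return ompt_sync_region_barrier_implicit;
      default:
        return ompt_sync_region_barrier; // assumption, not the actual mapping
      }
    }
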
// It is OK to report the barrier state after the barrier begin callback.
@@ -1306,41 +1374,45 @@ int __kmp_barrier(enum barrier_type bt, int gtid, int is_split,
}
if (KMP_MASTER_TID(tid) && __kmp_tasking_mode != tskm_immediate_exec)
- __kmp_task_team_setup(
- this_thr, team,
- 0); // use 0 to only setup the current team if nthreads > 1
-
- switch (__kmp_barrier_gather_pattern[bt]) {
- case bp_hyper_bar: {
- KMP_ASSERT(__kmp_barrier_gather_branch_bits[bt]); // don't set branch bits
- // to 0; use linear
- __kmp_hyper_barrier_gather(bt, this_thr, gtid, tid,
- reduce USE_ITT_BUILD_ARG(itt_sync_obj));
- break;
- }
- case bp_hierarchical_bar: {
- __kmp_hierarchical_barrier_gather(bt, this_thr, gtid, tid,
- reduce USE_ITT_BUILD_ARG(itt_sync_obj));
- break;
- }
- case bp_tree_bar: {
- KMP_ASSERT(__kmp_barrier_gather_branch_bits[bt]); // don't set branch bits
- // to 0; use linear
- __kmp_tree_barrier_gather(bt, this_thr, gtid, tid,
- reduce USE_ITT_BUILD_ARG(itt_sync_obj));
- break;
- }
- default: {
- __kmp_linear_barrier_gather(bt, this_thr, gtid, tid,
+ // use 0 to only setup the current team if nthreads > 1
+ __kmp_task_team_setup(this_thr, team, 0);
+
+ if (cancellable) {
+ cancelled = __kmp_linear_barrier_gather_cancellable(
+ bt, this_thr, gtid, tid, reduce USE_ITT_BUILD_ARG(itt_sync_obj));
+ } else {
+ switch (__kmp_barrier_gather_pattern[bt]) {
+ case bp_hyper_bar: {
+ // don't set branch bits to 0; use linear
+ KMP_ASSERT(__kmp_barrier_gather_branch_bits[bt]);
+ __kmp_hyper_barrier_gather(bt, this_thr, gtid, tid,
+ reduce USE_ITT_BUILD_ARG(itt_sync_obj));
+ break;
+ }
+ case bp_hierarchical_bar: {
+ __kmp_hierarchical_barrier_gather(
+ bt, this_thr, gtid, tid, reduce USE_ITT_BUILD_ARG(itt_sync_obj));
+ break;
+ }
+ case bp_tree_bar: {
+ // don't set branch bits to 0; use linear
+ KMP_ASSERT(__kmp_barrier_gather_branch_bits[bt]);
+ __kmp_tree_barrier_gather(bt, this_thr, gtid, tid,
reduce USE_ITT_BUILD_ARG(itt_sync_obj));
- }
+ break;
+ }
+ default: {
+ __kmp_linear_barrier_gather(bt, this_thr, gtid, tid,
+ reduce USE_ITT_BUILD_ARG(itt_sync_obj));
+ }
+ }
}
KMP_MB();
if (KMP_MASTER_TID(tid)) {
status = 0;
- if (__kmp_tasking_mode != tskm_immediate_exec) {
+ if (__kmp_tasking_mode != tskm_immediate_exec && !cancelled) {
__kmp_task_team_wait(this_thr, team USE_ITT_BUILD_ARG(itt_sync_obj));
}
#if USE_DEBUGGER
@@ -1349,13 +1421,14 @@ int __kmp_barrier(enum barrier_type bt, int gtid, int is_split,
team->t.t_bar[bt].b_team_arrived += 1;
#endif
-#if OMP_40_ENABLED
- kmp_int32 cancel_request = KMP_ATOMIC_LD_RLX(&team->t.t_cancel_request);
- // Reset cancellation flag for worksharing constructs
- if (cancel_request == cancel_loop || cancel_request == cancel_sections) {
- KMP_ATOMIC_ST_RLX(&team->t.t_cancel_request, cancel_noreq);
+ if (__kmp_omp_cancellation) {
+ kmp_int32 cancel_request = KMP_ATOMIC_LD_RLX(&team->t.t_cancel_request);
+ // Reset cancellation flag for worksharing constructs
+ if (cancel_request == cancel_loop ||
+ cancel_request == cancel_sections) {
+ KMP_ATOMIC_ST_RLX(&team->t.t_cancel_request, cancel_noreq);
+ }
}
-#endif
#if USE_ITT_BUILD
/* TODO: In case of split reduction barrier, master thread may send
acquired event early, before the final summation into the shared
@@ -1368,9 +1441,7 @@ int __kmp_barrier(enum barrier_type bt, int gtid, int is_split,
// Barrier - report frame end (only if active_level == 1)
if ((__itt_frame_submit_v3_ptr || KMP_ITT_DEBUG) &&
__kmp_forkjoin_frames_mode &&
-#if OMP_40_ENABLED
this_thr->th.th_teams_microtask == NULL &&
-#endif
team->t.t_active_level == 1) {
ident_t *loc = __kmp_threads[gtid]->th.th_ident;
kmp_uint64 cur_time = __itt_get_timestamp();
@@ -1417,31 +1488,36 @@ int __kmp_barrier(enum barrier_type bt, int gtid, int is_split,
__kmp_itt_barrier_middle(gtid, itt_sync_obj);
#endif /* USE_ITT_BUILD */
}
- if (status == 1 || !is_split) {
- switch (__kmp_barrier_release_pattern[bt]) {
- case bp_hyper_bar: {
- KMP_ASSERT(__kmp_barrier_release_branch_bits[bt]);
- __kmp_hyper_barrier_release(bt, this_thr, gtid, tid,
- FALSE USE_ITT_BUILD_ARG(itt_sync_obj));
- break;
- }
- case bp_hierarchical_bar: {
- __kmp_hierarchical_barrier_release(
+ if ((status == 1 || !is_split) && !cancelled) {
+ if (cancellable) {
+ cancelled = __kmp_linear_barrier_release_cancellable(
bt, this_thr, gtid, tid, FALSE USE_ITT_BUILD_ARG(itt_sync_obj));
- break;
- }
- case bp_tree_bar: {
- KMP_ASSERT(__kmp_barrier_release_branch_bits[bt]);
- __kmp_tree_barrier_release(bt, this_thr, gtid, tid,
- FALSE USE_ITT_BUILD_ARG(itt_sync_obj));
- break;
- }
- default: {
- __kmp_linear_barrier_release(bt, this_thr, gtid, tid,
+ } else {
+ switch (__kmp_barrier_release_pattern[bt]) {
+ case bp_hyper_bar: {
+ KMP_ASSERT(__kmp_barrier_release_branch_bits[bt]);
+ __kmp_hyper_barrier_release(bt, this_thr, gtid, tid,
+ FALSE USE_ITT_BUILD_ARG(itt_sync_obj));
+ break;
+ }
+ case bp_hierarchical_bar: {
+ __kmp_hierarchical_barrier_release(
+ bt, this_thr, gtid, tid, FALSE USE_ITT_BUILD_ARG(itt_sync_obj));
+ break;
+ }
+ case bp_tree_bar: {
+ KMP_ASSERT(__kmp_barrier_release_branch_bits[bt]);
+ __kmp_tree_barrier_release(bt, this_thr, gtid, tid,
FALSE USE_ITT_BUILD_ARG(itt_sync_obj));
+ break;
+ }
+ default: {
+ __kmp_linear_barrier_release(bt, this_thr, gtid, tid,
+ FALSE USE_ITT_BUILD_ARG(itt_sync_obj));
+ }
+ }
}
- }
- if (__kmp_tasking_mode != tskm_immediate_exec) {
+ if (__kmp_tasking_mode != tskm_immediate_exec && !cancelled) {
__kmp_task_team_sync(this_thr, team);
}
}
@@ -1456,7 +1532,6 @@ int __kmp_barrier(enum barrier_type bt, int gtid, int is_split,
} else { // Team is serialized.
status = 0;
if (__kmp_tasking_mode != tskm_immediate_exec) {
-#if OMP_45_ENABLED
if (this_thr->th.th_task_team != NULL) {
#if USE_ITT_NOTIFY
void *itt_sync_obj = NULL;
@@ -1476,12 +1551,6 @@ int __kmp_barrier(enum barrier_type bt, int gtid, int is_split,
__kmp_itt_barrier_finished(gtid, itt_sync_obj);
#endif /* USE_ITT_BUILD */
}
-#else
- // The task team should be NULL for serialized code (tasks will be
- // executed immediately)
- KMP_DEBUG_ASSERT(team->t.t_task_team[this_thr->th.th_task_state] == NULL);
- KMP_DEBUG_ASSERT(this_thr->th.th_task_team == NULL);
-#endif
}
}
KA_TRACE(15, ("__kmp_barrier: T#%d(%d:%d) is leaving with return value %d\n",
@@ -1493,13 +1562,13 @@ int __kmp_barrier(enum barrier_type bt, int gtid, int is_split,
#if OMPT_OPTIONAL
if (ompt_enabled.ompt_callback_sync_region_wait) {
ompt_callbacks.ompt_callback(ompt_callback_sync_region_wait)(
- ompt_sync_region_barrier, ompt_scope_end, my_parallel_data,
- my_task_data, return_address);
+ barrier_kind, ompt_scope_end, my_parallel_data, my_task_data,
+ return_address);
}
if (ompt_enabled.ompt_callback_sync_region) {
ompt_callbacks.ompt_callback(ompt_callback_sync_region)(
- ompt_sync_region_barrier, ompt_scope_end, my_parallel_data,
- my_task_data, return_address);
+ barrier_kind, ompt_scope_end, my_parallel_data, my_task_data,
+ return_address);
}
#endif
this_thr->th.ompt_thread_info.state = ompt_state_work_parallel;
@@ -1507,9 +1576,43 @@ int __kmp_barrier(enum barrier_type bt, int gtid, int is_split,
#endif
ANNOTATE_BARRIER_END(&team->t.t_bar);
+ if (cancellable)
+ return (int)cancelled;
return status;
}
+// Returns 0 if master thread, 1 if worker thread.
+int __kmp_barrier(enum barrier_type bt, int gtid, int is_split,
+ size_t reduce_size, void *reduce_data,
+ void (*reduce)(void *, void *)) {
+ return __kmp_barrier_template<>(bt, gtid, is_split, reduce_size, reduce_data,
+ reduce);
+}
+
+#if defined(KMP_GOMP_COMPAT)
+// Returns 1 if cancelled, 0 otherwise
+int __kmp_barrier_gomp_cancel(int gtid) {
+ if (__kmp_omp_cancellation) {
+ int cancelled = __kmp_barrier_template<true>(bs_plain_barrier, gtid, FALSE,
+ 0, NULL, NULL);
+ if (cancelled) {
+ int tid = __kmp_tid_from_gtid(gtid);
+ kmp_info_t *this_thr = __kmp_threads[gtid];
+ if (KMP_MASTER_TID(tid)) {
+ // Master does not need to revert anything
+ } else {
+ // Workers need to revert their private b_arrived flag
+ this_thr->th.th_bar[bs_plain_barrier].bb.b_arrived -=
+ KMP_BARRIER_STATE_BUMP;
+ }
+ }
+ return cancelled;
+ }
+ __kmp_barrier(bs_plain_barrier, gtid, FALSE, 0, NULL, NULL);
+ return FALSE;
+}
+#endif
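
The revert matters because a cancelled worker already advanced its private b_arrived counter by KMP_BARRIER_STATE_BUMP on entry, yet never passed the barrier; without subtracting the bump, the next barrier would see that worker as having already arrived. A toy model of the bookkeeping, simplified from the runtime's actual flag layout:

    #include <cassert>
    #include <cstdint>

    static const uint64_t BUMP = 4; // stands in for KMP_BARRIER_STATE_BUMP

    int main() {
      uint64_t b_arrived = 0;  // worker's private arrival counter
      uint64_t expected = 0;   // value the next barrier episode expects

      b_arrived += BUMP;       // worker arrives at a barrier...
      bool cancelled = true;   // ...and the barrier is cancelled
      if (cancelled)
        b_arrived -= BUMP;     // revert, as __kmp_barrier_gomp_cancel does

      expected += BUMP;        // next, non-cancelled barrier episode
      b_arrived += BUMP;       // worker arrives again
      assert(b_arrived == expected); // counters stay in sync
      return 0;
    }
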
+
void __kmp_end_split_barrier(enum barrier_type bt, int gtid) {
KMP_TIME_DEVELOPER_PARTITIONED_BLOCK(KMP_end_split_barrier);
KMP_SET_THREAD_STATE_BLOCK(PLAIN_BARRIER);
@@ -1613,12 +1716,12 @@ void __kmp_join_barrier(int gtid) {
my_parallel_data = OMPT_CUR_TEAM_DATA(this_thr);
if (ompt_enabled.ompt_callback_sync_region) {
ompt_callbacks.ompt_callback(ompt_callback_sync_region)(
- ompt_sync_region_barrier, ompt_scope_begin, my_parallel_data,
+ ompt_sync_region_barrier_implicit, ompt_scope_begin, my_parallel_data,
my_task_data, codeptr);
}
if (ompt_enabled.ompt_callback_sync_region_wait) {
ompt_callbacks.ompt_callback(ompt_callback_sync_region_wait)(
- ompt_sync_region_barrier, ompt_scope_begin, my_parallel_data,
+ ompt_sync_region_barrier_implicit, ompt_scope_begin, my_parallel_data,
my_task_data, codeptr);
}
if (!KMP_MASTER_TID(ds_tid))
@@ -1698,11 +1801,9 @@ void __kmp_join_barrier(int gtid) {
if (__kmp_tasking_mode != tskm_immediate_exec) {
__kmp_task_team_wait(this_thr, team USE_ITT_BUILD_ARG(itt_sync_obj));
}
-#if OMP_50_ENABLED
if (__kmp_display_affinity) {
KMP_CHECK_UPDATE(team->t.t_display_affinity, 0);
}
-#endif
#if KMP_STATS_ENABLED
// Have master thread flag the workers to indicate they are now waiting for
// next parallel region. Also wake them up so they switch their timers to
@@ -1726,10 +1827,7 @@ void __kmp_join_barrier(int gtid) {
#if USE_ITT_BUILD && USE_ITT_NOTIFY
// Join barrier - report frame end
if ((__itt_frame_submit_v3_ptr || KMP_ITT_DEBUG) &&
- __kmp_forkjoin_frames_mode &&
-#if OMP_40_ENABLED
- this_thr->th.th_teams_microtask == NULL &&
-#endif
+ __kmp_forkjoin_frames_mode && this_thr->th.th_teams_microtask == NULL &&
team->t.t_active_level == 1) {
kmp_uint64 cur_time = __itt_get_timestamp();
ident_t *loc = team->t.t_ident;
@@ -1901,11 +1999,13 @@ void __kmp_fork_barrier(int gtid, int tid) {
codeptr = team->t.ompt_team_info.master_return_address;
if (ompt_enabled.ompt_callback_sync_region_wait) {
ompt_callbacks.ompt_callback(ompt_callback_sync_region_wait)(
- ompt_sync_region_barrier, ompt_scope_end, NULL, task_data, codeptr);
+ ompt_sync_region_barrier_implicit, ompt_scope_end, NULL, task_data,
+ codeptr);
}
if (ompt_enabled.ompt_callback_sync_region) {
ompt_callbacks.ompt_callback(ompt_callback_sync_region)(
- ompt_sync_region_barrier, ompt_scope_end, NULL, task_data, codeptr);
+ ompt_sync_region_barrier_implicit, ompt_scope_end, NULL, task_data,
+ codeptr);
}
#endif
if (!KMP_MASTER_TID(ds_tid) && ompt_enabled.ompt_callback_implicit_task) {
@@ -1968,17 +2068,13 @@ void __kmp_fork_barrier(int gtid, int tid) {
__kmp_task_team_sync(this_thr, team);
}
-#if OMP_40_ENABLED && KMP_AFFINITY_SUPPORTED
+#if KMP_AFFINITY_SUPPORTED
kmp_proc_bind_t proc_bind = team->t.t_proc_bind;
if (proc_bind == proc_bind_intel) {
-#endif
-#if KMP_AFFINITY_SUPPORTED
// Call dynamic affinity settings
if (__kmp_affinity_type == affinity_balanced && team->t.t_size_changed) {
__kmp_balanced_affinity(this_thr, team->t.t_nproc);
}
-#endif // KMP_AFFINITY_SUPPORTED
-#if OMP_40_ENABLED && KMP_AFFINITY_SUPPORTED
} else if (proc_bind != proc_bind_false) {
if (this_thr->th.th_new_place == this_thr->th.th_current_place) {
KA_TRACE(100, ("__kmp_fork_barrier: T#%d already in correct place %d\n",
@@ -1988,8 +2084,7 @@ void __kmp_fork_barrier(int gtid, int tid) {
__kmp_affinity_set_place(gtid);
}
}
-#endif
-#if OMP_50_ENABLED
+#endif // KMP_AFFINITY_SUPPORTED
// Perform the display affinity functionality
if (__kmp_display_affinity) {
if (team->t.t_display_affinity
@@ -2005,7 +2100,6 @@ void __kmp_fork_barrier(int gtid, int tid) {
}
if (!KMP_MASTER_TID(tid))
KMP_CHECK_UPDATE(this_thr->th.th_def_allocator, team->t.t_def_allocator);
-#endif
#if USE_ITT_BUILD && USE_ITT_NOTIFY
if (__itt_sync_create_ptr || KMP_ITT_DEBUG) {