summaryrefslogtreecommitdiff
path: root/openmp/runtime/src/kmp_dispatch.cpp
diff options
context:
space:
mode:
author: Dimitry Andric <dim@FreeBSD.org>, 2020-01-17 20:45:01 +0000
committer: Dimitry Andric <dim@FreeBSD.org>, 2020-01-17 20:45:01 +0000
commit: 706b4fc47bbc608932d3b491ae19a3b9cde9497b (patch)
tree: 4adf86a776049cbf7f69a1929c4babcbbef925eb /openmp/runtime/src/kmp_dispatch.cpp
parent: 7cc9cf2bf09f069cb2dd947ead05d0b54301fb71 (diff)
Notes
Diffstat (limited to 'openmp/runtime/src/kmp_dispatch.cpp')
-rw-r--r-- openmp/runtime/src/kmp_dispatch.cpp | 21
1 file changed, 11 insertions(+), 10 deletions(-)
diff --git a/openmp/runtime/src/kmp_dispatch.cpp b/openmp/runtime/src/kmp_dispatch.cpp
index 161a2c696357..a91ffa2ba299 100644
--- a/openmp/runtime/src/kmp_dispatch.cpp
+++ b/openmp/runtime/src/kmp_dispatch.cpp
@@ -379,14 +379,15 @@ void __kmp_dispatch_init_algorithm(ident_t *loc, int gtid,
}
break;
} else {
- KD_TRACE(100, ("__kmp_dispatch_init_algorithm: T#%d falling-through to "
- "kmp_sch_static_balanced\n",
- gtid));
- schedule = kmp_sch_static_balanced;
- /* too few iterations: fall-through to kmp_sch_static_balanced */
+ /* too few chunks: switching to kmp_sch_dynamic_chunked */
+ schedule = kmp_sch_dynamic_chunked;
+ KD_TRACE(100, ("__kmp_dispatch_init_algorithm: T#%d switching to "
+ "kmp_sch_dynamic_chunked\n",
+ gtid));
+ if (pr->u.p.parm1 <= 0)
+ pr->u.p.parm1 = KMP_DEFAULT_CHUNK;
+ break;
} // if
- /* FALL-THROUGH to static balanced */
- KMP_FALLTHROUGH();
} // case
#endif
case kmp_sch_static_balanced: {
@@ -1532,7 +1533,7 @@ int __kmp_dispatch_next_algorithm(int gtid,
if ((T)remaining <
pr->u.p.parm2) { // compare with K*nproc*(chunk+1), K=2 by default
// use dynamic-style shcedule
- // atomically inrement iterations, get old value
+ // atomically increment iterations, get old value
init = test_then_add<ST>(RCAST(volatile ST *, &sh->u.s.iteration),
(ST)chunkspec);
remaining = trip - init;
@@ -1601,7 +1602,7 @@ int __kmp_dispatch_next_algorithm(int gtid,
// compare with K*nproc*(chunk+1), K=2 by default
if ((T)remaining < pr->u.p.parm2) {
// use dynamic-style shcedule
- // atomically inrement iterations, get old value
+ // atomically increment iterations, get old value
init = test_then_add<ST>(RCAST(volatile ST *, &sh->u.s.iteration),
(ST)chunk);
remaining = trip - init;
@@ -1892,7 +1893,7 @@ static int __kmp_dispatch_next(ident_t *loc, int gtid, kmp_int32 *p_last,
typedef typename traits_t<T>::signed_t ST;
// This is potentially slightly misleading, schedule(runtime) will appear here
// even if the actual runtme schedule is static. (Which points out a
- // disadavantage of schedule(runtime): even when static scheduling is used it
+ // disadvantage of schedule(runtime): even when static scheduling is used it
// costs more than a compile time choice to use static scheduling would.)
KMP_TIME_PARTITIONED_BLOCK(OMP_loop_dynamic_scheduling);