Diffstat (limited to 'runtime/src/kmp_global.cpp')
-rw-r--r-- | runtime/src/kmp_global.cpp | 101 |
1 file changed, 49 insertions, 52 deletions
diff --git a/runtime/src/kmp_global.cpp b/runtime/src/kmp_global.cpp
index 5f38009af8f0..1ec73b82e99c 100644
--- a/runtime/src/kmp_global.cpp
+++ b/runtime/src/kmp_global.cpp
@@ -4,10 +4,9 @@
 
 //===----------------------------------------------------------------------===//
 //
-//                     The LLVM Compiler Infrastructure
-//
-// This file is dual licensed under the MIT and the University of Illinois Open
-// Source Licenses. See LICENSE.txt for details.
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
 //
 //===----------------------------------------------------------------------===//
 
@@ -63,11 +62,6 @@ int __kmp_version = 0;
 std::atomic<kmp_int32> __kmp_team_counter = ATOMIC_VAR_INIT(0);
 std::atomic<kmp_int32> __kmp_task_counter = ATOMIC_VAR_INIT(0);
 
-unsigned int __kmp_init_wait =
-    KMP_DEFAULT_INIT_WAIT; /* initial number of spin-tests */
-unsigned int __kmp_next_wait =
-    KMP_DEFAULT_NEXT_WAIT; /* susequent number of spin-tests */
-
 size_t __kmp_stksize = KMP_DEFAULT_STKSIZE;
 #if KMP_USE_MONITOR
 size_t __kmp_monitor_stksize = 0; // auto adjust
@@ -133,10 +127,9 @@ int __kmp_dflt_team_nth = 0;
 int __kmp_dflt_team_nth_ub = 0;
 int __kmp_tp_capacity = 0;
 int __kmp_tp_cached = 0;
-int __kmp_dflt_nested = FALSE;
 int __kmp_dispatch_num_buffers = KMP_DFLT_DISP_NUM_BUFF;
-int __kmp_dflt_max_active_levels =
-    KMP_MAX_ACTIVE_LEVELS_LIMIT; /* max_active_levels limit */
+int __kmp_dflt_max_active_levels = 1; // Nesting off by default
+bool __kmp_dflt_max_active_levels_set = false; // Don't override set value
 #if KMP_NESTED_HOT_TEAMS
 int __kmp_hot_teams_mode = 0; /* 0 - free extra threads when reduced */
 /* 1 - keep extra threads when reduced */
@@ -209,11 +202,9 @@ const char *__kmp_speculative_statsfile = "-";
 
 #endif // KMP_USE_ADAPTIVE_LOCKS
 
-#if OMP_40_ENABLED
 int __kmp_display_env = FALSE;
 int __kmp_display_env_verbose = FALSE;
 int __kmp_omp_cancellation = FALSE;
-#endif
 
 /* map OMP 3.0 schedule types with our internal schedule types */
 enum sched_type __kmp_sch_map[kmp_sched_upper - kmp_sched_lower_ext +
@@ -277,15 +268,10 @@ char *__kmp_cpuinfo_file = NULL;
 
 #endif /* KMP_AFFINITY_SUPPORTED */
 
-#if OMP_40_ENABLED
 kmp_nested_proc_bind_t __kmp_nested_proc_bind = {NULL, 0, 0};
 int __kmp_affinity_num_places = 0;
-#endif
-
-#if OMP_50_ENABLED
 int __kmp_display_affinity = FALSE;
 char *__kmp_affinity_format = NULL;
-#endif // OMP_50_ENABLED
 
 kmp_hws_item_t __kmp_hws_socket = {0, 0};
 kmp_hws_item_t __kmp_hws_node = {0, 0};
@@ -295,30 +281,44 @@ kmp_hws_item_t __kmp_hws_proc = {0, 0};
 int __kmp_hws_requested = 0;
 int __kmp_hws_abs_flag = 0; // absolute or per-item number requested
 
-#if OMP_40_ENABLED
 kmp_int32 __kmp_default_device = 0;
-#endif
 
 kmp_tasking_mode_t __kmp_tasking_mode = tskm_task_teams;
-#if OMP_45_ENABLED
 kmp_int32 __kmp_max_task_priority = 0;
 kmp_uint64 __kmp_taskloop_min_tasks = 0;
-#endif
 
-#if OMP_50_ENABLED
 int __kmp_memkind_available = 0;
-int __kmp_hbw_mem_available = 0;
-const omp_allocator_t *OMP_NULL_ALLOCATOR = NULL;
-const omp_allocator_t *omp_default_mem_alloc = (const omp_allocator_t *)1;
-const omp_allocator_t *omp_large_cap_mem_alloc = (const omp_allocator_t *)2;
-const omp_allocator_t *omp_const_mem_alloc = (const omp_allocator_t *)3;
-const omp_allocator_t *omp_high_bw_mem_alloc = (const omp_allocator_t *)4;
-const omp_allocator_t *omp_low_lat_mem_alloc = (const omp_allocator_t *)5;
-const omp_allocator_t *omp_cgroup_mem_alloc = (const omp_allocator_t *)6;
-const omp_allocator_t *omp_pteam_mem_alloc = (const omp_allocator_t *)7;
-const omp_allocator_t *omp_thread_mem_alloc = (const omp_allocator_t *)8;
-void *const *__kmp_def_allocator = omp_default_mem_alloc;
-#endif
+omp_allocator_handle_t const omp_null_allocator = NULL;
+omp_allocator_handle_t const omp_default_mem_alloc =
+    (omp_allocator_handle_t const)1;
+omp_allocator_handle_t const omp_large_cap_mem_alloc =
+    (omp_allocator_handle_t const)2;
+omp_allocator_handle_t const omp_const_mem_alloc =
+    (omp_allocator_handle_t const)3;
+omp_allocator_handle_t const omp_high_bw_mem_alloc =
+    (omp_allocator_handle_t const)4;
+omp_allocator_handle_t const omp_low_lat_mem_alloc =
+    (omp_allocator_handle_t const)5;
+omp_allocator_handle_t const omp_cgroup_mem_alloc =
+    (omp_allocator_handle_t const)6;
+omp_allocator_handle_t const omp_pteam_mem_alloc =
+    (omp_allocator_handle_t const)7;
+omp_allocator_handle_t const omp_thread_mem_alloc =
+    (omp_allocator_handle_t const)8;
+omp_allocator_handle_t const kmp_max_mem_alloc =
+    (omp_allocator_handle_t const)1024;
+omp_allocator_handle_t __kmp_def_allocator = omp_default_mem_alloc;
+
+omp_memspace_handle_t const omp_default_mem_space =
+    (omp_memspace_handle_t const)0;
+omp_memspace_handle_t const omp_large_cap_mem_space =
+    (omp_memspace_handle_t const)1;
+omp_memspace_handle_t const omp_const_mem_space =
+    (omp_memspace_handle_t const)2;
+omp_memspace_handle_t const omp_high_bw_mem_space =
+    (omp_memspace_handle_t const)3;
+omp_memspace_handle_t const omp_low_lat_mem_space =
+    (omp_memspace_handle_t const)4;
 
 /* This check ensures that the compiler is passing the correct data type for the
    flags formal parameter of the function kmpc_omp_task_alloc(). If the type is
@@ -328,6 +328,7 @@ void *const *__kmp_def_allocator = omp_default_mem_alloc;
 KMP_BUILD_ASSERT(sizeof(kmp_tasking_flags_t) == 4);
 
 int __kmp_task_stealing_constraint = 1; /* Constrain task stealing by default */
+int __kmp_enable_task_throttling = 1;
 
 #ifdef DEBUG_SUSPEND
 int __kmp_suspend_count = 0;
@@ -396,22 +397,17 @@ int __kmp_env_blocktime = FALSE; /* KMP_BLOCKTIME specified? */
 int __kmp_env_checks = FALSE; /* KMP_CHECKS specified? */
 int __kmp_env_consistency_check = FALSE; /* KMP_CONSISTENCY_CHECK specified? */
 
+// From KMP_USE_YIELD:
+// 0 = never yield;
+// 1 = always yield (default);
+// 2 = yield only if oversubscribed
+kmp_int32 __kmp_use_yield = 1;
+// This will be 1 if KMP_USE_YIELD environment variable was set explicitly
+kmp_int32 __kmp_use_yield_exp_set = 0;
+
 kmp_uint32 __kmp_yield_init = KMP_INIT_WAIT;
 kmp_uint32 __kmp_yield_next = KMP_NEXT_WAIT;
 
-#if KMP_USE_MONITOR
-kmp_uint32 __kmp_yielding_on = 1;
-#endif
-#if KMP_OS_CNK
-kmp_uint32 __kmp_yield_cycle = 0;
-#else
-kmp_uint32 __kmp_yield_cycle = 1; /* Yield-cycle is on by default */
-#endif
-kmp_int32 __kmp_yield_on_count =
-    10; /* By default, yielding is on for 10 monitor periods. */
-kmp_int32 __kmp_yield_off_count =
-    1; /* By default, yielding is off for 1 monitor periods. */
-
 /* ------------------------------------------------------ */
 /* STATE mostly syncronized with global lock */
 /* data written to rarely by masters, read often by workers */
@@ -426,7 +422,6 @@ kmp_root_t **__kmp_root = NULL;
 KMP_ALIGN_CACHE
 volatile int __kmp_nth = 0;
 volatile int __kmp_all_nth = 0;
-int __kmp_thread_pool_nth = 0;
 volatile kmp_info_t *__kmp_thread_pool = NULL;
 volatile kmp_team_t *__kmp_team_pool = NULL;
 
@@ -531,7 +526,9 @@ int _You_must_link_with_Intel_OpenMP_library = 1;
 int _You_must_link_with_Microsoft_OpenMP_library = 1;
 #endif
 
-#if OMP_50_ENABLED
 kmp_target_offload_kind_t __kmp_target_offload = tgt_default;
-#endif
+
+// OMP Pause Resources
+kmp_pause_status_t __kmp_pause_status = kmp_not_paused;
+
 // end of file //
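
Usage note (not part of the commit): the hunk at @@ -295,30 +281,44 @@ replaces the OMP_50_ENABLED block of omp_allocator_t pointers with unconditional omp_allocator_handle_t and omp_memspace_handle_t constants. The minimal sketch below shows the standard OpenMP 5.0 allocator entry points these handles are passed to; it assumes a compiler and omp.h with OpenMP 5.0 allocator support and is illustrative only.

// Illustrative sketch, assuming OpenMP 5.0 allocator support in <omp.h>.
#include <omp.h>
#include <cstdio>

int main() {
  // omp_default_mem_alloc is the handle that __kmp_def_allocator is
  // initialized to in the hunk above.
  double *buf =
      (double *)omp_alloc(1024 * sizeof(double), omp_default_mem_alloc);
  if (buf != NULL) {
    buf[0] = 42.0;
    std::printf("allocated through omp_default_mem_alloc: %g\n", buf[0]);
    omp_free(buf, omp_default_mem_alloc);
  }
  return 0;
}

omp_high_bw_mem_alloc and the other predefined handles can be substituted for omp_default_mem_alloc; whether they map to distinct memory depends on the runtime and on libmemkind availability (tracked by __kmp_memkind_available above).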
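
The hunk at @@ -133,10 +127,9 @@ removes __kmp_dflt_nested and sets __kmp_dflt_max_active_levels to 1, so nested parallel regions are inactive unless the limit is raised; the new __kmp_dflt_max_active_levels_set flag records whether the value was set explicitly (per its "Don't override set value" comment). A minimal sketch, illustrative and not from the commit, of opting back in through the standard API with any OpenMP 3.0+ compiler:

#include <omp.h>
#include <cstdio>

int main() {
  // Allow two active nesting levels; OMP_MAX_ACTIVE_LEVELS=2 has the same
  // effect via the environment.
  omp_set_max_active_levels(2);
#pragma omp parallel num_threads(2)
  {
#pragma omp parallel num_threads(2)
    {
#pragma omp critical
      std::printf("outer thread %d, inner thread %d\n",
                  omp_get_ancestor_thread_num(1), omp_get_thread_num());
    }
  }
  return 0;
}

With the new default of 1, the inner region above runs with a single thread instead of forking.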
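
The hunk at @@ -277,15 +268,10 @@ makes __kmp_display_affinity and __kmp_affinity_format unconditional; they back the OpenMP 5.0 affinity-format feature (OMP_DISPLAY_AFFINITY / OMP_AFFINITY_FORMAT). A short illustrative sketch using the standard entry points, with an example format string chosen here for demonstration:

// Illustrative sketch, assuming OpenMP 5.0 affinity-format support.
#include <omp.h>

int main() {
  // Equivalent to setting OMP_AFFINITY_FORMAT; %n is the thread number and
  // %A the OS affinity set, per the OpenMP 5.0 field specifiers.
  omp_set_affinity_format("thread %n bound to OS proc set {%A}");
#pragma omp parallel num_threads(4)
  omp_display_affinity(nullptr); // nullptr means: use the current format
  return 0;
}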
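
The final hunk adds __kmp_pause_status alongside the now unconditional __kmp_target_offload; it is the state behind the OpenMP 5.0 pause-resource feature. An illustrative sketch of the standard call that drives that state, assuming OpenMP 5.0 support in omp.h:

#include <omp.h>
#include <cstdio>

int main() {
#pragma omp parallel
  { } // warm up the thread pool so there is something to release

  // Soft pause: the runtime may release threads and buffers it can recreate
  // later; omp_pause_hard additionally resets all OpenMP state.
  // Returns zero on success.
  int rc = omp_pause_resource_all(omp_pause_soft);
  std::printf("omp_pause_resource_all(omp_pause_soft) -> %d\n", rc);

#pragma omp parallel
  { } // the runtime re-initializes on the next parallel construct
  return 0;
}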