#if KMP_ARCH_X86 || KMP_ARCH_X86_64
kmp_cpuinfo_t __kmp_cpuinfo = {0};
kmp_stats_list *__kmp_stats_list;
volatile int __kmp_init_monitor = 0;
size_t __kmp_monitor_stksize = 0;
95 "KMP_PLAIN_BARRIER",
"KMP_FORKJOIN_BARRIER"
96#if KMP_FAST_REDUCTION_BARRIER
98 "KMP_REDUCTION_BARRIER"
102 "KMP_PLAIN_BARRIER_PATTERN",
"KMP_FORKJOIN_BARRIER_PATTERN"
103#if KMP_FAST_REDUCTION_BARRIER
105 "KMP_REDUCTION_BARRIER_PATTERN"
109#if KMP_FAST_REDUCTION_BARRIER
115 "linear",
"tree",
"hyper",
"hierarchical",
"dist"};
#if KMP_NESTED_HOT_TEAMS
int __kmp_hot_teams_mode = 0;
int __kmp_hot_teams_max_level = 1;
#if KMP_USE_HIER_SCHED
int __kmp_monitor_wakeups = KMP_MIN_MONITOR_WAKEUPS;
int __kmp_bt_intervals = KMP_INTERVALS_FROM_BLOCKTIME(KMP_DEFAULT_BLOCKTIME,
                                                      KMP_MIN_MONITOR_WAKEUPS);
#ifdef KMP_ADJUST_BLOCKTIME
int __kmp_zero_bt = FALSE;
#ifdef KMP_DFLT_NTH_CORES
#if (KMP_OS_LINUX || KMP_OS_AIX) && defined(KMP_TDATA_GTID)
#if KMP_ARCH_X86 || KMP_ARCH_X86_64
int __kmp_inherit_fp_control = TRUE;
kmp_int16 __kmp_init_x87_fpu_control_word = 0;
#ifdef USE_LOAD_BALANCE
double __kmp_load_balance_interval = 1.0;
#if KMP_USE_ADAPTIVE_LOCKS
kmp_adaptive_backoff_params_t __kmp_adaptive_backoff_params = {
    1, 1024};
#if KMP_DEBUG_ADAPTIVE_LOCKS
const char *__kmp_speculative_statsfile = "-";
#if KMP_HAVE_MWAIT || KMP_HAVE_UMWAIT
int __kmp_user_level_mwait = FALSE;
int __kmp_umwait_enabled = FALSE;
int __kmp_mwait_enabled = FALSE;
int __kmp_mwait_hints = 0;
int __kmp_waitpkg_enabled = 0;
int __kmp_tpause_state = 0;
int __kmp_tpause_hint = 1;
int __kmp_tpause_enabled = 0;
enum clock_function_type __kmp_clock_function;
int __kmp_clock_function_param;
enum mic_type __kmp_mic_type = non_mic;
#if KMP_AFFINITY_SUPPORTED
KMPAffinity *__kmp_affinity_dispatch = NULL;
int __kmp_hwloc_error = FALSE;
hwloc_topology_t __kmp_hwloc_topology = NULL;
#if KMP_GROUP_AFFINITY
int __kmp_num_proc_groups = 1;
kmp_GetActiveProcessorCount_t __kmp_GetActiveProcessorCount = NULL;
kmp_GetActiveProcessorGroupCount_t __kmp_GetActiveProcessorGroupCount = NULL;
kmp_GetThreadGroupAffinity_t __kmp_GetThreadGroupAffinity = NULL;
kmp_SetThreadGroupAffinity_t __kmp_SetThreadGroupAffinity = NULL;
size_t __kmp_affin_mask_size = 0;
enum affinity_top_method __kmp_affinity_top_method =
    affinity_top_method_default;
kmp_affinity_t __kmp_affinity = KMP_AFFINITY_INIT("KMP_AFFINITY");
kmp_affinity_t __kmp_hh_affinity = KMP_AFFINITY_INIT("KMP_HIDDEN_HELPER_AFFINITY");
kmp_affinity_t *__kmp_affinities[] = {&__kmp_affinity, &__kmp_hh_affinity};
char *__kmp_cpuinfo_file = NULL;
#if KMP_WEIGHTED_ITERATIONS_SUPPORTED
int __kmp_first_osid_with_ecore = -1;
int __kmp_suspend_count = 0;
int __kmp_forkjoin_frames = 1;
int __kmp_forkjoin_frames_mode = 3;
char __kmp_par_range_routine[KMP_PAR_RANGE_ROUTINE_LEN] = {'\0'};
char __kmp_par_range_filename[KMP_PAR_RANGE_FILENAME_LEN] = {'\0'};
int __kmp_par_range_lb = 0;
int __kmp_par_range_ub = INT_MAX;
#if KMP_OS_DARWIN && KMP_ARCH_AARCH64
#if KMP_USE_INTERNODE_ALIGNMENT
#if KMP_HANDLE_SIGNALS
int __kmp_handle_signals = FALSE;
// Test hooks: read-and-reset / set the suspend counter (trailing underscore
// for Fortran-style linkage).
int get_suspend_count_(void) {
  int count = __kmp_suspend_count;
  __kmp_suspend_count = 0;
  return count;
}
void set_suspend_count_(int *value) { __kmp_suspend_count = *value; }
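// Usage sketch (hypothetical test harness, not part of the library) showing
// how these hooks could be exercised from C to observe thread suspensions:
//
//   extern int get_suspend_count_(void);
//   extern void set_suspend_count_(int *value);
//
//   void check_suspend_counter(void) {
//     int zero = 0;
//     set_suspend_count_(&zero);              // reset __kmp_suspend_count
//     /* ... run OpenMP work that may idle worker threads ... */
//     int suspensions = get_suspend_count_(); // read and clear the counter
//     (void)suspensions;
//   }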
int __kmp_tdg_dot = 0;
kmp_tdg_info_t **__kmp_global_tdgs = NULL;
std::atomic<kmp_int32> __kmp_tdg_task_id = 0;
enum sched_type
    Describes the loop schedule to be used for a parallel for loop.
    kmp_sch_default                      default scheduling algorithm
    kmp_sch_guided_chunked               guided unspecialized
    kmp_sch_dynamic_chunked
    kmp_sch_guided_analytical_chunked
    kmp_sch_guided_iterative_chunked
void * omp_memspace_handle_t
void * omp_allocator_handle_t
kmp_bootstrap_lock_t __kmp_initz_lock
#define KMP_DFLT_DISP_NUM_BUFF
kmp_lock_t __kmp_debug_lock
enum kmp_target_offload_kind kmp_target_offload_kind_t
kmp_bootstrap_lock_t __kmp_tp_cached_lock
kmp_lock_t __kmp_global_lock
union KMP_ALIGN_CACHE kmp_root kmp_root_t
#define KMP_DEBUG_BUF_CHARS_INIT
#define KMP_DEFAULT_STKSIZE
enum kmp_tasking_mode kmp_tasking_mode_t
int PACKED_REDUCTION_METHOD_T
kmp_bootstrap_lock_t __kmp_forkjoin_lock
kmp_bootstrap_lock_t __kmp_exit_lock
#define KMP_DEFAULT_STKOFFSET
#define KMP_DEFAULT_BLOCKTIME
#define KMP_DEFAULT_MALLOC_POOL_INCR
#define KMP_MIN_STKPADDING
reduction_method_not_defined
enum kmp_bar_pat kmp_bar_pat_e
#define KMP_DEBUG_BUF_LINES_INIT
union KMP_ALIGN_CACHE kmp_info kmp_info_t
union KMP_ALIGN_CACHE kmp_global kmp_global_t
#define KMP_BUILD_ASSERT(expr)
unsigned long long kmp_uint64
kmp_hier_sched_env_t __kmp_hier_scheds
int __kmp_hier_threads_per[kmp_hier_layer_e::LAYER_LAST+1]
int __kmp_dispatch_hand_threading
int __kmp_hier_max_units[kmp_hier_layer_e::LAYER_LAST+1]
int __kmp_memkind_available
omp_memspace_handle_t const omp_default_mem_space
volatile kmp_team_t * __kmp_team_pool
kmp_bar_pat_e __kmp_barrier_release_pat_dflt
int __kmp_generate_warnings
volatile int __kmp_init_user_locks
int __kmp_debug_buf_lines
kmp_proc_bind_t __kmp_teams_proc_bind
int __kmp_display_env_verbose
omp_allocator_handle_t const omp_cgroup_mem_alloc
kmp_pause_status_t __kmp_pause_status
KMP_ALIGN_CACHE kmp_global_t __kmp_global
omp_memspace_handle_t const llvm_omp_target_host_mem_space
kmp_int32 __kmp_use_yield
char const * __kmp_barrier_type_name[bs_last_barrier]
char const * __kmp_barrier_pattern_name[bp_last_bar]
int __kmp_dflt_team_nth_ub
int __kmp_dflt_max_active_levels
kmp_bar_pat_e __kmp_barrier_gather_pat_dflt
omp_memspace_handle_t const omp_low_lat_mem_space
int __kmp_storage_map_verbose_specified
kmp_bar_pat_e __kmp_barrier_gather_pattern[bs_last_barrier]
kmp_tasking_mode_t __kmp_tasking_mode
char * __kmp_affinity_format
volatile kmp_info_t * __kmp_thread_pool
volatile int __kmp_init_gtid
omp_allocator_handle_t __kmp_def_allocator
int __kmp_omp_cancellation
kmp_nested_proc_bind_t __kmp_nested_proc_bind
kmp_nested_nthreads_t __kmp_nested_nth
omp_allocator_handle_t const omp_default_mem_alloc
kmp_cached_addr_t * __kmp_threadpriv_cache_list
volatile int __kmp_all_nth
kmp_target_offload_kind_t __kmp_target_offload
int __kmp_debug_buf_chars
int __kmp_adjust_gtid_mode
kmp_old_threads_list_t * __kmp_old_threads_list
volatile int __kmp_init_common
enum sched_type __kmp_auto
kmp_int32 __kmp_use_yield_exp_set
omp_allocator_handle_t const omp_large_cap_mem_alloc
volatile int __kmp_init_hidden_helper
omp_allocator_handle_t const omp_low_lat_mem_alloc
volatile int __kmp_init_middle
omp_allocator_handle_t const omp_high_bw_mem_alloc
std::atomic< kmp_int32 > __kmp_task_counter
int __kmp_storage_map_verbose
int __kmp_allThreadsSpecified
enum sched_type __kmp_static
enum sched_type __kmp_sch_map[kmp_sched_upper - kmp_sched_lower_ext+kmp_sched_upper_std - kmp_sched_lower - 2]
int __kmp_affinity_num_places
int __kmp_duplicate_library_ok
volatile int __kmp_need_register_serial
omp_memspace_handle_t const omp_const_mem_space
kmp_uint32 __kmp_barrier_release_branch_bits[bs_last_barrier]
kmp_int32 __kmp_default_device
omp_memspace_handle_t const omp_large_cap_mem_space
int __kmp_force_monotonic
enum sched_type __kmp_sched
int __kmp_enable_task_throttling
kmp_uint32 __kmp_barrier_gather_bb_dflt
KMP_ALIGN_CACHE kmp_info_t ** __kmp_threads
kmp_uint32 __kmp_barrier_release_bb_dflt
int __kmp_task_stealing_constraint
int __kmp_need_register_atfork
KMP_ALIGN_CACHE volatile int __kmp_nth
int __kmp_dispatch_num_buffers
kmp_uint32 __kmp_yield_init
bool __kmp_dflt_max_active_levels_set
omp_memspace_handle_t const llvm_omp_target_shared_mem_space
char * __kmp_debug_buffer
omp_memspace_handle_t const omp_high_bw_mem_space
int __kmp_nesting_mode_nlevels
int * __kmp_nesting_nth_level
omp_allocator_handle_t const omp_const_mem_alloc
volatile int __kmp_init_parallel
omp_allocator_handle_t const omp_pteam_mem_alloc
kmp_queuing_lock_t __kmp_dispatch_lock
omp_allocator_handle_t const llvm_omp_target_host_mem_alloc
int __kmp_need_register_atfork_specified
omp_allocator_handle_t const kmp_max_mem_alloc
enum library_type __kmp_library
KMP_ALIGN_CACHE std::atomic< int > __kmp_thread_pool_active_nth
kmp_key_t __kmp_gtid_threadprivate_key
int __kmp_env_consistency_check
kmp_uint64 __kmp_pause_init
kmp_uint64 __kmp_taskloop_min_tasks
char const * __kmp_barrier_branch_bit_env_name[bs_last_barrier]
volatile int __kmp_hidden_helper_team_done
size_t __kmp_sys_min_stksize
char __kmp_blocktime_units
kmp_uint32 __kmp_barrier_gather_branch_bits[bs_last_barrier]
size_t __kmp_malloc_pool_incr
int __kmp_threads_capacity
int __kmp_debug_buf_warn_chars
bool __kmp_wpolicy_passive
volatile int __kmp_init_hidden_helper_threads
int __kmp_display_affinity
enum sched_type __kmp_guided
PACKED_REDUCTION_METHOD_T __kmp_force_reduction_method
omp_allocator_handle_t const omp_thread_mem_alloc
kmp_int32 __kmp_max_task_priority
int __kmp_teams_thread_limit
char const * __kmp_barrier_pattern_env_name[bs_last_barrier]
std::atomic< int > __kmp_debug_count
omp_allocator_handle_t const llvm_omp_target_shared_mem_alloc
volatile int __kmp_init_serial
omp_memspace_handle_t const llvm_omp_target_device_mem_space
int __kmp_debug_buf_atomic
kmp_bar_pat_e __kmp_barrier_release_pattern[bs_last_barrier]
std::atomic< kmp_int32 > __kmp_team_counter
omp_allocator_handle_t const llvm_omp_target_device_mem_alloc
omp_allocator_handle_t const omp_null_allocator
kmp_uint32 __kmp_yield_next
#define KMP_LOCK_INIT(lock)
#define KMP_BOOTSTRAP_LOCK_INIT(lock)
#define KMP_ALIGN_CACHE_INTERNODE
Functions for collecting statistics.