#include "kmp_config.h"
#ifndef KMP_STATIC_STEAL_ENABLED
#define KMP_STATIC_STEAL_ENABLED 1
#define KMP_WEIGHTED_ITERATIONS_SUPPORTED \
  (KMP_AFFINITY_SUPPORTED && KMP_STATIC_STEAL_ENABLED && \
   (KMP_ARCH_X86 || KMP_ARCH_X86_64))
#define TASK_CURRENT_NOT_QUEUED 0
#define TASK_CURRENT_QUEUED 1
#ifdef BUILD_TIED_TASK_STACK
#define TASK_STACK_EMPTY 0
#define TASK_STACK_BLOCK_BITS 5
#define TASK_STACK_BLOCK_SIZE (1 << TASK_STACK_BLOCK_BITS)
#define TASK_STACK_INDEX_MASK (TASK_STACK_BLOCK_SIZE - 1)
#define TASK_NOT_PUSHED 1
#define TASK_SUCCESSFULLY_PUSHED 0
#define TASK_EXPLICIT 1
#define TASK_IMPLICIT 0
#define TASK_DETACHABLE 1
#define TASK_UNDETACHABLE 0
#define KMP_CANCEL_THREADS
#define KMP_THREAD_ATTR
#if defined(__ANDROID__)
#undef KMP_CANCEL_THREADS
#undef KMP_CANCEL_THREADS
#if KMP_USE_HIER_SCHED
#undef KMP_USE_HIER_SCHED
#define KMP_USE_HIER_SCHED KMP_AFFINITY_SUPPORTED
#if KMP_USE_HWLOC && KMP_AFFINITY_SUPPORTED && !defined(OMPD_SKIP_HWLOC)
#ifndef HWLOC_OBJ_NUMANODE
#define HWLOC_OBJ_NUMANODE HWLOC_OBJ_NODE
#ifndef HWLOC_OBJ_PACKAGE
#define HWLOC_OBJ_PACKAGE HWLOC_OBJ_SOCKET
#if KMP_ARCH_X86 || KMP_ARCH_X86_64
#include <xmmintrin.h>
#define KMP_INTERNAL_MALLOC(sz) malloc(sz)
#define KMP_INTERNAL_FREE(p) free(p)
#define KMP_INTERNAL_REALLOC(p, sz) realloc((p), (sz))
#define KMP_INTERNAL_CALLOC(n, sz) calloc((n), (sz))
#define KMP_HANDLE_SIGNALS ((KMP_OS_UNIX && !KMP_OS_WASI) || KMP_OS_WINDOWS)
#if !defined NSIG && defined _NSIG
#pragma weak clock_gettime
#define UNLIKELY(x) (x)
#ifndef USE_FAST_MEMORY
#define USE_FAST_MEMORY 3
#ifndef KMP_NESTED_HOT_TEAMS
#define KMP_NESTED_HOT_TEAMS 0
#define USE_NESTED_HOT_ARG(x)
#if KMP_NESTED_HOT_TEAMS
#define USE_NESTED_HOT_ARG(x) , x
#define USE_NESTED_HOT_ARG(x)
#ifndef USE_CMP_XCHG_FOR_BGET
#define USE_CMP_XCHG_FOR_BGET 1
#define KMP_NSEC_PER_SEC 1000000000L
#define KMP_USEC_PER_SEC 1000000L
#define KMP_NSEC_PER_USEC 1000L
template <bool C = false, bool S = true> class kmp_flag_32;
template <bool C = false, bool S = true> class kmp_flag_64;
#define KMP_PACK_64(HIGH_32, LOW_32) \
  ((kmp_int64)((((kmp_uint64)(HIGH_32)) << 32) | (kmp_uint64)(LOW_32)))
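// Editor's note (not in the original header): KMP_PACK_64 splices two 32-bit
// values into one 64-bit word, with HIGH_32 landing in bits 63..32. A minimal
// sketch, assuming the kmp_int64/kmp_uint64 typedefs from kmp_os.h:
//
//   kmp_int64 packed = KMP_PACK_64(0x1u, 0x2u);
//   // packed == 0x0000000100000002
//   kmp_uint32 lo = (kmp_uint32)(packed & 0xffffffff);      // 0x2
//   kmp_uint32 hi = (kmp_uint32)((kmp_uint64)packed >> 32); // 0x1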
#define SKIP_WS(_x) \
  { while (*(_x) == ' ' || *(_x) == '\t') (_x)++; }
#define SKIP_DIGITS(_x) \
  { while (*(_x) >= '0' && *(_x) <= '9') (_x)++; }
#define SKIP_TOKEN(_x) \
  { while ((*(_x) >= '0' && *(_x) <= '9') || (*(_x) >= 'a' && *(_x) <= 'z') || \
           (*(_x) >= 'A' && *(_x) <= 'Z') || *(_x) == '_') (_x)++; }
#define SKIP_TO(_x, _c) \
  { while (*(_x) != '\0' && *(_x) != (_c)) (_x)++; }
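// Editor's illustration (not part of the original source): these scanner
// macros advance a char pointer in place, e.g. while parsing KMP_AFFINITY-style
// environment strings. A hedged sketch:
//
//   const char *s = "  42,granularity";
//   SKIP_WS(s);      // s -> "42,granularity"
//   SKIP_DIGITS(s);  // s -> ",granularity"
//   SKIP_TO(s, 'g'); // s -> "granularity"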
#define KMP_MAX(x, y) ((x) > (y) ? (x) : (y))
#define KMP_MIN(x, y) ((x) < (y) ? (x) : (y))
#ifdef USE_LOAD_BALANCE
  dynamic_load_balance,
#ifndef KMP_SCHED_TYPE_DEFINED
#define KMP_SCHED_TYPE_DEFINED
#if KMP_STATIC_STEAL_ENABLED
  kmp_sched_static_steal = 102,
#define SCHEDULE_HAS_MONOTONIC(s) (((s)&kmp_sch_modifier_monotonic) != 0)
#define SCHEDULE_HAS_NONMONOTONIC(s) (((s)&kmp_sch_modifier_nonmonotonic) != 0)
#define SCHEDULE_HAS_NO_MODIFIERS(s) \
  (((s) & (kmp_sch_modifier_nonmonotonic | kmp_sch_modifier_monotonic)) == 0)
#define SCHEDULE_GET_MODIFIERS(s) \
  ((enum sched_type)( \
      (s) & (kmp_sch_modifier_nonmonotonic | kmp_sch_modifier_monotonic)))
#define SCHEDULE_SET_MODIFIERS(s, m) \
  (s = (enum sched_type)((kmp_int32)s | (kmp_int32)m))
#define SCHEDULE_NONMONOTONIC 0
#define SCHEDULE_MONOTONIC 1
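// Editor's illustration (not part of the original source): the modifier bits
// ride in the top of the sched_type value, so a schedule can be tested and
// rewritten without disturbing its base kind. A hedged sketch:
//
//   enum sched_type s = kmp_sch_dynamic_chunked;
//   SCHEDULE_SET_MODIFIERS(s, kmp_sch_modifier_monotonic);
//   // SCHEDULE_HAS_MONOTONIC(s) is now nonzero;
//   // SCHEDULE_GET_MODIFIERS(s) yields exactly the modifier bits;
//   // SCHEDULE_HAS_NO_MODIFIERS(s) is false.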
    *internal_kind = (enum sched_type)((int)*internal_kind |
                                       (int)kmp_sch_modifier_monotonic);
enum clock_function_type {
  clock_function_gettimeofday,
  clock_function_clock_gettime
};
enum mic_type { non_mic, mic1, mic2, mic3, dummy };
#undef KMP_FAST_REDUCTION_BARRIER
#define KMP_FAST_REDUCTION_BARRIER 1
#undef KMP_FAST_REDUCTION_CORE_DUO
#if KMP_ARCH_X86 || KMP_ARCH_X86_64
#define KMP_FAST_REDUCTION_CORE_DUO 1
#if KMP_FAST_REDUCTION_BARRIER
#define PACK_REDUCTION_METHOD_AND_BARRIER(reduction_method, barrier_type) \
  ((reduction_method) | (barrier_type))
#define UNPACK_REDUCTION_METHOD(packed_reduction_method) \
  ((enum _reduction_method)((packed_reduction_method) & (0x0000FF00)))
#define UNPACK_REDUCTION_BARRIER(packed_reduction_method) \
  ((enum barrier_type)((packed_reduction_method) & (0x000000FF)))
#define PACK_REDUCTION_METHOD_AND_BARRIER(reduction_method, barrier_type) \
  (reduction_method)
#define UNPACK_REDUCTION_METHOD(packed_reduction_method) \
  (packed_reduction_method)
#define UNPACK_REDUCTION_BARRIER(packed_reduction_method) (bs_plain_barrier)
#define TEST_REDUCTION_METHOD(packed_reduction_method, which_reduction_block) \
  ((UNPACK_REDUCTION_METHOD(packed_reduction_method)) == \
   (which_reduction_block))
#if KMP_FAST_REDUCTION_BARRIER
#define TREE_REDUCE_BLOCK_WITH_REDUCTION_BARRIER \
  (PACK_REDUCTION_METHOD_AND_BARRIER(tree_reduce_block, bs_reduction_barrier))
#define TREE_REDUCE_BLOCK_WITH_PLAIN_BARRIER \
  (PACK_REDUCTION_METHOD_AND_BARRIER(tree_reduce_block, bs_plain_barrier))
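// Editor's illustration (not part of the original source): with the fast
// reduction barrier enabled, a packed method is just "method | barrier" --
// the method in bits 15..8, the barrier type in bits 7..0. A hedged sketch:
//
//   int packed = PACK_REDUCTION_METHOD_AND_BARRIER(tree_reduce_block,
//                                                  bs_reduction_barrier);
//   // UNPACK_REDUCTION_METHOD(packed)  == tree_reduce_block
//   // UNPACK_REDUCTION_BARRIER(packed) == bs_reduction_barrier
//   // TEST_REDUCTION_METHOD(packed, tree_reduce_block) is true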
#pragma warning(disable : 271 310)
#if KMP_ARCH_X86 || KMP_ARCH_X86_64
  KMP_HW_CORE_TYPE_ATOM = 0x20,
  KMP_HW_CORE_TYPE_CORE = 0x40,
#define KMP_HW_MAX_NUM_CORE_EFFS 8
#define KMP_DEBUG_ASSERT_VALID_HW_TYPE(type) \
  KMP_DEBUG_ASSERT(type >= (kmp_hw_t)0 && type < KMP_HW_LAST)
#define KMP_ASSERT_VALID_HW_TYPE(type) \
  KMP_ASSERT(type >= (kmp_hw_t)0 && type < KMP_HW_LAST)
#define KMP_FOREACH_HW_TYPE(type) \
  for (kmp_hw_t type = (kmp_hw_t)0; type < KMP_HW_LAST; \
       type = (kmp_hw_t)((int)type + 1))
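// Editor's illustration (not part of the original source): KMP_FOREACH_HW_TYPE
// declares the loop variable itself, so visiting every topology layer is a
// one-liner. A hedged sketch:
//
//   KMP_FOREACH_HW_TYPE(type) {
//     KMP_DEBUG_ASSERT_VALID_HW_TYPE(type);
//     // inspect this layer, e.g. socket / core / thread
//   }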
#if KMP_AFFINITY_SUPPORTED
#if _MSC_VER < 1600 && KMP_MSVC_COMPAT
typedef struct GROUP_AFFINITY {
#if KMP_GROUP_AFFINITY
extern int __kmp_num_proc_groups;
static const int __kmp_num_proc_groups = 1;
typedef DWORD (*kmp_GetActiveProcessorCount_t)(WORD);
extern kmp_GetActiveProcessorCount_t __kmp_GetActiveProcessorCount;
typedef WORD (*kmp_GetActiveProcessorGroupCount_t)(void);
extern kmp_GetActiveProcessorGroupCount_t __kmp_GetActiveProcessorGroupCount;
typedef BOOL (*kmp_GetThreadGroupAffinity_t)(HANDLE, GROUP_AFFINITY *);
extern kmp_GetThreadGroupAffinity_t __kmp_GetThreadGroupAffinity;
typedef BOOL (*kmp_SetThreadGroupAffinity_t)(HANDLE, const GROUP_AFFINITY *,
                                             GROUP_AFFINITY *);
extern kmp_SetThreadGroupAffinity_t __kmp_SetThreadGroupAffinity;
#if KMP_USE_HWLOC && !defined(OMPD_SKIP_HWLOC)
extern hwloc_topology_t __kmp_hwloc_topology;
extern int __kmp_hwloc_error;
extern size_t __kmp_affin_mask_size;
#define KMP_AFFINITY_CAPABLE() (__kmp_affin_mask_size > 0)
#define KMP_AFFINITY_DISABLE() (__kmp_affin_mask_size = 0)
#define KMP_AFFINITY_ENABLE(mask_size) (__kmp_affin_mask_size = mask_size)
#define KMP_CPU_SET_ITERATE(i, mask) \
  for (i = (mask)->begin(); (int)i != (mask)->end(); i = (mask)->next(i))
#define KMP_CPU_SET(i, mask) (mask)->set(i)
#define KMP_CPU_ISSET(i, mask) (mask)->is_set(i)
#define KMP_CPU_CLR(i, mask) (mask)->clear(i)
#define KMP_CPU_ZERO(mask) (mask)->zero()
#define KMP_CPU_ISEMPTY(mask) (mask)->empty()
#define KMP_CPU_COPY(dest, src) (dest)->copy(src)
#define KMP_CPU_AND(dest, src) (dest)->bitwise_and(src)
#define KMP_CPU_COMPLEMENT(max_bit_number, mask) (mask)->bitwise_not()
#define KMP_CPU_UNION(dest, src) (dest)->bitwise_or(src)
#define KMP_CPU_EQUAL(dest, src) (dest)->is_equal(src)
#define KMP_CPU_ALLOC(ptr) (ptr = __kmp_affinity_dispatch->allocate_mask())
#define KMP_CPU_FREE(ptr) __kmp_affinity_dispatch->deallocate_mask(ptr)
#define KMP_CPU_ALLOC_ON_STACK(ptr) KMP_CPU_ALLOC(ptr)
#define KMP_CPU_FREE_FROM_STACK(ptr) KMP_CPU_FREE(ptr)
#define KMP_CPU_INTERNAL_ALLOC(ptr) KMP_CPU_ALLOC(ptr)
#define KMP_CPU_INTERNAL_FREE(ptr) KMP_CPU_FREE(ptr)
#define KMP_CPU_INDEX(arr, i) __kmp_affinity_dispatch->index_mask_array(arr, i)
#define KMP_CPU_ALLOC_ARRAY(arr, n) \
  (arr = __kmp_affinity_dispatch->allocate_mask_array(n))
#define KMP_CPU_FREE_ARRAY(arr, n) \
  __kmp_affinity_dispatch->deallocate_mask_array(arr)
#define KMP_CPU_INTERNAL_ALLOC_ARRAY(arr, n) KMP_CPU_ALLOC_ARRAY(arr, n)
#define KMP_CPU_INTERNAL_FREE_ARRAY(arr, n) KMP_CPU_FREE_ARRAY(arr, n)
#define __kmp_get_system_affinity(mask, abort_bool) \
  (mask)->get_system_affinity(abort_bool)
#define __kmp_set_system_affinity(mask, abort_bool) \
  (mask)->set_system_affinity(abort_bool)
#define __kmp_get_proc_group(mask) (mask)->get_proc_group()
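// Editor's illustration (not part of the original source): the KMP_CPU_*
// macros hide whether the mask is a native cpu_set_t-style object or an hwloc
// bitmap behind __kmp_affinity_dispatch. A hedged sketch of typical use:
//
//   kmp_affin_mask_t *m;
//   KMP_CPU_ALLOC(m);
//   KMP_CPU_ZERO(m);
//   KMP_CPU_SET(3, m); // request OS proc 3
//   if (KMP_AFFINITY_CAPABLE())
//     __kmp_set_system_affinity(m, /*abort_on_error=*/TRUE);
//   KMP_CPU_FREE(m);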
class KMPAffinity {
public:
  class Mask {
  public:
    void *operator new(size_t n);
    void operator delete(void *p);
    void *operator new[](size_t n);
    void operator delete[](void *p);
    virtual void set(int i) {}
    virtual bool is_set(int i) const { return false; }
    virtual void clear(int i) {}
    virtual void zero() {}
    virtual bool empty() const { return true; }
    virtual void copy(const Mask *src) {}
    virtual void bitwise_and(const Mask *rhs) {}
    virtual void bitwise_or(const Mask *rhs) {}
    virtual void bitwise_not() {}
    virtual bool is_equal(const Mask *rhs) const { return false; }
    virtual int begin() const { return 0; }
    virtual int end() const { return 0; }
    virtual int next(int previous) const { return 0; }
    virtual int set_process_affinity(bool abort_on_error) const { return -1; }
    virtual int set_system_affinity(bool abort_on_error) const { return -1; }
    virtual int get_system_affinity(bool abort_on_error) { return -1; }
    virtual int get_proc_group() const { return -1; }
    int get_max_cpu() const {
      int cpu;
      int max_cpu = -1;
      KMP_CPU_SET_ITERATE(cpu, this) {
        if (cpu > max_cpu)
          max_cpu = cpu;
      }
      return max_cpu;
    }
  };
  void *operator new(size_t n);
  void operator delete(void *p);
  virtual ~KMPAffinity() = default;
  virtual void determine_capable(const char *env_var) {}
  virtual void bind_thread(int proc) {}
  virtual Mask *allocate_mask() { return nullptr; }
  virtual void deallocate_mask(Mask *m) {}
  virtual Mask *allocate_mask_array(int num) { return nullptr; }
  virtual void deallocate_mask_array(Mask *m) {}
  virtual Mask *index_mask_array(Mask *m, int index) { return nullptr; }
  static void pick_api();
  static void destroy_api();
  enum api_type { NATIVE_OS, HWLOC };
  virtual api_type get_api_type() const {
    KMP_ASSERT(0);
    return NATIVE_OS;
  }

private:
  static bool picked_api;
};
typedef KMPAffinity::Mask kmp_affin_mask_t;
extern KMPAffinity *__kmp_affinity_dispatch;
class kmp_affinity_raii_t {
  kmp_affin_mask_t *mask;
  bool restored;

public:
  kmp_affinity_raii_t(const kmp_affin_mask_t *new_mask = nullptr)
      : mask(nullptr), restored(false) {
    if (KMP_AFFINITY_CAPABLE()) {
      KMP_CPU_ALLOC(mask);
      __kmp_get_system_affinity(mask, /*abort_on_error=*/true);
      if (new_mask)
        __kmp_set_system_affinity(new_mask, /*abort_on_error=*/true);
    }
  }
  void restore() {
    if (mask && KMP_AFFINITY_CAPABLE() && !restored) {
      __kmp_set_system_affinity(mask, /*abort_on_error=*/true);
      KMP_CPU_FREE(mask);
    }
    restored = true;
  }
  ~kmp_affinity_raii_t() { restore(); }
};
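// Editor's illustration (not part of the original source): the RAII guard
// saves the current system affinity on construction, optionally pins the
// thread to new_mask, and restores the saved mask when it leaves scope. A
// hedged sketch:
//
//   {
//     kmp_affinity_raii_t guard(temp_mask); // bind to temp_mask
//     // ... do work while pinned ...
//   } // destructor runs restore(); the original affinity is back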
#define KMP_AFFIN_MASK_PRINT_LEN 1024
enum affinity_top_method {
  affinity_top_method_all = 0,
#if KMP_ARCH_X86 || KMP_ARCH_X86_64
  affinity_top_method_apicid,
  affinity_top_method_x2apicid,
  affinity_top_method_x2apicid_1f,
  affinity_top_method_cpuinfo,
#if KMP_GROUP_AFFINITY
  affinity_top_method_group,
  affinity_top_method_flat,
  affinity_top_method_hwloc,
  affinity_top_method_default
};
#define affinity_respect_mask_default (2)
typedef struct kmp_affinity_flags_t {
  unsigned verbose : 1;
  unsigned warnings : 1;
  unsigned respect : 2;
  unsigned core_types_gran : 1;
  unsigned core_effs_gran : 1;
  unsigned omp_places : 1;
  unsigned reserved : 22;
} kmp_affinity_flags_t;
typedef struct kmp_affinity_ids_t {
typedef struct kmp_affinity_attrs_t {
  unsigned reserved : 15;
} kmp_affinity_attrs_t;
#define KMP_AFFINITY_ATTRS_UNKNOWN \
  { KMP_HW_CORE_TYPE_UNKNOWN, kmp_hw_attr_t::UNKNOWN_CORE_EFF, 0, 0 }
typedef struct kmp_affinity_t {
  enum affinity_type type;
  kmp_affinity_attrs_t core_attr_gran;
  kmp_affinity_flags_t flags;
  kmp_affin_mask_t *masks;
  kmp_affinity_ids_t *ids;
  kmp_affinity_attrs_t *attrs;
  unsigned num_os_id_masks;
  kmp_affin_mask_t *os_id_masks;
#define KMP_AFFINITY_INIT(env) \
  { \
    nullptr, affinity_default, KMP_HW_UNKNOWN, -1, KMP_AFFINITY_ATTRS_UNKNOWN, \
    {TRUE, FALSE, TRUE, affinity_respect_mask_default, FALSE, FALSE, \
     FALSE, FALSE, FALSE}, \
    0, nullptr, nullptr, nullptr, 0, nullptr, env \
  }
extern enum affinity_top_method __kmp_affinity_top_method;
extern kmp_affinity_t __kmp_affinity;
extern kmp_affinity_t __kmp_hh_affinity;
extern kmp_affinity_t *__kmp_affinities[2];
extern kmp_affin_mask_t *__kmp_affin_fullMask;
extern kmp_affin_mask_t *__kmp_affin_origMask;
extern char *__kmp_cpuinfo_file;
#if KMP_WEIGHTED_ITERATIONS_SUPPORTED
extern int __kmp_first_osid_with_ecore;
extern int __kmp_tool;
extern char *__kmp_tool_libraries;
#if KMP_AFFINITY_SUPPORTED
#define KMP_PLACE_ALL (-1)
#define KMP_PLACE_UNDEFINED (-2)
#define KMP_AFFINITY_NON_PROC_BIND \
  ((__kmp_nested_proc_bind.bind_types[0] == proc_bind_false || \
    __kmp_nested_proc_bind.bind_types[0] == proc_bind_intel) && \
   (__kmp_affinity.num_masks > 0 || __kmp_affinity.type == affinity_balanced))
#define KMP_PAD(type, sz) \
  (sizeof(type) + (sz - ((sizeof(type) - 1) % (sz)) - 1))
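// Editor's note (not in the original header): KMP_PAD rounds sizeof(type) up
// to the next multiple of sz, e.g. for cache-line padding. Worked example with
// sizeof(type) == 20 and sz == 64:
//   20 + (64 - ((20 - 1) % 64) - 1) = 20 + (64 - 19 - 1) = 64.
// A type that is already 64 bytes stays 64: 64 + (64 - 63 - 1) = 64.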
#define KMP_GTID_DNE (-2)
#define KMP_GTID_SHUTDOWN (-3)
#define KMP_GTID_MONITOR (-4)
#define KMP_GTID_UNKNOWN (-5)
#define KMP_GTID_MIN (-6)
#define omp_atv_default ((omp_uintptr_t)-1)
extern void *__kmpc_calloc(int gtid, size_t nmemb, size_t sz,
                           omp_allocator_handle_t al);
extern void *__kmp_alloc(int gtid, size_t align, size_t sz,
                         omp_allocator_handle_t al);
extern void *__kmp_calloc(int gtid, size_t align, size_t nmemb, size_t sz,
                          omp_allocator_handle_t al);
#if ENABLE_LIBOMPTARGET
extern void __kmp_init_target_task();
#define KMP_UINT64_MAX \
  (~((kmp_uint64)1 << ((sizeof(kmp_uint64) * (1 << 3)) - 1)))
#define KMP_MIN_NTH 1
#if defined(PTHREAD_THREADS_MAX) && PTHREAD_THREADS_MAX < INT_MAX
#define KMP_MAX_NTH PTHREAD_THREADS_MAX
#define KMP_MAX_NTH 64
#define KMP_MAX_NTH INT_MAX
#ifdef PTHREAD_STACK_MIN
#define KMP_MIN_STKSIZE ((size_t)PTHREAD_STACK_MIN)
#define KMP_MIN_STKSIZE ((size_t)(32 * 1024))
#if KMP_OS_AIX && KMP_ARCH_PPC
#define KMP_MAX_STKSIZE 0x10000000
#define KMP_MAX_STKSIZE (~((size_t)1 << ((sizeof(size_t) * (1 << 3)) - 1)))
#define KMP_DEFAULT_STKSIZE ((size_t)(2 * 1024 * 1024))
#elif KMP_ARCH_X86_64
#define KMP_DEFAULT_STKSIZE ((size_t)(4 * 1024 * 1024))
#define KMP_BACKUP_STKSIZE ((size_t)(2 * 1024 * 1024))
#define KMP_DEFAULT_STKSIZE ((size_t)(4 * 1024 * 1024))
#define KMP_DEFAULT_STKSIZE ((size_t)(4 * 1024 * 1024))
#define KMP_DEFAULT_STKSIZE ((size_t)(1024 * 1024))
#define KMP_DEFAULT_MALLOC_POOL_INCR ((size_t)(1024 * 1024))
#define KMP_MIN_MALLOC_POOL_INCR ((size_t)(4 * 1024))
#define KMP_MAX_MALLOC_POOL_INCR \
  (~((size_t)1 << ((sizeof(size_t) * (1 << 3)) - 1)))
#define KMP_MIN_STKOFFSET (0)
#define KMP_MAX_STKOFFSET KMP_MAX_STKSIZE
#define KMP_DEFAULT_STKOFFSET KMP_MIN_STKOFFSET
#define KMP_DEFAULT_STKOFFSET CACHE_LINE
#define KMP_MIN_STKPADDING (0)
#define KMP_MAX_STKPADDING (2 * 1024 * 1024)
#define KMP_BLOCKTIME_MULTIPLIER \
  (1000) /* number of blocktime units per second */
#define KMP_MIN_BLOCKTIME (0)
#define KMP_MAX_BLOCKTIME \
  (INT_MAX) /* must be this value for "infinite" setting to work */
#define KMP_DEFAULT_BLOCKTIME (__kmp_is_hybrid_cpu() ? (0) : (200000))
#define KMP_DEFAULT_MONITOR_STKSIZE ((size_t)(64 * 1024))
#define KMP_MIN_MONITOR_WAKEUPS (1)
#define KMP_MAX_MONITOR_WAKEUPS (1000)
#define KMP_WAKEUPS_FROM_BLOCKTIME(blocktime, monitor_wakeups) \
  (((blocktime) == KMP_MAX_BLOCKTIME)   ? (monitor_wakeups) \
   : ((blocktime) == KMP_MIN_BLOCKTIME) ? KMP_MAX_MONITOR_WAKEUPS \
   : ((monitor_wakeups) > (KMP_BLOCKTIME_MULTIPLIER / (blocktime))) \
       ? (monitor_wakeups) \
       : (KMP_BLOCKTIME_MULTIPLIER) / (blocktime))
#define KMP_INTERVALS_FROM_BLOCKTIME(blocktime, monitor_wakeups) \
  (((blocktime) + (KMP_BLOCKTIME_MULTIPLIER / (monitor_wakeups)) - 1) / \
   (KMP_BLOCKTIME_MULTIPLIER / (monitor_wakeups)))
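// Editor's note (not in the original header): worked example for the two
// monitor macros above, assuming blocktime = 200 units and monitor_wakeups =
// 4 wakeups/sec (so one wakeup interval = 1000 / 4 = 250 units):
//   KMP_WAKEUPS_FROM_BLOCKTIME(200, 4)  -> 1000 / 200 = 5 wakeups/sec,
//     since 4 < 1000 / 200 a finer wakeup rate is needed to honor 200 units;
//   KMP_INTERVALS_FROM_BLOCKTIME(200, 4) -> (200 + 250 - 1) / 250 = 1,
//     i.e. the blocktime rounds up to one whole monitor interval.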
#define KMP_BLOCKTIME(team, tid) \
  (get__bt_set(team, tid) ? get__blocktime(team, tid) : __kmp_dflt_blocktime)
#if KMP_OS_UNIX && (KMP_ARCH_X86 || KMP_ARCH_X86_64)
#if KMP_COMPILER_ICC || KMP_COMPILER_ICX
#define KMP_NOW() ((kmp_uint64)_rdtsc())
#define KMP_NOW() __kmp_hardware_timestamp()
#define KMP_BLOCKTIME_INTERVAL(team, tid) \
  ((kmp_uint64)KMP_BLOCKTIME(team, tid) * __kmp_ticks_per_usec)
#define KMP_BLOCKING(goal, count) ((goal) > KMP_NOW())
#define KMP_NOW() __kmp_now_nsec()
#define KMP_BLOCKTIME_INTERVAL(team, tid) \
  ((kmp_uint64)KMP_BLOCKTIME(team, tid) * (kmp_uint64)KMP_NSEC_PER_USEC)
#define KMP_BLOCKING(goal, count) ((count) % 1000 != 0 || (goal) > KMP_NOW())
#define KMP_MIN_STATSCOLS 40
#define KMP_MAX_STATSCOLS 4096
#define KMP_DEFAULT_STATSCOLS 80
#define KMP_MIN_INTERVAL 0
#define KMP_MAX_INTERVAL (INT_MAX - 1)
#define KMP_DEFAULT_INTERVAL 0
#define KMP_MIN_CHUNK 1
#define KMP_MAX_CHUNK (INT_MAX - 1)
#define KMP_DEFAULT_CHUNK 1
#define KMP_MIN_DISP_NUM_BUFF 1
#define KMP_DFLT_DISP_NUM_BUFF 7
#define KMP_MAX_DISP_NUM_BUFF 4096
#define KMP_MAX_ORDERED 8
#define KMP_MAX_FIELDS 32
#define KMP_MAX_BRANCH_BITS 31
#define KMP_MAX_ACTIVE_LEVELS_LIMIT INT_MAX
#define KMP_MAX_DEFAULT_DEVICE_LIMIT INT_MAX
#define KMP_MAX_TASK_PRIORITY_LIMIT INT_MAX
#if KMP_ARCH_X86 || KMP_ARCH_X86_64
#define KMP_TLS_GTID_MIN 5
#define KMP_TLS_GTID_MIN INT_MAX
#define KMP_MASTER_TID(tid) (0 == (tid))
#define KMP_WORKER_TID(tid) (0 != (tid))
#define KMP_MASTER_GTID(gtid) (0 == __kmp_tid_from_gtid((gtid)))
#define KMP_WORKER_GTID(gtid) (0 != __kmp_tid_from_gtid((gtid)))
#define KMP_INITIAL_GTID(gtid) (0 == (gtid))
#define FALSE 0
#define TRUE (!FALSE)
#define KMP_INIT_WAIT 64U
#define KMP_NEXT_WAIT 32U
#define KMP_INIT_WAIT 1024U
#define KMP_NEXT_WAIT 512U
#define KMP_INIT_WAIT 1024U
#define KMP_NEXT_WAIT 512U
#elif KMP_OS_DRAGONFLY
#define KMP_INIT_WAIT 1024U
#define KMP_NEXT_WAIT 512U
#define KMP_INIT_WAIT 1024U
#define KMP_NEXT_WAIT 512U
#define KMP_INIT_WAIT 1024U
#define KMP_NEXT_WAIT 512U
#define KMP_INIT_WAIT 1024U
#define KMP_NEXT_WAIT 512U
#define KMP_INIT_WAIT 1024U
#define KMP_NEXT_WAIT 512U
#define KMP_INIT_WAIT 1024U
#define KMP_NEXT_WAIT 512U
#define KMP_INIT_WAIT 1024U
#define KMP_NEXT_WAIT 512U
#define KMP_INIT_WAIT 1024U
#define KMP_NEXT_WAIT 512U
#if KMP_ARCH_X86 || KMP_ARCH_X86_64
typedef struct kmp_cpuid {
typedef struct kmp_cpuinfo_flags_t {
  unsigned hybrid : 1;
  unsigned reserved : 29;
} kmp_cpuinfo_flags_t;
typedef struct kmp_cpuinfo {
  kmp_cpuinfo_flags_t flags;
  char name[3 * sizeof(kmp_cpuid_t)];
extern void __kmp_query_cpuid(kmp_cpuinfo_t *p);
static inline void __kmp_x86_cpuid(int leaf, int subleaf,
                                   struct kmp_cpuid *p) {
  __asm__ __volatile__("cpuid"
                       : "=a"(p->eax), "=b"(p->ebx), "=c"(p->ecx), "=d"(p->edx)
                       : "a"(leaf), "c"(subleaf));
}
static inline void __kmp_load_x87_fpu_control_word(const kmp_int16 *p) {
  __asm__ __volatile__("fldcw %0" : : "m"(*p));
}
static inline void __kmp_store_x87_fpu_control_word(kmp_int16 *p) {
  __asm__ __volatile__("fstcw %0" : "=m"(*p));
}
static inline void __kmp_clear_x87_fpu_status_word() {
#if KMP_MIC
  struct x87_fpu_state {
    unsigned cw;
    unsigned sw;
    unsigned tw;
    unsigned fip;
    unsigned fips;
    unsigned fdp;
    unsigned fds;
  };
  struct x87_fpu_state fpu_state = {0, 0, 0, 0, 0, 0, 0};
  __asm__ __volatile__("fstenv %0\n\t"
                       "andw $0x7f00, %1\n\t"
                       "fldenv %0\n\t"
                       : "+m"(fpu_state), "+m"(fpu_state.sw));
#else
  __asm__ __volatile__("fnclex");
#endif
}
static inline void __kmp_load_mxcsr(const kmp_uint32 *p) { _mm_setcsr(*p); }
static inline void __kmp_store_mxcsr(kmp_uint32 *p) { *p = _mm_getcsr(); }
static inline void __kmp_load_mxcsr(const kmp_uint32 *p) {}
static inline void __kmp_store_mxcsr(kmp_uint32 *p) { *p = 0; }
extern void __kmp_x86_cpuid(int mode, int mode2, struct kmp_cpuid *p);
extern void __kmp_load_x87_fpu_control_word(const kmp_int16 *p);
extern void __kmp_store_x87_fpu_control_word(kmp_int16 *p);
extern void __kmp_clear_x87_fpu_status_word();
static inline void __kmp_load_mxcsr(const kmp_uint32 *p) { _mm_setcsr(*p); }
static inline void __kmp_store_mxcsr(kmp_uint32 *p) { *p = _mm_getcsr(); }
#define KMP_X86_MXCSR_MASK 0xffffffc0
#if KMP_HAVE_WAITPKG_INTRINSICS
#if KMP_HAVE_IMMINTRIN_H
#include <immintrin.h>
#elif KMP_HAVE_INTRIN_H
static inline int __kmp_tpause(uint32_t hint, uint64_t counter) {
#if !KMP_HAVE_WAITPKG_INTRINSICS
  uint32_t timeHi = uint32_t(counter >> 32);
  uint32_t timeLo = uint32_t(counter & 0xffffffff);
  char flag;
  __asm__ volatile("#tpause\n.byte 0x66, 0x0F, 0xAE, 0xF1\n"
                   "setb %0"
                   : "=q"(flag)
                   : "a"(timeLo), "d"(timeHi), "c"(hint)
                   :);
  return flag;
#else
  return _tpause(hint, counter);
#endif
}
static inline void __kmp_umonitor(void *cacheline) {
#if !KMP_HAVE_WAITPKG_INTRINSICS
  __asm__ volatile("# umonitor\n.byte 0xF3, 0x0F, 0xAE, 0x01 "
                   :
                   : "a"(cacheline)
                   :);
#else
  _umonitor(cacheline);
#endif
}
static inline int __kmp_umwait(uint32_t hint, uint64_t counter) {
#if !KMP_HAVE_WAITPKG_INTRINSICS
  uint32_t timeHi = uint32_t(counter >> 32);
  uint32_t timeLo = uint32_t(counter & 0xffffffff);
  char flag;
  __asm__ volatile("#umwait\n.byte 0xF2, 0x0F, 0xAE, 0xF1\n"
                   "setb %0"
                   : "=q"(flag)
                   : "a"(timeLo), "d"(timeHi), "c"(hint)
                   :);
  return flag;
#else
  return _umwait(hint, counter);
#endif
}
#include <pmmintrin.h>
static inline void __kmp_mm_monitor(void *cacheline, unsigned extensions,
                                    unsigned hints) {
  _mm_monitor(cacheline, extensions, hints);
}
static inline void __kmp_mm_mwait(unsigned extensions, unsigned hints) {
  _mm_mwait(extensions, hints);
}
extern void __kmp_x86_pause(void);
static inline void __kmp_x86_pause(void) { _mm_delay_32(300); }
static inline void __kmp_x86_pause(void) { _mm_pause(); }
#define KMP_CPU_PAUSE() __kmp_x86_pause()
#define KMP_PPC64_PRI_LOW() __asm__ volatile("or 1, 1, 1")
#define KMP_PPC64_PRI_MED() __asm__ volatile("or 2, 2, 2")
#define KMP_PPC64_PRI_LOC_MB() __asm__ volatile("" : : : "memory")
#define KMP_CPU_PAUSE() \
  do { \
    KMP_PPC64_PRI_LOW(); \
    KMP_PPC64_PRI_MED(); \
    KMP_PPC64_PRI_LOC_MB(); \
  } while (0)
#define KMP_CPU_PAUSE()
#define KMP_INIT_YIELD(count) \
  { (count) = __kmp_yield_init; }
#define KMP_INIT_BACKOFF(time) \
  { (time) = __kmp_pause_init; }
#define KMP_OVERSUBSCRIBED \
  (TCR_4(__kmp_nth) > (__kmp_avail_proc ? __kmp_avail_proc : __kmp_xproc))
#define KMP_TRY_YIELD \
  ((__kmp_use_yield == 1) || (__kmp_use_yield == 2 && (KMP_OVERSUBSCRIBED)))
#define KMP_TRY_YIELD_OVERSUB \
  ((__kmp_use_yield == 1 || __kmp_use_yield == 2) && (KMP_OVERSUBSCRIBED))
#define KMP_YIELD(cond) \
  { \
    KMP_CPU_PAUSE(); \
    if ((cond) && (KMP_TRY_YIELD)) \
      __kmp_yield(); \
  }
#define KMP_YIELD_OVERSUB() \
  { \
    KMP_CPU_PAUSE(); \
    if ((KMP_TRY_YIELD_OVERSUB)) \
      __kmp_yield(); \
  }
#define KMP_YIELD_SPIN(count) \
  { \
    KMP_CPU_PAUSE(); \
    if (KMP_TRY_YIELD) { \
      (count) -= 2; \
      if (!(count)) { \
        __kmp_yield(); \
        (count) = __kmp_yield_next; \
      } \
    } \
  }
#define KMP_TPAUSE_MAX_MASK ((kmp_uint64)0xFFFF)
#define KMP_YIELD_OVERSUB_ELSE_SPIN(count, time) \
  { \
    if (__kmp_tpause_enabled) { \
      if (KMP_OVERSUBSCRIBED) { \
        __kmp_tpause(0, (time)); \
      } else { \
        __kmp_tpause(__kmp_tpause_hint, (time)); \
      } \
      (time) = (time << 1 | 1) & KMP_TPAUSE_MAX_MASK; \
    } else { \
      KMP_CPU_PAUSE(); \
      if ((KMP_TRY_YIELD_OVERSUB)) { \
        __kmp_yield(); \
      } else if (__kmp_use_yield == 1) { \
        (count) -= 2; \
        if (!(count)) { \
          __kmp_yield(); \
          (count) = __kmp_yield_next; \
        } \
      } \
    } \
  }
#define KMP_YIELD_OVERSUB_ELSE_SPIN(count, time) \
  { \
    KMP_CPU_PAUSE(); \
    if ((KMP_TRY_YIELD_OVERSUB)) \
      __kmp_yield(); \
    else if (__kmp_use_yield == 1) { \
      (count) -= 2; \
      if (!(count)) { \
        __kmp_yield(); \
        (count) = __kmp_yield_next; \
      } \
    } \
  }
#define IS_CONS_TYPE_ORDERED(ct) ((ct) == ct_pdo_ordered)
typedef HANDLE kmp_thread_t;
typedef DWORD kmp_key_t;
typedef pthread_t kmp_thread_t;
typedef pthread_key_t kmp_key_t;
typedef int kmp_itt_mark_t;
#define KMP_ITT_DEBUG 0
typedef void *(*kmpc_ctor)(void *);
typedef void *(*kmpc_cctor)(void *, void *);
typedef void *(*kmpc_ctor_vec)(void *, size_t);
typedef void *(*kmpc_cctor_vec)(void *, void *, size_t);
#define KMP_HASH_TABLE_LOG2 9
#define KMP_HASH_TABLE_SIZE (1 << KMP_HASH_TABLE_LOG2)
#define KMP_HASH_SHIFT 3
#define KMP_HASH(x) \
  ((((kmp_uintptr_t)x) >> KMP_HASH_SHIFT) & (KMP_HASH_TABLE_SIZE - 1))
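// Editor's note (not in the original header): KMP_HASH drops the low
// KMP_HASH_SHIFT bits of an address (pointers are at least 8-byte aligned,
// so the bottom 3 bits carry no information) and keeps the next 9 bits as
// the table index. Worked example: x = 0x7f001234 -> x >> 3 = 0xfe00246,
// and 0xfe00246 & 0x1ff = 0x46, i.e. bucket 70 of the 512-entry table.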
#if KMP_USE_HIER_SCHED
typedef struct kmp_hier_private_bdata_t {
} kmp_hier_private_bdata_t;
#if KMP_STATIC_STEAL_ENABLED
#if KMP_WEIGHTED_ITERATIONS_SUPPORTED
#if CACHE_LINE <= 128
#if KMP_WEIGHTED_ITERATIONS_SUPPORTED
#if CACHE_LINE <= 128
#if KMP_USE_HIER_SCHED
#if KMP_USE_HIER_SCHED
#if KMP_USE_INTERNODE_ALIGNMENT
#define KMP_INIT_BARRIER_STATE 0
#define KMP_BARRIER_SLEEP_BIT 0
#define KMP_BARRIER_UNUSED_BIT 1
#define KMP_BARRIER_BUMP_BIT 2
#define KMP_BARRIER_SLEEP_STATE (1 << KMP_BARRIER_SLEEP_BIT)
#define KMP_BARRIER_UNUSED_STATE (1 << KMP_BARRIER_UNUSED_BIT)
#define KMP_BARRIER_STATE_BUMP (1 << KMP_BARRIER_BUMP_BIT)
#if (KMP_BARRIER_SLEEP_BIT >= KMP_BARRIER_BUMP_BIT)
#error "Barrier sleep bit must be smaller than barrier bump bit"
#if (KMP_BARRIER_UNUSED_BIT >= KMP_BARRIER_BUMP_BIT)
#error "Barrier unused bit must be smaller than barrier bump bit"
#define KMP_BARRIER_NOT_WAITING 0
#define KMP_BARRIER_OWN_FLAG 1
#define KMP_BARRIER_PARENT_FLAG 2
#define KMP_BARRIER_SWITCH_TO_OWN_FLAG 3
#define KMP_BARRIER_SWITCHING 4
#define KMP_NOT_SAFE_TO_REAP 0
#define KMP_SAFE_TO_REAP 1
#if KMP_FAST_REDUCTION_BARRIER
#if !KMP_FAST_REDUCTION_BARRIER
#define bs_reduction_barrier bs_plain_barrier
#define KMP_BARRIER_ICV_PUSH 1
typedef struct kmp_win32_mutex {
  CRITICAL_SECTION cs;
typedef struct kmp_win32_cond {
  kmp_win32_mutex_t waiters_count_lock_;
  int wait_generation_count_;
  pthread_cond_t c_cond;
typedef union kmp_cond_union kmp_cond_align_t;
  pthread_mutex_t m_mutex;
typedef union kmp_mutex_union kmp_mutex_align_t;
  volatile int ds_alive;
#if !USE_CMP_XCHG_FOR_BGET
#ifdef USE_QUEUING_LOCK_FOR_BGET
#define KMP_CHECK_UPDATE(a, b) \
  if ((a) != (b)) \
  (a) = (b)
#define KMP_CHECK_UPDATE_SYNC(a, b) \
  if ((a) != (b)) \
  TCW_SYNC_PTR((a), (b))
#define get__blocktime(xteam, xtid) \
  ((xteam)->t.t_threads[(xtid)]->th.th_current_task->td_icvs.blocktime)
#define get__bt_set(xteam, xtid) \
  ((xteam)->t.t_threads[(xtid)]->th.th_current_task->td_icvs.bt_set)
#define get__bt_intervals(xteam, xtid) \
  ((xteam)->t.t_threads[(xtid)]->th.th_current_task->td_icvs.bt_intervals)
#define get__dynamic_2(xteam, xtid) \
  ((xteam)->t.t_threads[(xtid)]->th.th_current_task->td_icvs.dynamic)
#define get__nproc_2(xteam, xtid) \
  ((xteam)->t.t_threads[(xtid)]->th.th_current_task->td_icvs.nproc)
#define get__sched_2(xteam, xtid) \
  ((xteam)->t.t_threads[(xtid)]->th.th_current_task->td_icvs.sched)
#define set__blocktime_team(xteam, xtid, xval) \
  (((xteam)->t.t_threads[(xtid)]->th.th_current_task->td_icvs.blocktime) = \
       (xval))
#define set__bt_intervals_team(xteam, xtid, xval) \
  (((xteam)->t.t_threads[(xtid)]->th.th_current_task->td_icvs.bt_intervals) = \
       (xval))
#define set__bt_set_team(xteam, xtid, xval) \
  (((xteam)->t.t_threads[(xtid)]->th.th_current_task->td_icvs.bt_set) = (xval))
#define set__dynamic(xthread, xval) \
  (((xthread)->th.th_current_task->td_icvs.dynamic) = (xval))
#define get__dynamic(xthread) \
  (((xthread)->th.th_current_task->td_icvs.dynamic) ? (FTN_TRUE) : (FTN_FALSE))
#define set__nproc(xthread, xval) \
  (((xthread)->th.th_current_task->td_icvs.nproc) = (xval))
#define set__thread_limit(xthread, xval) \
  (((xthread)->th.th_current_task->td_icvs.thread_limit) = (xval))
#define set__max_active_levels(xthread, xval) \
  (((xthread)->th.th_current_task->td_icvs.max_active_levels) = (xval))
#define get__max_active_levels(xthread) \
  ((xthread)->th.th_current_task->td_icvs.max_active_levels)
#define set__sched(xthread, xval) \
  (((xthread)->th.th_current_task->td_icvs.sched) = (xval))
#define set__proc_bind(xthread, xval) \
  (((xthread)->th.th_current_task->td_icvs.proc_bind) = (xval))
#define get__proc_bind(xthread) \
  ((xthread)->th.th_current_task->td_icvs.proc_bind)
#define KMP_TASK_TO_TASKDATA(task) (((kmp_taskdata_t *)task) - 1)
#define KMP_TASKDATA_TO_TASK(taskdata) (kmp_task_t *)(taskdata + 1)
#define KMP_TASKING_ENABLED(task_team) \
  (TRUE == TCR_SYNC_4((task_team)->tt.tt_found_tasks))
  std::atomic<kmp_int32>
#define KMP_DEP_IN 0x1
#define KMP_DEP_OUT 0x2
#define KMP_DEP_INOUT 0x3
#define KMP_DEP_MTX 0x4
#define KMP_DEP_SET 0x8
#define KMP_DEP_ALL 0x80
#if defined(__BYTE_ORDER__) && (__BYTE_ORDER__ == __ORDER_BIG_ENDIAN__)
#define MAX_MTX_DEPS 4
#if KMP_SUPPORT_GRAPH_OUTPUT
#define INIT_MAPSIZE 50
typedef struct kmp_taskgraph_flags {
  unsigned nowait : 1;
  unsigned re_record : 1;
  unsigned reserved : 30;
} kmp_taskgraph_flags_t;
typedef struct kmp_node_info {
  std::atomic<kmp_int32> npredecessors_counter;
typedef enum kmp_tdg_status {
  KMP_TDG_NONE = 0,
  KMP_TDG_RECORDING = 1,
typedef struct kmp_tdg_info {
  kmp_taskgraph_flags_t tdg_flags;
  kmp_node_info_t *record_map;
  kmp_tdg_status_t tdg_status = KMP_TDG_NONE;
  std::atomic<kmp_int32> num_tasks;
  void *rec_taskred_data;
extern int __kmp_tdg_dot;
extern kmp_tdg_info_t **__kmp_global_tdgs;
extern std::atomic<kmp_int32> __kmp_tdg_task_id;
#ifdef BUILD_TIED_TASK_STACK
typedef struct kmp_stack_block {
  struct kmp_stack_block *sb_next;
  struct kmp_stack_block *sb_prev;
typedef struct kmp_task_stack {
  kmp_stack_block_t ts_first_block;
#if defined(__BYTE_ORDER__) && (__BYTE_ORDER__ == __ORDER_BIG_ENDIAN__)
  std::atomic<kmp_int32>
#if defined(KMP_GOMP_COMPAT)
#if defined(KMP_GOMP_COMPAT)
  void (*td_copy_func)(void *, void *);
  bool is_taskgraph = 0;
  kmp_tdg_info_t *tdg;
#ifdef BUILD_TIED_TASK_STACK
  kmp_task_stack_t td_susp_tied_tasks;
#define TASK_DEQUE_BITS 8
#define INITIAL_TASK_DEQUE_SIZE (1 << TASK_DEQUE_BITS)
#define TASK_DEQUE_SIZE(td) ((td).td_deque_size)
#define TASK_DEQUE_MASK(td) ((td).td_deque_size - 1)
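// Editor's note (not in the original header): the task deque size is kept a
// power of two so TASK_DEQUE_MASK can replace a modulo. With TASK_DEQUE_BITS
// == 8, INITIAL_TASK_DEQUE_SIZE == 256 and the mask is 0xff, so the slot for
// a tail counter of 260 is 260 & 0xff = 4; the index wraps around cheaply.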
#if (USE_FAST_MEMORY == 3) || (USE_FAST_MEMORY == 5)
typedef struct kmp_free_list {
  void *th_free_list_self;
  void *th_free_list_sync;
  void *th_free_list_other;
#if KMP_NESTED_HOT_TEAMS
typedef struct kmp_hot_team_ptr {
} kmp_hot_team_ptr_t;
  int th_team_bt_intervals;
#if KMP_AFFINITY_SUPPORTED
  kmp_affin_mask_t *th_affin_mask;
  kmp_affinity_ids_t th_topology_ids;
  kmp_affinity_attrs_t th_topology_attrs;
#if KMP_NESTED_HOT_TEAMS
  kmp_hot_team_ptr_t *th_hot_teams;
#if KMP_AFFINITY_SUPPORTED
  int th_current_place;
#if KMP_USE_HIER_SCHED
  kmp_hier_private_bdata_t *th_hier_bar_data;
#if (USE_FAST_MEMORY == 3) || (USE_FAST_MEMORY == 5)
  kmp_free_list_t th_free_lists[NUM_LISTS];
  kmp_win32_cond_t th_suspend_cv;
  kmp_win32_mutex_t th_suspend_mx;
  std::atomic<int> th_suspend_init;
  kmp_cond_align_t th_suspend_cv;
  kmp_mutex_align_t th_suspend_mx;
  std::atomic<int> th_suspend_init_count;
  kmp_itt_mark_t th_itt_mark_single;
#if KMP_STATS_ENABLED
  kmp_stats_list *th_stats;
  std::atomic<bool> th_blocking;
#define KMP_MIN_MALLOC_ARGV_ENTRIES 100
#if KMP_ARCH_X86 || KMP_ARCH_X86_64
#define KMP_INLINE_ARGV_BYTES \
  (4 * CACHE_LINE - \
   ((3 * KMP_PTR_SKIP + 2 * sizeof(int) + 2 * sizeof(kmp_int8) + \
     sizeof(kmp_int16) + sizeof(kmp_uint32)) % \
    CACHE_LINE))
#define KMP_INLINE_ARGV_BYTES \
  (2 * CACHE_LINE - ((3 * KMP_PTR_SKIP + 2 * sizeof(int)) % CACHE_LINE))
#define KMP_INLINE_ARGV_ENTRIES (int)(KMP_INLINE_ARGV_BYTES / KMP_PTR_SKIP)
  std::atomic<void *> t_tg_reduce_data[2];
  std::atomic<int> t_tg_fini_counter[2];
#if KMP_ARCH_X86 || KMP_ARCH_X86_64
#if KMP_AFFINITY_SUPPORTED
#if (KMP_ARCH_X86 || KMP_ARCH_X86_64)
  char dummy_padding[1024];
  std::atomic<kmp_uint32> t_copyin_counter;
#if KMP_AFFINITY_SUPPORTED
  int r_affinity_assigned;
extern int __kmp_forkjoin_frames;
extern int __kmp_forkjoin_frames_mode;
extern int kmp_a_debug;
extern int kmp_b_debug;
extern int kmp_c_debug;
extern int kmp_d_debug;
extern int kmp_e_debug;
extern int kmp_f_debug;
#define KMP_DEBUG_BUF_LINES_INIT 512
#define KMP_DEBUG_BUF_LINES_MIN 1
#define KMP_DEBUG_BUF_CHARS_INIT 128
#define KMP_DEBUG_BUF_CHARS_MIN 2
extern int __kmp_par_range;
#define KMP_PAR_RANGE_ROUTINE_LEN 1024
extern char __kmp_par_range_routine[KMP_PAR_RANGE_ROUTINE_LEN];
#define KMP_PAR_RANGE_FILENAME_LEN 1024
extern char __kmp_par_range_filename[KMP_PAR_RANGE_FILENAME_LEN];
extern int __kmp_par_range_lb;
extern int __kmp_par_range_ub;
#if KMP_ARCH_X86 || KMP_ARCH_X86_64
extern kmp_cpuinfo_t __kmp_cpuinfo;
#elif KMP_OS_DARWIN && KMP_ARCH_AARCH64
extern volatile int __kmp_init_monitor;
extern size_t __kmp_monitor_stksize;
extern int __kmp_suspend_count;
  if (*bt > INT_MAX / 1000) {
    *bt = INT_MAX / 1000;
    KMP_INFORM(MaxValueUsing, "kmp_set_blocktime(ms)", bt);
  }
extern int __kmp_monitor_wakeups;
extern int __kmp_bt_intervals;
#ifdef KMP_ADJUST_BLOCKTIME
extern int __kmp_zero_bt;
#ifdef KMP_DFLT_NTH_CORES
#ifdef KMP_TDATA_GTID
#if KMP_ARCH_X86 || KMP_ARCH_X86_64
extern int __kmp_inherit_fp_control;
extern kmp_int16 __kmp_init_x87_fpu_control_word;
#if KMP_NESTED_HOT_TEAMS
extern int __kmp_hot_teams_mode;
extern int __kmp_hot_teams_max_level;
extern enum clock_function_type __kmp_clock_function;
extern int __kmp_clock_function_param;
#if KMP_MIC_SUPPORTED
extern enum mic_type __kmp_mic_type;
#ifdef USE_LOAD_BALANCE
extern double __kmp_load_balance_interval;
#if KMP_USE_ADAPTIVE_LOCKS
struct kmp_adaptive_backoff_params_t {
extern kmp_adaptive_backoff_params_t __kmp_adaptive_backoff_params;
#if KMP_DEBUG_ADAPTIVE_LOCKS
extern const char *__kmp_speculative_statsfile;
#define __kmp_get_gtid() __kmp_get_global_thread_id()
#define __kmp_entry_gtid() __kmp_get_global_thread_id_reg()
#define __kmp_get_tid() (__kmp_tid_from_gtid(__kmp_get_gtid()))
#define __kmp_get_team() (__kmp_threads[(__kmp_get_gtid())]->th.th_team)
#define __kmp_get_thread() (__kmp_thread_from_gtid(__kmp_get_gtid()))
#define __kmp_get_team_num_threads(gtid) \
  (__kmp_threads[(gtid)]->th.th_team->t.t_nproc)
static inline int __kmp_gtid_from_tid(int tid, const kmp_team_t *team) {
  return team->t.t_threads[tid]->th.th_info.ds.ds_gtid;
}
static inline int __kmp_gtid_from_thread(const kmp_info_t *thr) {
  return thr->th.th_info.ds.ds_gtid;
}
#if KMP_HAVE_MWAIT || KMP_HAVE_UMWAIT
extern int __kmp_user_level_mwait;
extern int __kmp_umwait_enabled;
extern int __kmp_mwait_enabled;
extern int __kmp_mwait_hints;
extern int __kmp_waitpkg_enabled;
extern int __kmp_tpause_state;
extern int __kmp_tpause_hint;
extern int __kmp_tpause_enabled;
#define _KMP_GEN_ID(counter) \
  (__kmp_debugging ? KMP_ATOMIC_INC(&counter) + 1 : ~0)
#define _KMP_GEN_ID(counter) (~0)
#define KMP_GEN_TASK_ID() _KMP_GEN_ID(__kmp_task_counter)
#define KMP_GEN_TEAM_ID() _KMP_GEN_ID(__kmp_team_counter)
                                         size_t size, char const *format, ...);
#ifdef USE_LOAD_BALANCE
extern void __kmp_warn(char const *format, ...);
#define __kmp_allocate(size) ___kmp_allocate((size)KMP_SRC_LOC_CURR)
#define __kmp_page_allocate(size) ___kmp_page_allocate((size)KMP_SRC_LOC_CURR)
#define __kmp_free(ptr) ___kmp_free((ptr)KMP_SRC_LOC_CURR)
extern void *___kmp_fast_allocate(kmp_info_t *this_thr,
                                  size_t size KMP_SRC_LOC_DECL);
extern void __kmp_free_fast_memory(kmp_info_t *this_thr);
extern void __kmp_initialize_fast_memory(kmp_info_t *this_thr);
#define __kmp_fast_allocate(this_thr, size) \
  ___kmp_fast_allocate((this_thr), (size)KMP_SRC_LOC_CURR)
#define __kmp_fast_free(this_thr, ptr) \
  ___kmp_fast_free((this_thr), (ptr)KMP_SRC_LOC_CURR)
#define __kmp_thread_malloc(th, size) \
  ___kmp_thread_malloc((th), (size)KMP_SRC_LOC_CURR)
#define __kmp_thread_calloc(th, nelem, elsize) \
  ___kmp_thread_calloc((th), (nelem), (elsize)KMP_SRC_LOC_CURR)
#define __kmp_thread_realloc(th, ptr, size) \
  ___kmp_thread_realloc((th), (ptr), (size)KMP_SRC_LOC_CURR)
#define __kmp_thread_free(th, ptr) \
  ___kmp_thread_free((th), (ptr)KMP_SRC_LOC_CURR)
                                        int *num_threads_list);
                                 int num_teams_ub, int num_threads);
#ifdef KMP_GOMP_COMPAT
#if KMP_HANDLE_SIGNALS
extern int __kmp_handle_signals;
extern void __kmp_install_signals(int parallel_init);
extern void __kmp_remove_signals(void);
#if KMP_ARCH_X86 || KMP_ARCH_X86_64 || (KMP_OS_WINDOWS && (KMP_ARCH_AARCH64 || KMP_ARCH_ARM))
#if KMP_AFFINITY_SUPPORTED
extern char *__kmp_affinity_print_mask(char *buf, int buf_len,
                                       kmp_affin_mask_t *mask);
                                       kmp_affin_mask_t *mask);
extern void __kmp_affinity_initialize(kmp_affinity_t &affinity);
extern void __kmp_affinity_uninitialize(void);
extern void __kmp_affinity_set_init_mask(int gtid, int isa_root);
void __kmp_affinity_bind_init_mask(int gtid);
extern void __kmp_affinity_bind_place(int gtid);
extern int __kmp_aux_set_affinity(void **mask);
extern int __kmp_aux_get_affinity(void **mask);
extern int __kmp_aux_get_affinity_max_proc();
extern int __kmp_aux_set_affinity_mask_proc(int proc, void **mask);
extern int __kmp_aux_unset_affinity_mask_proc(int proc, void **mask);
extern int __kmp_aux_get_affinity_mask_proc(int proc, void **mask);
extern void __kmp_balanced_affinity(kmp_info_t *th, int team_size);
#if KMP_WEIGHTED_ITERATIONS_SUPPORTED
extern int __kmp_get_first_osid_with_ecore(void);
#if KMP_OS_LINUX || KMP_OS_FREEBSD || KMP_OS_NETBSD || KMP_OS_DRAGONFLY || \
extern int kmp_set_thread_affinity_mask_initial(void);
  if (r->r.r_uber_thread == __kmp_threads[gtid] && !r->r.r_affinity_assigned) {
    __kmp_affinity_set_init_mask(gtid, TRUE);
    __kmp_affinity_bind_init_mask(gtid);
    r->r.r_affinity_assigned = TRUE;
  }
  if (!KMP_AFFINITY_CAPABLE())
  if (r->r.r_uber_thread == th && r->r.r_affinity_assigned) {
    __kmp_set_system_affinity(__kmp_affin_origMask, FALSE);
    KMP_CPU_COPY(th->th.th_affin_mask, __kmp_affin_origMask);
    r->r.r_affinity_assigned = FALSE;
  }
#define __kmp_assign_root_init_mask()
extern int __kmp_futex_determine_capable(void);
extern void __kmp_create_monitor(kmp_info_t *th);
                          ompt_data_t ompt_parallel_data,
                          size_t reduce_size, void *reduce_data,
                          void (*reduce)(void *, void *));
                      int exit_teams = 0);
                                  size_t sizeof_kmp_task_t,
                                  size_t sizeof_shareds,
#define KMP_DEBUG_ASSERT_TASKTEAM_INVARIANT(team, thr) \
  KMP_DEBUG_ASSERT( \
      __kmp_tasking_mode != tskm_task_teams || team->t.t_nproc == 1 || \
      thr->th.th_task_team == team->t.t_task_team[thr->th.th_task_state])
#define KMP_DEBUG_ASSERT_TASKTEAM_INVARIANT(team, thr)
                              void **exit_frame_ptr
                                              size_t vector_length);
                                  size_t cpy_size, void *cpy_data,
                                  void (*cpy_func)(void *, void *),
                                            size_t sizeof_kmp_task_t,
                                            size_t sizeof_shareds,
                               bool serialize_immediate);
                                            int num, void *data);
    void **user_lock, uintptr_t hint);
static inline bool __kmp_tdg_is_recording(kmp_tdg_status_t status) {
  return status == KMP_TDG_RECORDING;
}
                       void *reduce_data,
                       void (*reduce_func)(void *lhs_data, void *rhs_data),
                       void *reduce_data,
                       void (*reduce_func)(void *lhs_data, void *rhs_data),
                       void *reduce_data,
                       void (*reduce_func)(void *lhs_data, void *rhs_data),
                 const char *message);
                                        kmp_int32 *num_threads_list,
                                        int severity, const char *message);
                        void *data_addr, size_t pc_size);
#define KMPC_CONVENTION __cdecl
#define KMPC_CONVENTION
                                  char const *format);
#define KMP_DEVICE_DEFAULT -1
#define KMP_DEVICE_ALL -11
#define KMP_HIDDEN_HELPER_THREAD(gtid) \
  ((gtid) >= 1 && (gtid) <= __kmp_hidden_helper_threads_num)
#define KMP_HIDDEN_HELPER_WORKER_THREAD(gtid) \
  ((gtid) > 1 && (gtid) <= __kmp_hidden_helper_threads_num)
#define KMP_HIDDEN_HELPER_MAIN_THREAD(gtid) \
  ((gtid) == 1 && (gtid) <= __kmp_hidden_helper_threads_num)
#define KMP_HIDDEN_HELPER_TEAM(team) \
  (team->t.t_threads[0] == __kmp_hidden_helper_main_thread)
#define KMP_GTID_TO_SHADOW_GTID(gtid) \
  ((gtid) % (__kmp_hidden_helper_threads_num - 1) + 2)
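// Editor's note (not in the original header): hidden helper threads occupy the
// low gtids, with gtid 1 as the helper main thread (see
// KMP_HIDDEN_HELPER_MAIN_THREAD) and gtids 2.. as helper workers.
// KMP_GTID_TO_SHADOW_GTID folds any gtid onto those workers. Worked example,
// assuming __kmp_hidden_helper_threads_num == 8:
//   gtid 16 -> 16 % (8 - 1) + 2 == 2 + 2 == 4, a helper-worker gtid in 2..8.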
  int adjusted_gtid = gtid;
  return adjusted_gtid;
template <bool C, bool S>
template <bool C, bool S>
template <bool C, bool S>
#if KMP_HAVE_MWAIT || KMP_HAVE_UMWAIT
template <bool C, bool S>
template <bool C, bool S>
template <bool C, bool S>
template <bool C, bool S>
template <bool C, bool S>
template <bool C, bool S>
template <bool C, bool S>
                       int *thread_finished,
template <bool C, bool S>
                       int *thread_finished,
template <bool C, bool S>
                           int final_spin, int *thread_finished,
                       int *thread_finished,
    if (f && f != stdout && f != stderr) {
                       const char *env_var = nullptr)
                       const char *env_var = nullptr) {
    f = fopen(filename, mode);
    f = fopen(filename, mode);
  operator FILE *() { return f; }
template <typename SourceType, typename TargetType,
          bool isSourceSmaller = (sizeof(SourceType) < sizeof(TargetType)),
          bool isSourceEqual = (sizeof(SourceType) == sizeof(TargetType)),
          bool isSourceSigned = std::is_signed<SourceType>::value,
          bool isTargetSigned = std::is_signed<TargetType>::value>
struct kmp_convert {};

// Both types are signed; Source smaller
template <typename SourceType, typename TargetType>
struct kmp_convert<SourceType, TargetType, true, false, true, true> {
  static TargetType to(SourceType src) { return (TargetType)src; }
};
// Source equal
template <typename SourceType, typename TargetType>
struct kmp_convert<SourceType, TargetType, false, true, true, true> {
  static TargetType to(SourceType src) { return src; }
};
// Source bigger
template <typename SourceType, typename TargetType>
struct kmp_convert<SourceType, TargetType, false, false, true, true> {
  static TargetType to(SourceType src) {
    KMP_ASSERT(src <= static_cast<SourceType>(
                   (std::numeric_limits<TargetType>::max)()));
    KMP_ASSERT(src >= static_cast<SourceType>(
                   (std::numeric_limits<TargetType>::min)()));
    return (TargetType)src;
  }
};

// Source signed, Target unsigned
// Source smaller
template <typename SourceType, typename TargetType>
struct kmp_convert<SourceType, TargetType, true, false, true, false> {
  static TargetType to(SourceType src) {
    KMP_ASSERT(src >= 0);
    return (TargetType)src;
  }
};
// Source equal
template <typename SourceType, typename TargetType>
struct kmp_convert<SourceType, TargetType, false, true, true, false> {
  static TargetType to(SourceType src) {
    KMP_ASSERT(src >= 0);
    return (TargetType)src;
  }
};
// Source bigger
template <typename SourceType, typename TargetType>
struct kmp_convert<SourceType, TargetType, false, false, true, false> {
  static TargetType to(SourceType src) {
    KMP_ASSERT(src >= 0);
    KMP_ASSERT(src <= static_cast<SourceType>(
                   (std::numeric_limits<TargetType>::max)()));
    return (TargetType)src;
  }
};

// Source unsigned, Target signed
// Source smaller
template <typename SourceType, typename TargetType>
struct kmp_convert<SourceType, TargetType, true, false, false, true> {
  static TargetType to(SourceType src) { return (TargetType)src; }
};
// Source equal
template <typename SourceType, typename TargetType>
struct kmp_convert<SourceType, TargetType, false, true, false, true> {
  static TargetType to(SourceType src) {
    KMP_ASSERT(src <= static_cast<SourceType>(
                   (std::numeric_limits<TargetType>::max)()));
    return (TargetType)src;
  }
};
// Source bigger
template <typename SourceType, typename TargetType>
struct kmp_convert<SourceType, TargetType, false, false, false, true> {
  static TargetType to(SourceType src) {
    KMP_ASSERT(src <= static_cast<SourceType>(
                   (std::numeric_limits<TargetType>::max)()));
    return (TargetType)src;
  }
};

// Source unsigned, Target unsigned
// Source smaller
template <typename SourceType, typename TargetType>
struct kmp_convert<SourceType, TargetType, true, false, false, false> {
  static TargetType to(SourceType src) { return (TargetType)src; }
};
// Source equal
template <typename SourceType, typename TargetType>
struct kmp_convert<SourceType, TargetType, false, true, false, false> {
  static TargetType to(SourceType src) { return src; }
};
// Source bigger
template <typename SourceType, typename TargetType>
struct kmp_convert<SourceType, TargetType, false, false, false, false> {
  static TargetType to(SourceType src) {
    KMP_ASSERT(src <= static_cast<SourceType>(
                   (std::numeric_limits<TargetType>::max)()));
    return (TargetType)src;
  }
};

template <typename T1, typename T2>
static inline void __kmp_type_convert(T1 src, T2 *dest) {
  *dest = kmp_convert<T1, T2>::to(src);
}
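// Editor's illustration (not part of the original source): kmp_convert picks
// a specialization from the relative size and signedness of the two types and
// asserts on narrowing, so __kmp_type_convert is a checked cast. A hedged
// sketch:
//
//   kmp_int64 big = 42;
//   kmp_int32 small;
//   __kmp_type_convert(big, &small); // KMP_ASSERTs 42 fits, then casts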
void * target(void *task)
int task_entry(kmp_int32 gtid, kmp_task_t *task)
This class safely opens and closes a C-style FILE* object using RAII semantics.
void set_stdout()
Set the FILE* object to stdout and output there No open call should happen before this call.
void set_stderr()
Set the FILE* object to stderr and output there No open call should happen before this call.
kmp_safe_raii_file_t(const kmp_safe_raii_file_t &other)=delete
int try_open(const char *filename, const char *mode)
Instead of erroring out, return non-zero when unsuccessful fopen() for any reason.
kmp_safe_raii_file_t & operator=(const kmp_safe_raii_file_t &other)=delete
kmp_safe_raii_file_t(const char *filename, const char *mode, const char *env_var=nullptr)
void open(const char *filename, const char *mode, const char *env_var=nullptr)
Open filename using mode.
kmp_int32(*)(kmp_int32, void *) kmp_routine_entry_t
struct kmp_task kmp_task_t
struct ident ident_t
The ident structure that describes a source location.
union kmp_cmplrdata kmp_cmplrdata_t
@ KMP_IDENT_BARRIER_IMPL_FOR
@ KMP_IDENT_KMPC
Use c-style ident structure.
@ KMP_IDENT_ATOMIC_HINT_CONTENDED
@ KMP_IDENT_BARRIER_IMPL_MASK
@ KMP_IDENT_BARRIER_IMPL_SECTIONS
@ KMP_IDENT_IMB
Use trampoline for internal microtasks.
@ KMP_IDENT_BARRIER_IMPL_WORKSHARE
@ KMP_IDENT_WORK_LOOP
To mark a static loop in OMPT callbacks.
@ KMP_IDENT_BARRIER_IMPL
To Mark implicit barriers.
@ KMP_IDENT_ATOMIC_HINT_UNCONTENDED
@ KMP_IDENT_WORK_SECTIONS
To mark a sections directive in OMPT callbacks.
@ KMP_IDENT_AUTOPAR
Entry point generated by auto-parallelization.
@ KMP_IDENT_ATOMIC_HINT_SPECULATIVE
@ KMP_IDENT_BARRIER_IMPL_SINGLE
@ KMP_IDENT_ATOMIC_HINT_MASK
Atomic hint; bottom four bits as omp_sync_hint_t.
@ KMP_IDENT_WORK_DISTRIBUTE
To mark a distribute construct in OMPT callbacks.
@ KMP_IDENT_OPENMP_SPEC_VERSION_MASK
@ KMP_IDENT_BARRIER_EXPL
To mark a 'barrier' directive in user code.
@ KMP_IDENT_ATOMIC_HINT_NONSPECULATIVE
@ KMP_IDENT_ATOMIC_REDUCE
Compiler generates atomic reduction option for kmpc_reduce*.
KMP_EXPORT kmp_int32 __kmpc_ok_to_fork(ident_t *)
KMP_EXPORT void __kmpc_fork_teams(ident_t *loc, kmp_int32 argc, kmpc_micro microtask,...)
KMP_EXPORT void __kmpc_fork_call_if(ident_t *loc, kmp_int32 nargs, kmpc_micro microtask, kmp_int32 cond, void *args)
KMP_EXPORT void __kmpc_push_num_threads(ident_t *loc, kmp_int32 global_tid, kmp_int32 num_threads)
KMP_EXPORT void __kmpc_set_thread_limit(ident_t *loc, kmp_int32 global_tid, kmp_int32 thread_limit)
KMP_EXPORT void __kmpc_serialized_parallel(ident_t *, kmp_int32 global_tid)
KMP_EXPORT void __kmpc_push_num_threads_list(ident_t *loc, kmp_int32 global_tid, kmp_uint32 list_length, kmp_int32 *num_threads_list)
KMP_EXPORT void __kmpc_push_num_teams(ident_t *loc, kmp_int32 global_tid, kmp_int32 num_teams, kmp_int32 num_threads)
KMP_EXPORT void __kmpc_fork_call(ident_t *, kmp_int32 nargs, kmpc_micro microtask,...)
KMP_EXPORT void __kmpc_end_serialized_parallel(ident_t *, kmp_int32 global_tid)
KMP_EXPORT void __kmpc_push_num_teams_51(ident_t *loc, kmp_int32 global_tid, kmp_int32 num_teams_lb, kmp_int32 num_teams_ub, kmp_int32 num_threads)
KMP_EXPORT void __kmpc_begin(ident_t *, kmp_int32 flags)
KMP_EXPORT void __kmpc_end(ident_t *)
KMP_EXPORT void __kmpc_end_reduce(ident_t *loc, kmp_int32 global_tid, kmp_critical_name *lck)
KMP_EXPORT void __kmpc_end_barrier_master(ident_t *, kmp_int32 global_tid)
KMP_EXPORT kmp_int32 __kmpc_barrier_master_nowait(ident_t *, kmp_int32 global_tid)
KMP_EXPORT void __kmpc_end_reduce_nowait(ident_t *loc, kmp_int32 global_tid, kmp_critical_name *lck)
KMP_EXPORT kmp_int32 __kmpc_reduce(ident_t *loc, kmp_int32 global_tid, kmp_int32 num_vars, size_t reduce_size, void *reduce_data, void(*reduce_func)(void *lhs_data, void *rhs_data), kmp_critical_name *lck)
KMP_EXPORT void __kmpc_barrier(ident_t *, kmp_int32 global_tid)
KMP_EXPORT void __kmpc_flush(ident_t *)
KMP_EXPORT kmp_int32 __kmpc_barrier_master(ident_t *, kmp_int32 global_tid)
KMP_EXPORT kmp_int32 __kmpc_reduce_nowait(ident_t *loc, kmp_int32 global_tid, kmp_int32 num_vars, size_t reduce_size, void *reduce_data, void(*reduce_func)(void *lhs_data, void *rhs_data), kmp_critical_name *lck)
KMP_EXPORT void * __kmpc_task_reduction_get_th_data(int gtid, void *tg, void *d)
KMP_EXPORT void * __kmpc_task_reduction_modifier_init(ident_t *loc, int gtid, int is_ws, int num, void *data)
KMP_EXPORT void * __kmpc_taskred_modifier_init(ident_t *loc, int gtid, int is_ws, int num, void *data)
KMP_EXPORT bool __kmpc_omp_has_task_team(kmp_int32 gtid)
KMP_EXPORT void __kmpc_proxy_task_completed_ooo(kmp_task_t *ptask)
KMP_EXPORT void __kmpc_task_reduction_modifier_fini(ident_t *loc, int gtid, int is_ws)
KMP_EXPORT kmp_int32 __kmpc_omp_task_with_deps(ident_t *loc_ref, kmp_int32 gtid, kmp_task_t *new_task, kmp_int32 ndeps, kmp_depend_info_t *dep_list, kmp_int32 ndeps_noalias, kmp_depend_info_t *noalias_dep_list)
KMP_EXPORT kmp_int32 __kmpc_omp_reg_task_with_affinity(ident_t *loc_ref, kmp_int32 gtid, kmp_task_t *new_task, kmp_int32 naffins, kmp_task_affinity_info_t *affin_list)
KMP_EXPORT void * __kmpc_task_reduction_init(int gtid, int num_data, void *data)
KMP_EXPORT void __kmpc_proxy_task_completed(kmp_int32 gtid, kmp_task_t *ptask)
KMP_EXPORT void __kmpc_omp_wait_deps(ident_t *loc_ref, kmp_int32 gtid, kmp_int32 ndeps, kmp_depend_info_t *dep_list, kmp_int32 ndeps_noalias, kmp_depend_info_t *noalias_dep_list)
KMP_EXPORT void * __kmpc_taskred_init(int gtid, int num_data, void *data)
KMP_EXPORT void ** __kmpc_omp_get_target_async_handle_ptr(kmp_int32 gtid)
void(* kmpc_dtor)(void *)
Pointer to the destructor function.
void *(* kmpc_cctor)(void *, void *)
Pointer to an alternate constructor.
KMP_EXPORT void __kmpc_threadprivate_register(ident_t *, void *data, kmpc_ctor ctor, kmpc_cctor cctor, kmpc_dtor dtor)
KMP_EXPORT void __kmpc_copyprivate(ident_t *loc, kmp_int32 global_tid, size_t cpy_size, void *cpy_data, void(*cpy_func)(void *, void *), kmp_int32 didit)
void *(* kmpc_cctor_vec)(void *, void *, size_t)
Array constructor.
void *(* kmpc_ctor)(void *)
Pointer to the constructor function.
KMP_EXPORT void * __kmpc_copyprivate_light(ident_t *loc, kmp_int32 gtid, void *cpy_data)
void *(* kmpc_ctor_vec)(void *, size_t)
Array constructor.
KMP_EXPORT void * __kmpc_threadprivate_cached(ident_t *loc, kmp_int32 global_tid, void *data, size_t size, void ***cache)
void(* kmpc_dtor_vec)(void *, size_t)
Pointer to the array destructor function.
KMP_EXPORT void __kmpc_threadprivate_register_vec(ident_t *, void *data, kmpc_ctor_vec ctor, kmpc_cctor_vec cctor, kmpc_dtor_vec dtor, size_t vector_length)
KMP_EXPORT kmp_int32 __kmpc_global_num_threads(ident_t *)
KMP_EXPORT kmp_int32 __kmpc_global_thread_num(ident_t *)
KMP_EXPORT kmp_int32 __kmpc_in_parallel(ident_t *loc)
KMP_EXPORT kmp_int32 __kmpc_bound_thread_num(ident_t *)
KMP_EXPORT kmp_int32 __kmpc_bound_num_threads(ident_t *)
KMP_EXPORT void __kmpc_end_ordered(ident_t *, kmp_int32 global_tid)
KMP_EXPORT void __kmpc_end_critical(ident_t *, kmp_int32 global_tid, kmp_critical_name *)
KMP_EXPORT void __kmpc_for_static_fini(ident_t *loc, kmp_int32 global_tid)
sched_type
Describes the loop schedule to be used for a parallel for loop.
KMP_EXPORT void __kmpc_end_masked(ident_t *, kmp_int32 global_tid)
KMP_EXPORT kmp_int32 __kmpc_master(ident_t *, kmp_int32 global_tid)
KMP_EXPORT kmp_int32 __kmpc_single(ident_t *, kmp_int32 global_tid)
KMP_EXPORT kmp_int32 __kmpc_next_section(ident_t *loc, kmp_int32 global_tid, kmp_int32 numberOfSections)
KMP_EXPORT void __kmpc_end_master(ident_t *, kmp_int32 global_tid)
KMP_EXPORT void __kmpc_end_sections(ident_t *loc, kmp_int32 global_tid)
KMP_EXPORT void __kmpc_end_single(ident_t *, kmp_int32 global_tid)
KMP_EXPORT kmp_int32 __kmpc_sections_init(ident_t *loc, kmp_int32 global_tid)
KMP_EXPORT void __kmpc_ordered(ident_t *, kmp_int32 global_tid)
KMP_EXPORT kmp_int32 __kmpc_masked(ident_t *, kmp_int32 global_tid, kmp_int32 filter)
void __kmpc_dispatch_init_4(ident_t *loc, kmp_int32 gtid, enum sched_type schedule, kmp_int32 lb, kmp_int32 ub, kmp_int32 st, kmp_int32 chunk)
KMP_EXPORT void __kmpc_critical(ident_t *, kmp_int32 global_tid, kmp_critical_name *)
@ kmp_nm_guided_chunked
guided unspecialized
@ kmp_sch_runtime_simd
runtime with chunk adjustment
@ kmp_nm_ord_dynamic_chunked
@ kmp_distribute_static_chunked
distribute static chunked
@ kmp_sch_static
static unspecialized
@ kmp_sch_guided_simd
guided with chunk adjustment
@ kmp_ord_dynamic_chunked
@ kmp_sch_modifier_monotonic
Set if the monotonic schedule modifier was present.
@ kmp_sch_default
default scheduling algorithm
@ kmp_sch_modifier_nonmonotonic
Set if the nonmonotonic schedule modifier was present.
@ kmp_nm_ord_static
ordered static unspecialized
@ kmp_distribute_static
distribute static unspecialized
@ kmp_sch_guided_chunked
guided unspecialized
@ kmp_sch_dynamic_chunked
@ kmp_sch_guided_analytical_chunked
@ kmp_sch_static_balanced
@ kmp_nm_static
static unspecialized
@ kmp_sch_lower
lower bound for unordered values
@ kmp_nm_guided_analytical_chunked
@ kmp_nm_upper
upper bound for nomerge values
@ kmp_ord_lower
lower bound for ordered values, must be power of 2
@ kmp_ord_static
ordered static unspecialized
@ kmp_sch_guided_iterative_chunked
@ kmp_sch_static_balanced_chunked
@ kmp_sch_upper
upper bound for unordered values
@ kmp_ord_upper
upper bound for ordered values
@ kmp_nm_lower
lower bound for nomerge values
@ kmp_nm_guided_iterative_chunked
@ kmp_ord_auto
ordered auto
@ kmp_nm_ord_static_chunked
@ kmp_nm_ord_guided_chunked
void __kmp_free_task_team(kmp_info_t *thread, kmp_task_team_t *task_team)
struct kmp_disp kmp_disp_t
int __kmp_memkind_available
omp_memspace_handle_t const omp_default_mem_space
KMP_EXPORT void __kmpc_destroy_nest_lock(ident_t *loc, kmp_int32 gtid, void **user_lock)
void __kmp_finish_implicit_task(kmp_info_t *this_thr)
void * omp_memspace_handle_t
volatile kmp_team_t * __kmp_team_pool
KMP_EXPORT kmp_int32 __kmpc_cancel_barrier(ident_t *loc_ref, kmp_int32 gtid)
int __kmp_pause_resource(kmp_pause_status_t level)
void * omp_allocator_handle_t
void __kmp_warn(char const *format,...)
void ___kmp_thread_free(kmp_info_t *th, void *ptr KMP_SRC_LOC_DECL)
void KMPC_SET_DYNAMIC(int flag)
kmp_bar_pat_e __kmp_barrier_release_pat_dflt
struct kmp_dephash kmp_dephash_t
kmp_info_t * __kmp_hidden_helper_main_thread
int __kmp_generate_warnings
volatile int __kmp_init_user_locks
int __kmp_debug_buf_lines
omp_allocator_handle_t __kmpc_init_allocator(int gtid, omp_memspace_handle_t, int ntraits, omp_alloctrait_t traits[])
kmp_proc_bind_t __kmp_teams_proc_bind
KMP_EXPORT void KMPC_CONVENTION kmpc_set_library(int)
kmp_uint32 __kmp_ge_4(kmp_uint32 value, kmp_uint32 checker)
void ompc_set_dynamic(int flag)
kmp_bootstrap_lock_t __kmp_initz_lock
void __kmp_aux_set_defaults(char const *str, size_t len)
int __kmp_display_env_verbose
omp_allocator_handle_t const omp_cgroup_mem_alloc
kmp_global_t __kmp_global
void __kmp_set_schedule(int gtid, kmp_sched_t new_sched, int chunk)
void __kmp_init_target_mem()
KMP_EXPORT void * kmpc_malloc(size_t size)
void __kmp_hidden_helper_worker_thread_signal()
KMP_EXPORT void __kmpc_push_num_threads_list_strict(ident_t *loc, kmp_int32 global_tid, kmp_uint32 list_length, kmp_int32 *num_threads_list, int severity, const char *message)
void __kmp_teams_master(int gtid)
void __kmp_elapsed_tick(double *)
void __kmp_common_destroy(void)
void __kmp_common_initialize(void)
#define KMP_HASH_TABLE_SIZE
void __kmp_release_64(kmp_flag_64<> *flag)
void __kmpc_free(int gtid, void *ptr, omp_allocator_handle_t al)
kmp_pause_status_t __kmp_pause_status
struct kmp_teams_size kmp_teams_size_t
kmp_lock_t __kmp_debug_lock
enum kmp_target_offload_kind kmp_target_offload_kind_t
void __kmp_read_system_time(double *delta)
KMP_NORETURN void __kmp_abort_process(void)
void __kmp_free_thread(kmp_info_t *)
KMP_EXPORT kmp_int32 __kmpc_omp_task(ident_t *loc_ref, kmp_int32 gtid, kmp_task_t *new_task)
void __kmp_enable(int old_state)
kmp_bootstrap_lock_t __kmp_tp_cached_lock
void __kmp_check_stack_overlap(kmp_info_t *thr)
struct kmp_base_root kmp_base_root_t
void * __kmpc_alloc(int gtid, size_t sz, omp_allocator_handle_t al)
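The allocator entry points above pair naturally; a minimal sketch (an illustration only, assuming kmp.h is on the include path and that gtid was obtained elsewhere) of allocating and releasing through the predefined default allocator:

#include "kmp.h"

/* Hedged sketch: allocate and free through the runtime allocator API
   declared in this header, using the predefined default allocator. */
static void *grab(int gtid, size_t n) {
  return __kmpc_alloc(gtid, n, omp_default_mem_alloc);
}
static void drop(int gtid, void *p) {
  __kmpc_free(gtid, p, omp_default_mem_alloc);
}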
void __kmp_infinite_loop(void)
kmp_info_t * __kmp_allocate_thread(kmp_root_t *root, kmp_team_t *team, int tid)
KMP_EXPORT void __kmpc_init_nest_lock(ident_t *loc, kmp_int32 gtid, void **user_lock)
kmp_team_t * __kmp_allocate_team(kmp_root_t *root, int new_nproc, int max_nproc, kmp_proc_bind_t proc_bind, kmp_internal_control_t *new_icvs, int argc USE_NESTED_HOT_ARG(kmp_info_t *thr))
void __kmp_reap_task_teams(void)
omp_memspace_handle_t const llvm_omp_target_host_mem_space
kmp_int32 __kmp_use_yield
char const * __kmp_barrier_type_name[bs_last_barrier]
char const * __kmp_barrier_pattern_name[bp_last_bar]
int __kmp_dflt_team_nth_ub
void __kmp_hidden_helper_threads_initz_wait()
void __kmp_pop_task_team_node(kmp_info_t *thread, kmp_team_t *team)
int __kmp_aux_get_num_teams()
KMP_EXPORT int __kmpc_test_nest_lock(ident_t *loc, kmp_int32 gtid, void **user_lock)
struct dispatch_shared_info dispatch_shared_info_t
struct kmp_taskgroup kmp_taskgroup_t
struct kmp_hws_item kmp_hws_item_t
void __kmp_run_before_invoked_task(int gtid, int tid, kmp_info_t *this_thr, kmp_team_t *team)
void __kmp_wait_to_unref_task_teams(void)
struct KMP_ALIGN_CACHE kmp_base_info kmp_base_info_t
void __kmp_wait_4_ptr(void *spinner, kmp_uint32 checker, kmp_uint32(*pred)(void *, kmp_uint32), void *obj)
void __kmp_task_team_sync(kmp_info_t *this_thr, kmp_team_t *team)
int __kmp_barrier(enum barrier_type bt, int gtid, int is_split, size_t reduce_size, void *reduce_data, void(*reduce)(void *, void *))
struct KMP_ALIGN_CACHE dispatch_private_info dispatch_private_info_t
int __kmp_get_max_active_levels(int gtid)
KMP_EXPORT void * kmpc_aligned_malloc(size_t size, size_t alignment)
void ___kmpc_free(int gtid, void *ptr, omp_allocator_handle_t al)
void __kmp_aux_set_library(enum library_type arg)
void __kmp_print_storage_map_gtid(int gtid, void *p1, void *p2, size_t size, char const *format,...)
void __kmp_push_num_teams_51(ident_t *loc, int gtid, int num_teams_lb, int num_teams_ub, int num_threads)
#define __kmp_assign_root_init_mask()
int __kmp_dflt_max_active_levels
KMP_EXPORT void __kmpc_doacross_init(ident_t *loc, kmp_int32 gtid, kmp_int32 num_dims, const struct kmp_dim *dims)
void __kmp_unlock_suspend_mx(kmp_info_t *th)
kmp_bar_pat_e __kmp_barrier_gather_pat_dflt
unsigned short __kmp_get_random(kmp_info_t *thread)
void * __kmpc_calloc(int gtid, size_t nmemb, size_t sz, omp_allocator_handle_t al)
static kmp_team_t * __kmp_team_from_gtid(int gtid)
int __kmp_register_root(int initial_thread)
int __kmp_execute_tasks_oncore(kmp_info_t *thread, kmp_int32 gtid, kmp_flag_oncore *flag, int final_spin, int *thread_finished, kmp_int32 is_constrained)
omp_memspace_handle_t const omp_low_lat_mem_space
void __kmp_do_initialize_hidden_helper_threads()
int __kmp_storage_map_verbose_specified
struct kmp_local kmp_local_t
omp_allocator_handle_t __kmpc_get_default_allocator(int gtid)
kmp_bar_pat_e __kmp_barrier_gather_pattern[bs_last_barrier]
void __kmp_thread_sleep(int millis)
KMP_EXPORT kmp_int32 __kmpc_omp_taskyield(ident_t *loc_ref, kmp_int32 gtid, int end_part)
kmp_tasking_mode_t __kmp_tasking_mode
char * __kmp_affinity_format
void __kmp_abort_thread(void)
volatile kmp_info_t * __kmp_thread_pool
void __kmp_internal_end_atexit(void)
kmp_hws_item_t __kmp_hws_die
KMP_EXPORT void __kmpc_set_lock(ident_t *loc, kmp_int32 gtid, void **user_lock)
void __kmp_get_hierarchy(kmp_uint32 nproc, kmp_bstate_t *thr_bar)
volatile int __kmp_init_gtid
omp_allocator_handle_t __kmp_def_allocator
kmp_hws_item_t __kmp_hws_node
union KMP_ALIGN_CACHE kmp_sleep_team kmp_sleep_team_t
kmp_bootstrap_lock_t __kmp_task_team_lock
void * __kmp_alloc(int gtid, size_t align, size_t sz, omp_allocator_handle_t al)
int __kmp_omp_cancellation
void __kmp_suspend_64(int th_gtid, kmp_flag_64< C, S > *flag)
static void __kmp_resume_if_hard_paused()
kmp_hws_item_t __kmp_hws_tile
void * __kmp_calloc(int gtid, size_t align, size_t nmemb, size_t sz, omp_allocator_handle_t al)
kmp_nested_proc_bind_t __kmp_nested_proc_bind
void __kmp_free_implicit_task(kmp_info_t *this_thr)
KMP_EXPORT void __kmpc_destroy_lock(ident_t *loc, kmp_int32 gtid, void **user_lock)
void KMP_EXPAND_NAME(ompc_set_affinity_format)(char const *format)
void __kmp_hidden_helper_main_thread_release()
void kmp_threadprivate_insert_private_data(int gtid, void *pc_addr, void *data_addr, size_t pc_size)
fork_context_e
Tells the fork call which compiler generated it, and therefore how the call must be handled.
@ fork_context_gnu
Called from GNU-generated code, so the runtime must not invoke the microtask internally.
@ fork_context_intel
Called from Intel-generated code.
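A sketch of how a caller might branch on this enum (illustrative only; run_microtask_now is a hypothetical placeholder, and the real dispatch lives inside __kmp_fork_call):

#include "kmp.h"

static void run_microtask_now(void) { /* hypothetical placeholder */ }

/* Hedged sketch: the GNU entry point must return without running the
   microtask (the GOMP caller invokes it itself), while the Intel entry
   point expects the runtime to invoke it. */
static void after_fork(enum fork_context_e ctx) {
  if (ctx == fork_context_intel)
    run_microtask_now();
  /* fork_context_gnu: return and let the caller invoke the microtask. */
}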
void __kmp_exit_single(int gtid)
struct KMP_ALIGN_CACHE dispatch_private_info32 dispatch_private_info32_t
void __kmp_suspend_initialize(void)
int __kmp_get_team_size(int gtid, int level)
kmp_nested_nthreads_t __kmp_nested_nth
KMP_EXPORT void __kmpc_push_proc_bind(ident_t *loc, kmp_int32 global_tid, int proc_bind)
omp_allocator_handle_t const omp_default_mem_alloc
kmp_uint32 __kmp_eq_4(kmp_uint32 value, kmp_uint32 checker)
kmp_cached_addr_t * __kmp_threadpriv_cache_list
@ atomic_flag64
Atomic 64-bit flags.
@ flag_oncore
Special 64-bit flag for the on-core (hierarchical) barrier.
@ flag32
Atomic 32-bit flags.
void __kmp_internal_end_dtor(void)
kmp_uint64 __kmp_now_nsec()
KMP_EXPORT int __kmp_get_cancellation_status(int cancel_kind)
void KMP_EXPAND_NAME(ompc_display_affinity)(char const *format)
volatile int __kmp_all_nth
void __kmp_check_stksize(size_t *val)
kmp_target_offload_kind_t __kmp_target_offload
int __kmp_debug_buf_chars
int __kmpc_get_target_offload()
void __kmp_parallel_dxo(int *gtid_ref, int *cid_ref, ident_t *loc_ref)
KMP_EXPORT void __kmpc_critical_with_hint(ident_t *, kmp_int32 global_tid, kmp_critical_name *, uint32_t hint)
int __kmp_get_global_thread_id_reg(void)
void __kmp_push_num_threads(ident_t *loc, int gtid, int num_threads)
KMP_EXPORT void KMPC_CONVENTION kmpc_set_stacksize(int)
#define SCHEDULE_HAS_MONOTONIC(s)
int __kmpc_dispatch_next_4(ident_t *loc, kmp_int32 gtid, kmp_int32 *p_last, kmp_int32 *p_lb, kmp_int32 *p_ub, kmp_int32 *p_st)
int __kmp_is_address_mapped(void *addr)
kmp_lock_t __kmp_global_lock
int __kmp_barrier_gomp_cancel(int gtid)
double __kmp_read_cpu_time(void)
void __kmpc_destroy_allocator(int gtid, omp_allocator_handle_t al)
union KMP_ALIGN_CACHE kmp_root kmp_root_t
int __kmp_adjust_gtid_mode
#define __kmp_entry_gtid()
kmp_old_threads_list_t * __kmp_old_threads_list
void __kmp_internal_end_library(int gtid)
void __kmp_suspend_32(int th_gtid, kmp_flag_32< C, S > *flag)
struct kmp_internal_control kmp_internal_control_t
void __kmp_hidden_helper_worker_thread_wait()
void __kmpc_dispatch_fini_4(ident_t *loc, kmp_int32 gtid)
volatile int __kmp_init_common
void __kmp_set_max_active_levels(int gtid, int new_max_active_levels)
void __kmpc_dispatch_deinit(ident_t *loc, kmp_int32 gtid)
See __kmpc_dispatch_deinit.
enum sched_type __kmp_auto
void __kmp_init_random(kmp_info_t *thread)
static int __kmp_tid_from_gtid(int gtid)
static bool KMP_UBER_GTID(int gtid)
kmp_int32 __kmp_use_yield_exp_set
kmp_event_t * __kmpc_task_allow_completion_event(ident_t *loc_ref, int gtid, kmp_task_t *task)
void __kmp_internal_end_thread(int gtid)
struct kmp_sys_info kmp_sys_info_t
KMP_EXPORT void __kmp_set_num_teams(int num_teams)
void __kmp_disable(int *old_state)
omp_allocator_handle_t const omp_large_cap_mem_alloc
volatile int __kmp_init_hidden_helper
void __kmp_push_num_threads_list(ident_t *loc, int gtid, kmp_uint32 list_length, int *num_threads_list)
struct kmp_depend_info kmp_depend_info_t
void __kmp_user_set_library(enum library_type arg)
const char * __kmp_hw_get_catalog_string(kmp_hw_t type, bool plural=false)
KMP_EXPORT void __kmpc_init_nest_lock_with_hint(ident_t *loc, kmp_int32 gtid, void **user_lock, uintptr_t hint)
void __kmpc_set_default_allocator(int gtid, omp_allocator_handle_t al)
omp_allocator_handle_t const omp_low_lat_mem_alloc
@ KMP_EVENT_UNINITIALIZED
@ KMP_EVENT_ALLOW_COMPLETION
void __kmp_elapsed(double *)
KMP_EXPORT void KMPC_CONVENTION kmpc_set_disp_num_buffers(int)
int __kmp_gtid_get_specific(void)
int __kmp_aux_get_team_num()
struct KMP_ALIGN_CACHE dispatch_private_info64 dispatch_private_info64_t
KMP_EXPORT void __kmpc_taskloop(ident_t *loc, kmp_int32 gtid, kmp_task_t *task, kmp_int32 if_val, kmp_uint64 *lb, kmp_uint64 *ub, kmp_int64 st, kmp_int32 nogroup, kmp_int32 sched, kmp_uint64 grainsize, void *task_dup)
volatile int __kmp_init_middle
void __kmp_hidden_helper_threads_deinitz_wait()
omp_allocator_handle_t const omp_high_bw_mem_alloc
void __kmp_set_num_threads(int new_nth, int gtid)
std::atomic< kmp_int32 > __kmp_task_counter
void __kmpc_error(ident_t *loc, int severity, const char *message)
static kmp_sched_t __kmp_sched_without_mods(kmp_sched_t kind)
KMP_EXPORT kmp_int32 __kmpc_omp_task_parts(ident_t *loc_ref, kmp_int32 gtid, kmp_task_t *new_task)
KMP_EXPORT kmp_task_t * __kmpc_omp_task_alloc(ident_t *loc_ref, kmp_int32 gtid, kmp_int32 flags, size_t sizeof_kmp_task_t, size_t sizeof_shareds, kmp_routine_entry_t task_entry)
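A hedged sketch of the allocate-then-enqueue sequence implied by __kmpc_omp_task_alloc and __kmpc_omp_task (assumes kmp.h; the tied-task flag value 1 is an assumption, see kmp_tasking_flags_t, and the entry signature follows kmp_routine_entry_t):

#include "kmp.h"

/* Illustrative task entry; the runtime passes the calling thread's gtid
   and the task object it allocated. */
static kmp_int32 my_task_entry(kmp_int32 gtid, void *task) {
  /* ... task body; kmp_task_t::shareds points at captured shared vars ... */
  return 0;
}

static void spawn_one_task(ident_t *loc, kmp_int32 gtid) {
  kmp_task_t *t = __kmpc_omp_task_alloc(loc, gtid, /*flags=*/1,
                                        sizeof(kmp_task_t),
                                        /*sizeof_shareds=*/0, my_task_entry);
  __kmpc_omp_task(loc, gtid, t); /* queue it, or run it if it can't queue */
}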
KMP_EXPORT void __kmpc_doacross_fini(ident_t *loc, kmp_int32 gtid)
kmp_r_sched_t __kmp_get_schedule_global(void)
int __kmp_storage_map_verbose
int __kmp_allThreadsSpecified
enum sched_type __kmp_static
int __kmp_execute_tasks_64(kmp_info_t *thread, kmp_int32 gtid, kmp_flag_64< C, S > *flag, int final_spin, int *thread_finished, kmp_int32 is_constrained)
void kmpc_set_blocktime(int arg)
KMP_EXPORT void __kmpc_taskloop_5(ident_t *loc, kmp_int32 gtid, kmp_task_t *task, kmp_int32 if_val, kmp_uint64 *lb, kmp_uint64 *ub, kmp_int64 st, kmp_int32 nogroup, kmp_int32 sched, kmp_uint64 grainsize, kmp_int32 modifier, void *task_dup)
KMP_EXPORT void __kmpc_omp_task_complete_if0(ident_t *loc_ref, kmp_int32 gtid, kmp_task_t *task)
PACKED_REDUCTION_METHOD_T __kmp_determine_reduction_method(ident_t *loc, kmp_int32 global_tid, kmp_int32 num_vars, size_t reduce_size, void *reduce_data, void(*reduce_func)(void *lhs_data, void *rhs_data), kmp_critical_name *lck)
enum kmp_tasking_mode kmp_tasking_mode_t
void * __kmp_realloc(int gtid, void *ptr, size_t sz, omp_allocator_handle_t al, omp_allocator_handle_t free_al)
void __kmp_end_split_barrier(enum barrier_type bt, int gtid)
int PACKED_REDUCTION_METHOD_T
std::atomic< int > __kmp_thread_pool_active_nth
void __kmp_hidden_helper_threads_initz_routine()
KMP_EXPORT void __kmpc_pop_num_threads(ident_t *loc, kmp_int32 global_tid)
const char * __kmp_hw_get_keyword(kmp_hw_t type, bool plural=false)
union KMP_ALIGN_CACHE kmp_thread_data kmp_thread_data_t
kmp_uint32 __kmp_lt_4(kmp_uint32 value, kmp_uint32 checker)
KMP_EXPORT void __kmpc_doacross_post(ident_t *loc, kmp_int32 gtid, const kmp_int64 *vec)
int __kmp_affinity_num_places
int __kmp_enter_single(int gtid, ident_t *id_ref, int push_ws)
int __kmp_duplicate_library_ok
void * ___kmp_thread_malloc(kmp_info_t *th, size_t size KMP_SRC_LOC_DECL)
struct kmp_base_data kmp_base_data_t
struct kmp_base_thread_data kmp_base_thread_data_t
volatile int __kmp_need_register_serial
#define KMP_PAD(type, sz)
void __kmp_internal_join(ident_t *id, int gtid, kmp_team_t *team)
kmp_bootstrap_lock_t __kmp_forkjoin_lock
KMP_EXPORT kmp_uint64 __kmpc_get_taskid()
omp_memspace_handle_t const omp_const_mem_space
struct kmp_cg_root kmp_cg_root_t
kmp_uint32 __kmp_barrier_release_branch_bits[bs_last_barrier]
KMP_EXPORT int KMPC_CONVENTION kmpc_get_affinity_mask_proc(int, kmp_affinity_mask_t *)
static kmp_info_t * __kmp_entry_thread()
KMP_EXPORT kmp_int32 __kmpc_omp_taskwait(ident_t *loc_ref, kmp_int32 gtid)
void __kmp_init_memkind()
struct kmp_task_affinity_info kmp_task_affinity_info_t
int __kmp_get_ancestor_thread_num(int gtid, int level)
void __kmp_hidden_helper_main_thread_wait()
void * __kmp_launch_thread(kmp_info_t *thr)
void __kmp_init_implicit_task(ident_t *loc_ref, kmp_info_t *this_thr, kmp_team_t *team, int tid, int set_curr_task)
kmp_int32 __kmp_default_device
void __kmp_omp_display_env(int verbose)
void __kmp_cleanup_threadprivate_caches()
void __kmp_middle_initialize(void)
static void copy_icvs(kmp_internal_control_t *dst, kmp_internal_control_t *src)
KMP_EXPORT void __kmpc_end_taskgroup(ident_t *loc, int gtid)
kmp_bootstrap_lock_t __kmp_exit_lock
KMP_EXPORT void __kmpc_omp_taskwait_deps_51(ident_t *loc_ref, kmp_int32 gtid, kmp_int32 ndeps, kmp_depend_info_t *dep_list, kmp_int32 ndeps_noalias, kmp_depend_info_t *noalias_dep_list, kmp_int32 has_no_wait)
omp_memspace_handle_t const omp_large_cap_mem_space
int __kmp_force_monotonic
kmp_info_t ** __kmp_threads
void __kmp_abort(char const *format,...)
void __kmp_hidden_helper_initz_release()
enum sched_type __kmp_sched
void __kmp_suspend_uninitialize_thread(kmp_info_t *th)
void * ___kmp_page_allocate(size_t size KMP_SRC_LOC_DECL)
struct kmp_cached_addr kmp_cached_addr_t
int __kmp_enable_task_throttling
void __kmp_unregister_root(int gtid)
void __kmp_finalize_bget(kmp_info_t *th)
static void __kmp_reset_root_init_mask(int gtid)
kmp_uint32 __kmp_barrier_gather_bb_dflt
KMP_EXPORT void __kmpc_omp_task_begin_if0(ident_t *loc_ref, kmp_int32 gtid, kmp_task_t *task)
kmp_uint32 __kmp_barrier_release_bb_dflt
struct dispatch_shared_info32 dispatch_shared_info32_t
int __kmp_task_stealing_constraint
int __kmp_need_register_atfork
struct private_common * kmp_threadprivate_insert(int gtid, void *pc_addr, void *data_addr, size_t pc_size)
void __kmp_parallel_deo(int *gtid_ref, int *cid_ref, ident_t *loc_ref)
struct kmp_target_data kmp_target_data_t
int __kmp_dispatch_num_buffers
KMP_EXPORT void __kmpc_unset_nest_lock(ident_t *loc, kmp_int32 gtid, void **user_lock)
#define SCHEDULE_WITHOUT_MODIFIERS(s)
kmp_uint32 __kmp_yield_init
KMP_EXPORT void __kmp_set_teams_thread_limit(int limit)
void __kmp_internal_end_dest(void *)
void * __kmpc_realloc(int gtid, void *ptr, size_t sz, omp_allocator_handle_t al, omp_allocator_handle_t free_al)
void __kmp_suspend_oncore(int th_gtid, kmp_flag_oncore *flag)
bool __kmp_dflt_max_active_levels_set
void __kmp_run_after_invoked_task(int gtid, int tid, kmp_info_t *this_thr, kmp_team_t *team)
size_t KMP_EXPAND_NAME(ompc_get_affinity_format)(char *buffer, size_t size)
KMP_EXPORT void KMPC_CONVENTION kmpc_set_defaults(char const *)
void __kmp_lock_suspend_mx(kmp_info_t *th)
struct dispatch_shared_info64 dispatch_shared_info64_t
omp_memspace_handle_t const llvm_omp_target_shared_mem_space
char * __kmp_debug_buffer
omp_memspace_handle_t const omp_high_bw_mem_space
void __kmp_parallel_initialize(void)
void __kmp_terminate_thread(int gtid)
int __kmp_nesting_mode_nlevels
void __kmp_set_nesting_mode_threads()
void __kmp_unregister_library(void)
int __kmpc_dispatch_next_4u(ident_t *loc, kmp_int32 gtid, kmp_int32 *p_last, kmp_uint32 *p_lb, kmp_uint32 *p_ub, kmp_int32 *p_st)
See __kmpc_dispatch_next_4.
int(* launch_t)(int gtid)
int __kmp_ignore_mppbeg(void)
void __kmp_create_worker(int gtid, kmp_info_t *th, size_t stack_size)
int * __kmp_nesting_nth_level
KMP_EXPORT void __kmpc_doacross_wait(ident_t *loc, kmp_int32 gtid, const kmp_int64 *vec)
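Together with __kmpc_doacross_init, __kmpc_doacross_post, and __kmpc_doacross_fini listed elsewhere in this index, the wait entry completes the doacross protocol; a hedged one-dimensional sketch (assumes kmp.h, the {lo, up, st} layout of struct kmp_dim, and a distance-1 cross-iteration dependence):

#include "kmp.h"

/* Illustrative ordered(1) loop with a dependence on iteration i-1. */
static void doacross_loop(ident_t *loc, kmp_int32 gtid,
                          kmp_int64 lb, kmp_int64 ub) {
  struct kmp_dim dim;
  dim.lo = lb; dim.up = ub; dim.st = 1; /* bounds and stride */
  __kmpc_doacross_init(loc, gtid, /*num_dims=*/1, &dim);
  for (kmp_int64 i = lb; i <= ub; ++i) {
    kmp_int64 dep = i - 1;
    if (i > lb)
      __kmpc_doacross_wait(loc, gtid, &dep); /* block until i-1 posted */
    /* ... loop body ... */
    __kmpc_doacross_post(loc, gtid, &i); /* publish completion of i */
  }
  __kmpc_doacross_fini(loc, gtid);
}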
omp_allocator_handle_t const omp_const_mem_alloc
KMP_EXPORT void __kmpc_set_nest_lock(ident_t *loc, kmp_int32 gtid, void **user_lock)
volatile int __kmp_init_parallel
KMP_EXPORT void __kmpc_push_num_threads_strict(ident_t *loc, kmp_int32 global_tid, kmp_int32 num_threads, int severity, const char *message)
omp_allocator_handle_t const omp_pteam_mem_alloc
kmp_queuing_lock_t __kmp_dispatch_lock
KMP_EXPORT int KMPC_CONVENTION kmpc_set_affinity_mask_proc(int, kmp_affinity_mask_t *)
omp_allocator_handle_t const llvm_omp_target_host_mem_alloc
int __kmp_need_register_atfork_specified
omp_allocator_handle_t const kmp_max_mem_alloc
kmp_int32 __kmp_enable_hidden_helper
struct kmp_desc_base kmp_desc_base_t
enum kmp_sched kmp_sched_t
void __kmp_internal_fork(ident_t *id, int gtid, kmp_team_t *team)
void __kmp_aux_set_stacksize(size_t arg)
static const size_t KMP_AFFINITY_FORMAT_SIZE
enum library_type __kmp_library
void __kmp_tasking_barrier(kmp_team_t *team, kmp_info_t *thread, int gtid)
void __kmp_push_num_teams(ident_t *loc, int gtid, int num_teams, int num_threads)
struct kmp_tasking_flags kmp_tasking_flags_t
omp_memspace_handle_t kmp_memspace_t
static bool __kmp_is_hybrid_cpu()
void __kmp_clear_system_time(void)
KMP_EXPORT kmp_uint64 __kmpc_get_parent_taskid()
struct KMP_ALIGN_CACHE kmp_base_team kmp_base_team_t
size_t __kmp_aux_capture_affinity(int gtid, const char *format, kmp_str_buf_t *buffer)
KMP_EXPORT int __kmp_get_max_teams(void)
void KMPC_SET_NESTED(int flag)
void(* kmpc_micro_bound)(kmp_int32 *bound_tid, kmp_int32 *bound_nth,...)
void __kmp_get_schedule(int gtid, kmp_sched_t *sched, int *chunk)
kmp_team_t * __kmp_reap_team(kmp_team_t *)
kmp_key_t __kmp_gtid_threadprivate_key
KMP_EXPORT void * __kmpc_threadprivate(ident_t *, kmp_int32 global_tid, void *data, size_t size)
struct kmp_task_pri kmp_task_pri_t
kmp_hws_item_t __kmp_hws_socket
KMP_EXPORT void KMPC_CONVENTION ompc_set_schedule(omp_sched_t, int)
int __kmp_fork_call(ident_t *loc, int gtid, enum fork_context_e fork_context, kmp_int32 argc, microtask_t microtask, launch_t invoker, kmp_va_list ap)
kmp_info_t * __kmp_thread_pool_insert_pt
KMP_EXPORT kmp_int32 __kmpc_cancel(ident_t *loc_ref, kmp_int32 gtid, kmp_int32 cncl_kind)
KMP_EXPORT void * kmpc_calloc(size_t nelem, size_t elsize)
int __kmpc_dispatch_next_8u(ident_t *loc, kmp_int32 gtid, kmp_int32 *p_last, kmp_uint64 *p_lb, kmp_uint64 *p_ub, kmp_int64 *p_st)
See __kmpc_dispatch_next_4.
int __kmp_get_global_thread_id(void)
#define USE_NESTED_HOT_ARG(x)
int __kmp_env_consistency_check
#define bs_reduction_barrier
void __kmp_runtime_destroy(void)
kmp_uint64 __kmp_pause_init
kmp_uint64 __kmp_taskloop_min_tasks
KMP_EXPORT int KMPC_CONVENTION ompc_get_ancestor_thread_num(int)
union KMP_ALIGN_CACHE kmp_desc kmp_desc_t
char const * __kmp_barrier_branch_bit_env_name[bs_last_barrier]
kmp_hws_item_t __kmp_hws_proc
void __kmp_aux_display_affinity(int gtid, const char *format)
static void __kmp_sched_apply_mods_intkind(kmp_sched_t kind, enum sched_type *internal_kind)
void __kmp_fulfill_event(kmp_event_t *event)
KMP_EXPORT void __kmpc_taskgroup(ident_t *loc, int gtid)
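Paired with __kmpc_end_taskgroup above, this brackets a taskgroup region; a minimal sketch (assumes kmp.h):

#include "kmp.h"

/* Hedged sketch: tasks spawned between the two calls are waited on at
   __kmpc_end_taskgroup. */
static void taskgroup_region(ident_t *loc, int gtid) {
  __kmpc_taskgroup(loc, gtid);
  /* ... __kmpc_omp_task(...) spawns here join the group ... */
  __kmpc_end_taskgroup(loc, gtid);
}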
int __kmp_read_system_info(struct kmp_sys_info *info)
void * ___kmp_thread_realloc(kmp_info_t *th, void *ptr, size_t size KMP_SRC_LOC_DECL)
kmp_uint32 __kmp_neq_4(kmp_uint32 value, kmp_uint32 checker)
volatile int __kmp_hidden_helper_team_done
KMP_EXPORT kmp_depnode_list_t * __kmpc_task_get_successors(kmp_task_t *task)
void __kmp_push_proc_bind(ident_t *loc, int gtid, kmp_proc_bind_t proc_bind)
static void __kmp_sched_apply_mods_stdkind(kmp_sched_t *kind, enum sched_type internal_kind)
struct kmp_base_depnode kmp_base_depnode_t
void __kmp_init_nesting_mode()
void __kmp_free_team(kmp_root_t *, kmp_team_t *USE_NESTED_HOT_ARG(kmp_info_t *))
std::atomic< kmp_int32 > __kmp_unexecuted_hidden_helper_tasks
KMP_EXPORT int KMPC_CONVENTION kmpc_unset_affinity_mask_proc(int, kmp_affinity_mask_t *)
KMP_EXPORT void __kmpc_end_scope(ident_t *loc, kmp_int32 gtid, void *reserved)
KMP_EXPORT int __kmpc_invoke_task_func(int gtid)
void * __kmpc_aligned_alloc(int gtid, size_t align, size_t sz, omp_allocator_handle_t al)
size_t __kmp_sys_min_stksize
char __kmp_blocktime_units
void * ___kmp_allocate(size_t size KMP_SRC_LOC_DECL)
KMP_EXPORT void KMPC_CONVENTION ompc_set_max_active_levels(int)
struct kmp_sched_flags kmp_sched_flags_t
kmp_hws_item_t __kmp_hws_core
union KMP_ALIGN_CACHE kmp_ordered_team kmp_ordered_team_t
int __kmp_invoke_task_func(int gtid)
struct kmp_base_global kmp_base_global_t
void ompc_set_nested(int flag)
void __kmp_set_strict_num_threads(ident_t *loc, int gtid, int sev, const char *msg)
kmp_uint32 __kmp_barrier_gather_branch_bits[bs_last_barrier]
KMP_EXPORT void __kmpc_scope(ident_t *loc, kmp_int32 gtid, void *reserved)
KMP_EXPORT void KMPC_CONVENTION kmpc_set_stacksize_s(size_t)
size_t __kmp_malloc_pool_incr
static int __kmp_adjust_gtid_for_hidden_helpers(int gtid)
kmp_task_t * __kmp_task_alloc(ident_t *loc_ref, kmp_int32 gtid, kmp_tasking_flags_t *flags, size_t sizeof_kmp_task_t, size_t sizeof_shareds, kmp_routine_entry_t task_entry)
void __kmp_adjust_num_threads(int new_nproc)
void __kmpc_dispatch_fini_8(ident_t *loc, kmp_int32 gtid)
See __kmpc_dispatch_fini_4.
KMP_EXPORT void kmpc_free(void *ptr)
int __kmp_threads_capacity
KMP_EXPORT int __kmpc_test_lock(ident_t *loc, kmp_int32 gtid, void **user_lock)
kmp_info_t ** __kmp_hidden_helper_threads
kmp_uint32 __kmp_le_4(kmp_uint32 value, kmp_uint32 checker)
void __kmp_push_current_task_to_thread(kmp_info_t *this_thr, kmp_team_t *team, int tid)
int __kmp_debug_buf_warn_chars
static int __kmp_gtid_from_tid(int tid, const kmp_team_t *team)
KMP_EXPORT void KMPC_FOR_STATIC_INIT(ident_t *loc, kmp_int32 global_tid, kmp_int32 schedtype, kmp_int32 *plastiter, kmp_int *plower, kmp_int *pupper, kmp_int *pstride, kmp_int incr, kmp_int chunk)
void __kmp_resume_oncore(int target_gtid, kmp_flag_oncore *flag)
bool __kmp_wpolicy_passive
void __kmp_save_internal_controls(kmp_info_t *thread)
size_t KMP_EXPAND_NAME(ompc_capture_affinity)(char *buffer, size_t buf_size, char const *format)
void __kmp_push_task_team_node(kmp_info_t *thread, kmp_team_t *team)
void __kmp_threadprivate_resize_cache(int newCapacity)
union kmp_r_sched kmp_r_sched_t
void __kmp_runtime_initialize(void)
int __kmp_invoke_teams_master(int gtid)
void __kmp_hidden_helper_initialize()
volatile int __kmp_init_hidden_helper_threads
void KMPC_SET_NUM_THREADS(int arg)
KMP_EXPORT kmp_int32 __kmpc_cancellationpoint(ident_t *loc_ref, kmp_int32 gtid, kmp_int32 cncl_kind)
void __kmp_common_destroy_gtid(int gtid)
int __kmp_try_suspend_mx(kmp_info_t *th)
static void __kmp_aux_convert_blocktime(int *bt)
int __kmpc_dispatch_next_8(ident_t *loc, kmp_int32 gtid, kmp_int32 *p_last, kmp_int64 *p_lb, kmp_int64 *p_ub, kmp_int64 *p_st)
See __kmpc_dispatch_next_4.
int __kmp_display_affinity
enum sched_type __kmp_guided
void __kmp_resume_64(int target_gtid, kmp_flag_64< C, S > *flag)
void __kmp_resume_32(int target_gtid, kmp_flag_32< C, S > *flag)
KMP_EXPORT int __kmp_get_teams_thread_limit(void)
#define KMP_INLINE_ARGV_ENTRIES
int __kmp_execute_tasks_32(kmp_info_t *thread, kmp_int32 gtid, kmp_flag_32< C, S > *flag, int final_spin, int *thread_finished, kmp_int32 is_constrained)
const char * __kmp_hw_get_core_type_string(kmp_hw_core_type_t type)
void __kmpc_dispatch_fini_8u(ident_t *loc, kmp_int32 gtid)
See __kmpc_dispatch_fini_4.
PACKED_REDUCTION_METHOD_T __kmp_force_reduction_method
void __kmp_atomic_suspend_64(int th_gtid, kmp_atomic_flag_64< C, S > *flag)
KMP_EXPORT int KMPC_CONVENTION ompc_get_team_size(int)
void * kmp_affinity_mask_t
void __kmp_serial_initialize(void)
omp_allocator_handle_t const omp_thread_mem_alloc
kmp_uint32 __kmp_wait_4(kmp_uint32 volatile *spinner, kmp_uint32 checker, kmp_uint32(*pred)(kmp_uint32, kmp_uint32), void *obj)
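The pred callback receives the current spinner value and the checker; any of the __kmp_eq_4, __kmp_neq_4, __kmp_lt_4, __kmp_le_4, or __kmp_ge_4 helpers in this index fits that slot. A minimal sketch (assumes kmp.h):

#include "kmp.h"

static volatile kmp_uint32 gate = 0; /* set to 1 by another thread */

/* Hedged sketch: spin until gate >= 1 using a predicate helper. */
static void wait_for_gate(void) {
  __kmp_wait_4(&gate, /*checker=*/1, __kmp_ge_4, /*obj=*/NULL);
}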
void __kmp_resume_if_soft_paused()
KMP_EXPORT void __kmpc_init_lock_with_hint(ident_t *loc, kmp_int32 gtid, void **user_lock, uintptr_t hint)
kmp_int32 __kmp_max_task_priority
void __kmp_initialize_bget(kmp_info_t *th)
static void __kmp_assert_valid_gtid(kmp_int32 gtid)
int __kmp_teams_thread_limit
KMP_EXPORT void * kmpc_realloc(void *ptr, size_t size)
void __kmp_cleanup_hierarchy()
KMP_EXPORT kmp_task_t * __kmpc_omp_target_task_alloc(ident_t *loc_ref, kmp_int32 gtid, kmp_int32 flags, size_t sizeof_kmp_task_t, size_t sizeof_shareds, kmp_routine_entry_t task_entry, kmp_int64 device_id)
KMP_EXPORT void __kmpc_init_lock(ident_t *loc, kmp_int32 gtid, void **user_lock)
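The user-lock entry points in this index (init/set/test/unset/destroy, plus the *_nest_lock variants) follow the usual bracket discipline; a hedged sketch (assumes kmp.h):

#include "kmp.h"

static void *user_lock; /* lock word; the runtime may store a pointer here */

/* Hedged sketch of the init/set/unset/destroy bracket. */
static void locked_region(ident_t *loc, kmp_int32 gtid) {
  __kmpc_init_lock(loc, gtid, &user_lock);
  __kmpc_set_lock(loc, gtid, &user_lock);
  /* ... critical section ... */
  __kmpc_unset_lock(loc, gtid, &user_lock);
  __kmpc_destroy_lock(loc, gtid, &user_lock);
}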
void __kmp_serialized_parallel(ident_t *id, kmp_int32 gtid)
void ompc_set_num_threads(int arg)
kmp_int32 __kmp_omp_task(kmp_int32 gtid, kmp_task_t *new_task, bool serialize_immediate)
struct kmp_base_task_team kmp_base_task_team_t
void __kmp_pop_current_task_from_thread(kmp_info_t *this_thr)
void __kmp_gtid_set_specific(int gtid)
char const * __kmp_barrier_pattern_env_name[bs_last_barrier]
void __kmp_internal_begin(void)
std::atomic< int > __kmp_debug_count
void __kmp_task_team_setup(kmp_info_t *this_thr, kmp_team_t *team)
int __kmp_atomic_execute_tasks_64(kmp_info_t *thread, kmp_int32 gtid, kmp_atomic_flag_64< C, S > *flag, int final_spin, int *thread_finished, kmp_int32 is_constrained)
static kmp_info_t * __kmp_thread_from_gtid(int gtid)
void __kmp_expand_file_name(char *result, size_t rlen, char *pattern)
void __kmp_atomic_resume_64(int target_gtid, kmp_atomic_flag_64< C, S > *flag)
void * ___kmp_thread_calloc(kmp_info_t *th, size_t nelem, size_t elsize KMP_SRC_LOC_DECL)
static int __kmp_gtid_from_thread(const kmp_info_t *thr)
omp_allocator_handle_t const llvm_omp_target_shared_mem_alloc
@ KMP_HW_MAX_NUM_CORE_TYPES
@ KMP_HW_CORE_TYPE_UNKNOWN
void __kmpc_dispatch_init_4u(ident_t *loc, kmp_int32 gtid, enum sched_type schedule, kmp_uint32 lb, kmp_uint32 ub, kmp_int32 st, kmp_int32 chunk)
See __kmpc_dispatch_init_4.
void __kmp_suspend_initialize_thread(kmp_info_t *th)
volatile int __kmp_init_serial
@ reduction_method_not_defined
void __kmpc_dispatch_init_8u(ident_t *loc, kmp_int32 gtid, enum sched_type schedule, kmp_uint64 lb, kmp_uint64 ub, kmp_int64 st, kmp_int64 chunk)
See __kmpc_dispatch_init_4.
void __kmp_exit_thread(int exit_status)
KMP_EXPORT kmp_base_depnode_t * __kmpc_task_get_depnode(kmp_task_t *task)
int __kmp_invoke_microtask(microtask_t pkfn, int gtid, int npr, int argc, void *argv[])
kmp_int32 __kmp_hidden_helper_threads_num
void __kmpc_dispatch_fini_4u(ident_t *loc, kmp_int32 gtid)
See __kmpc_dispatch_fini_4.
static void __kmp_type_convert(T1 src, T2 *dest)
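A hedged usage sketch for the conversion helper above (assumes kmp.h; the kmp_convert::to specializations listed later in this index perform the underlying cast, with range checks in debug builds):

#include "kmp.h"

/* Illustrative narrowing conversion through the helper. */
static kmp_int32 narrow(kmp_int64 big) {
  kmp_int32 small;
  __kmp_type_convert(big, &small); /* debug builds assert the value fits */
  return small;
}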
void __kmp_join_call(ident_t *loc, int gtid, int exit_teams=0)
enum kmp_bar_pat kmp_bar_pat_e
void __kmp_fini_memkind()
KMP_EXPORT kmp_int32 __kmp_get_reduce_method(void)
omp_memspace_handle_t const llvm_omp_target_device_mem_space
int __kmp_ignore_mppend(void)
void __kmp_wait_64(kmp_info_t *this_thr, kmp_flag_64<> *flag, int final_spin)
int __kmp_debug_buf_atomic
struct KMP_ALIGN_CACHE kmp_bstate kmp_bstate_t
KMP_EXPORT void __kmpc_unset_lock(ident_t *loc, kmp_int32 gtid, void **user_lock)
kmp_bar_pat_e __kmp_barrier_release_pattern[bs_last_barrier]
std::atomic< kmp_int32 > __kmp_team_counter
void __kmp_reap_worker(kmp_info_t *th)
void __kmpc_dispatch_init_8(ident_t *loc, kmp_int32 gtid, enum sched_type schedule, kmp_int64 lb, kmp_int64 ub, kmp_int64 st, kmp_int64 chunk)
See __kmpc_dispatch_init_4.
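The dispatch init/next/fini triples above form the dynamic-dispatch protocol; a hedged 32-bit sketch of the chunk-pulling loop a compiler might emit (assumes kmp.h; the schedule kind, bounds, and chunk size are illustrative):

#include "kmp.h"

/* Hedged sketch: pull chunks until __kmpc_dispatch_next_4 returns 0. */
static void dynamic_loop(ident_t *loc, kmp_int32 gtid) {
  __kmpc_dispatch_init_4(loc, gtid, kmp_sch_dynamic_chunked,
                         /*lb=*/0, /*ub=*/99, /*st=*/1, /*chunk=*/4);
  kmp_int32 last, lb, ub, st;
  while (__kmpc_dispatch_next_4(loc, gtid, &last, &lb, &ub, &st)) {
    for (kmp_int32 i = lb; i <= ub; i += st) {
      /* ... loop body for iteration i ... */
    }
  }
}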
void __kmp_hidden_helper_threads_deinitz_release()
void __kmp_expand_host_name(char *buffer, size_t size)
int __kmpc_pause_resource(kmp_pause_status_t level)
union KMP_ALIGN_CACHE kmp_info kmp_info_t
enum sched_type __kmp_sch_map[]
void __kmp_task_team_wait(kmp_info_t *this_thr, kmp_team_t *team, int wait=1)
kmp_uint64 __kmp_hardware_timestamp(void)
void ___kmp_free(void *ptr KMP_SRC_LOC_DECL)
union KMP_ALIGN_CACHE kmp_time_global kmp_time_global_t
omp_allocator_handle_t const llvm_omp_target_device_mem_alloc
union KMP_ALIGN_CACHE kmp_global kmp_global_t
omp_allocator_handle_t const omp_null_allocator
kmp_uint32 __kmp_yield_next
void __kmp_aux_set_blocktime(int arg, kmp_info_t *thread, int tid)
short kmp_int16
signed char kmp_int8
#define KMP_BUILD_ASSERT(expr)
#define KMP_DEBUG_ASSERT(cond)
unsigned long long kmp_uint64
static volatile kmp_i18n_cat_status_t status
void __kmp_fatal(kmp_msg_t message,...)
kmp_ticket_lock_t kmp_lock_t
void(* microtask_t)(int *gtid, int *npr,...)
#define INTERNODE_CACHE_LINE
#define KMP_ATTRIBUTE_TARGET_WAITPKG
#define KMP_EXPAND_NAME(api_name)
__attribute__((noinline))
void microtask(int *global_tid, int *bound_tid)
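The microtask_t signature above is what outlined parallel regions use; a hedged sketch of an outlined body (assumes kmp.h; the trailing shared-variable pointer is illustrative of how extra arguments arrive through the varargs / t_argv block):

#include "kmp.h"

/* Hedged sketch: each team thread runs this with its own gtid/btid. */
static void my_microtask(int *global_tid, int *bound_tid, int *shared_var) {
  (void)global_tid; (void)bound_tid;
  /* ... body of the parallel region; *shared_var was captured by the
     caller and passed through the argv block ... */
  (void)shared_var;
}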
struct private_common * data[KMP_HASH_TABLE_SIZE]
struct dispatch_private_info * next
std::atomic< kmp_uint32 > steal_flag
kmp_int32 ordered_dummy[KMP_MAX_ORDERED - 1]
volatile kmp_uint32 ordered_iteration
volatile kmp_uint32 iteration
volatile kmp_int32 num_done
volatile kmp_int64 num_done
volatile kmp_uint64 iteration
kmp_int64 ordered_dummy[KMP_MAX_ORDERED - 3]
volatile kmp_uint64 ordered_iteration
volatile kmp_uint32 buffer_index
kmp_int32 doacross_num_done
union dispatch_shared_info::shared_info u
volatile kmp_int32 doacross_buf_idx
volatile kmp_uint32 * doacross_flags
The ident structure that describes a source location.
kmp_int32 get_openmp_version()
char const * psource
String describing the source location.
kmp_int32 reserved_1
May be used in Fortran; see above.
kmp_int32 reserved_2
No longer really used in Fortran; see above.
kmp_int32 reserved_3
source[4] in Fortran; do not use for C++.
kmp_int32 flags
Also f.flags; KMP_IDENT_xxx flags; KMP_IDENT_KMPC identifies this union member.
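Assembled from the members listed above, the ident structure has roughly this shape (a sketch; the declaration order shown is an assumption, since this index lists members alphabetically):

typedef struct ident {
  kmp_int32 reserved_1;  /* may be used in Fortran */
  kmp_int32 flags;       /* KMP_IDENT_xxx flags */
  kmp_int32 reserved_2;  /* no longer really used in Fortran */
  kmp_int32 reserved_3;  /* source[4] in Fortran */
  char const *psource;   /* string describing the source location */
} ident_t;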
kmp_allocator_t * fb_data
omp_alloctrait_value_t fb
omp_memspace_handle_t memspace
volatile kmp_uint32 t_value
std::atomic< kmp_int32 > npredecessors
std::atomic< kmp_int32 > nrefs
kmp_lock_t * mtx_locks[MAX_MTX_DEPS]
kmp_depnode_list_t * successors
enum dynamic_mode g_dynamic_mode
KMP_ALIGN_CACHE int th_set_nproc
kmp_cg_root_t * th_cg_roots
kmp_taskdata_t * th_current_task
KMP_ALIGN_CACHE kmp_team_p * th_serial_team
kmp_task_team_t * th_task_team
kmp_info_p * th_next_pool
kmp_uint64 th_team_bt_intervals
microtask_t th_teams_microtask
KMP_ALIGN_CACHE volatile kmp_int32 th_next_waiting
struct cons_header * th_cons
struct private_common * th_pri_head
omp_allocator_handle_t th_def_allocator
kmp_uint8 th_active_in_pool
std::atomic< kmp_uint32 > th_used_in_team
struct common_table * th_pri_common
kmp_teams_size_t th_teams_size
volatile void * th_sleep_loc
volatile kmp_uint32 th_spin_here
flag_type th_sleep_loc_type
kmp_proc_bind_t th_set_proc_bind
kmp_info_p * th_team_master
kmp_info_t * r_uber_thread
std::atomic< int > r_in_parallel
kmp_int32 tt_found_proxy_tasks
KMP_ALIGN_CACHE std::atomic< kmp_int32 > tt_unfinished_threads
kmp_bootstrap_lock_t tt_task_pri_lock
std::atomic< kmp_int32 > tt_num_task_pri
kmp_bootstrap_lock_t tt_threads_lock
kmp_int32 tt_untied_task_encountered
kmp_task_pri_t * tt_task_pri_list
kmp_int32 tt_hidden_helper_task_encountered
kmp_thread_data_t * tt_threads_data
KMP_ALIGN_CACHE volatile kmp_uint32 tt_active
kmp_task_team_t * tt_next
omp_allocator_handle_t t_def_allocator
kmp_nested_nthreads_t * t_nested_nth
kmp_proc_bind_t t_proc_bind
KMP_ALIGN_CACHE void ** t_argv
kmp_taskdata_t * t_implicit_task_taskdata
std::atomic< kmp_int32 > t_cancel_request
KMP_ALIGN_CACHE kmp_info_t ** t_threads
dispatch_shared_info_t * t_disp_buffer
KMP_ALIGN_CACHE kmp_internal_control_t * t_control_stack_top
KMP_ALIGN_CACHE int t_max_argc
std::atomic< int > t_construct
KMP_ALIGN_CACHE kmp_ordered_team_t t_ordered
KMP_ALIGN_CACHE int t_master_tid
kmp_int32 td_deque_ntasks
kmp_taskdata_t ** td_deque
kmp_int32 td_deque_last_stolen
kmp_bootstrap_lock_t td_deque_lock
kmp_uint32 * skip_per_level
KMP_ALIGN_CACHE volatile kmp_uint64 b_arrived
kmp_uint8 use_oncore_barrier
struct kmp_bstate * parent_bar
kmp_internal_control_t th_fixed_icvs
struct kmp_cached_addr * next
kmp_int32 cg_thread_limit
static TargetType to(SourceType src)
struct kmp_depend_info::@8::@10 flags
kmp_dephash_entry_t * next_in_bucket
kmp_depnode_list_t * last_set
kmp_depnode_list_t * prev_set
kmp_dephash_entry_t ** buckets
kmp_depnode_list_t * next
void(* th_dxo_fcn)(int *gtid, int *cid, ident_t *)
kmp_int32 th_doacross_buf_idx
volatile kmp_uint32 * th_doacross_flags
dispatch_private_info_t * th_dispatch_pr_current
kmp_int64 * th_doacross_info
dispatch_private_info_t * th_disp_buffer
void(* th_deo_fcn)(int *gtid, int *cid, ident_t *)
dispatch_shared_info_t * th_dispatch_sh_current
kmp_proc_bind_t proc_bind
struct kmp_internal_control * next
PACKED_REDUCTION_METHOD_T packed_reduction_method
volatile int this_construct
kmp_proc_bind_t * bind_types
struct kmp_old_threads_list_t * next
int length[KMP_MAX_FIELDS]
int offset[KMP_MAX_FIELDS]
struct kmp_task_affinity_info::@11 flags
kmp_task_team_list_t * next
kmp_task_team_t * task_team
void * shareds
Pointer to a block of pointers to shared variables.
kmp_int32 part_id
Part ID for the task.
kmp_routine_entry_t routine
Pointer to the routine to call to execute the task.
kmp_uint32 td_taskwait_counter
ident_t * td_taskwait_ident
kmp_task_team_t * td_task_team
kmp_dephash_t * td_dephash
kmp_taskdata_t * td_parent
std::atomic< kmp_int32 > td_incomplete_child_tasks
std::atomic< kmp_int32 > td_untied_count
kmp_taskgroup_t * td_taskgroup
kmp_info_p * td_alloc_thread
kmp_depnode_t * td_depnode
kmp_int32 td_taskwait_thread
kmp_tasking_flags_t td_flags
kmp_taskdata_t * td_last_tied
KMP_ALIGN_CACHE kmp_internal_control_t td_icvs
kmp_event_t td_allow_completion_event
kmp_target_data_t td_target_data
KMP_ALIGN_CACHE std::atomic< kmp_int32 > td_allocated_child_tasks
std::atomic< kmp_int32 > cancel_request
std::atomic< kmp_int32 > count
struct kmp_taskgroup * parent
kmp_int32 reduce_num_data
unsigned priority_specified
unsigned destructors_thunk
struct private_common * next
struct private_common * link
struct private_data * next
union shared_common::@4 cct
union shared_common::@3 ct
union shared_common::@5 dt
struct private_data * pod_init
struct shared_common * next
struct shared_common * data[KMP_HASH_TABLE_SIZE]
dispatch_private_info64_t p64
dispatch_private_info32_t p32
dispatch_shared_info64_t s64
dispatch_shared_info32_t s32
kmp_routine_entry_t destructors
kmp_int32 priority
Priority specified by the user for the task.
enum sched_type r_sched_type
kmp_base_thread_data_t td
kmp_uint64 __kmp_ticks_per_usec
void __kmp_reap_monitor(kmp_info_t *th)
kmp_uint64 __kmp_ticks_per_msec
int __kmp_read_from_file(char const *path, char const *format,...)
void __kmp_register_atfork(void)
void __kmp_free_handle(kmp_thread_t tHandle)
void __kmp_affinity_determine_capable(const char *env_var)
void __kmp_affinity_bind_thread(int proc)
int __kmp_get_load_balance(int max)
int __kmp_still_running(kmp_info_t *th)
void __kmp_initialize_system_tick(void)
int __kmp_is_thread_alive(kmp_info_t *th, DWORD *exit_val)