17#include "kmp_config.h"
27#ifndef KMP_STATIC_STEAL_ENABLED
28#define KMP_STATIC_STEAL_ENABLED 1
30#define KMP_WEIGHTED_ITERATIONS_SUPPORTED \
31 (KMP_AFFINITY_SUPPORTED && KMP_STATIC_STEAL_ENABLED && \
32 (KMP_ARCH_X86 || KMP_ARCH_X86_64))
34#define TASK_CURRENT_NOT_QUEUED 0
35#define TASK_CURRENT_QUEUED 1
37#ifdef BUILD_TIED_TASK_STACK
38#define TASK_STACK_EMPTY 0
39#define TASK_STACK_BLOCK_BITS 5
41#define TASK_STACK_BLOCK_SIZE (1 << TASK_STACK_BLOCK_BITS)
43#define TASK_STACK_INDEX_MASK (TASK_STACK_BLOCK_SIZE - 1)
46#define TASK_NOT_PUSHED 1
47#define TASK_SUCCESSFULLY_PUSHED 0
50#define TASK_EXPLICIT 1
51#define TASK_IMPLICIT 0
54#define TASK_DETACHABLE 1
55#define TASK_UNDETACHABLE 0
57#define KMP_CANCEL_THREADS
58#define KMP_THREAD_ATTR
62#if defined(__ANDROID__)
63#undef KMP_CANCEL_THREADS
69#undef KMP_CANCEL_THREADS
100#if KMP_USE_HIER_SCHED
102#undef KMP_USE_HIER_SCHED
103#define KMP_USE_HIER_SCHED KMP_AFFINITY_SUPPORTED
107#if KMP_USE_HWLOC && KMP_AFFINITY_SUPPORTED && !defined(OMPD_SKIP_HWLOC)
109#ifndef HWLOC_OBJ_NUMANODE
110#define HWLOC_OBJ_NUMANODE HWLOC_OBJ_NODE
112#ifndef HWLOC_OBJ_PACKAGE
113#define HWLOC_OBJ_PACKAGE HWLOC_OBJ_SOCKET
117#if KMP_ARCH_X86 || KMP_ARCH_X86_64
118#include <xmmintrin.h>
122#define KMP_INTERNAL_MALLOC(sz) malloc(sz)
123#define KMP_INTERNAL_FREE(p) free(p)
124#define KMP_INTERNAL_REALLOC(p, sz) realloc((p), (sz))
125#define KMP_INTERNAL_CALLOC(n, sz) calloc((n), (sz))
136#define KMP_HANDLE_SIGNALS ((KMP_OS_UNIX && !KMP_OS_WASI) || KMP_OS_WINDOWS)
141#if !defined NSIG && defined _NSIG
147#pragma weak clock_gettime
159#define UNLIKELY(x) (x)
168#ifndef USE_FAST_MEMORY
169#define USE_FAST_MEMORY 3
172#ifndef KMP_NESTED_HOT_TEAMS
173#define KMP_NESTED_HOT_TEAMS 0
174#define USE_NESTED_HOT_ARG(x)
176#if KMP_NESTED_HOT_TEAMS
177#define USE_NESTED_HOT_ARG(x) , x
179#define USE_NESTED_HOT_ARG(x)
184#ifndef USE_CMP_XCHG_FOR_BGET
185#define USE_CMP_XCHG_FOR_BGET 1
193#define KMP_NSEC_PER_SEC 1000000000L
194#define KMP_USEC_PER_SEC 1000000L
195#define KMP_NSEC_PER_USEC 1000L
278template <
bool C = false,
bool S = true>
class kmp_flag_32;
279template <
bool C = false,
bool S = true>
class kmp_flag_64;
// Pack two 32-bit values into one signed 64-bit value: HIGH_32 occupies the
// upper 32 bits, LOW_32 the lower 32 bits. Both operands are widened through
// kmp_uint64 before shifting/or-ing so that sign extension of a negative
// 32-bit input cannot corrupt the other half.
#define KMP_PACK_64(HIGH_32, LOW_32)                                           \
  ((kmp_int64)((((kmp_uint64)(HIGH_32)) << 32) | (kmp_uint64)(LOW_32)))
297 while (*(_x) == ' ' || *(_x) == '\t') \
300#define SKIP_DIGITS(_x) \
302 while (*(_x) >= '0' && *(_x) <= '9') \
305#define SKIP_TOKEN(_x) \
307 while ((*(_x) >= '0' && *(_x) <= '9') || (*(_x) >= 'a' && *(_x) <= 'z') || \
308 (*(_x) >= 'A' && *(_x) <= 'Z') || *(_x) == '_') \
311#define SKIP_TO(_x, _c) \
313 while (*(_x) != '\0' && *(_x) != (_c)) \
// Min/max helpers used throughout the runtime. Classic unsafe macros: each
// argument is evaluated twice, so never pass expressions with side effects
// (e.g. KMP_MAX(i++, j)).
#define KMP_MAX(x, y) ((x) > (y) ? (x) : (y))
#define KMP_MIN(x, y) ((x) < (y) ? (x) : (y))
335#ifdef USE_LOAD_BALANCE
336 dynamic_load_balance,
345#ifndef KMP_SCHED_TYPE_DEFINED
346#define KMP_SCHED_TYPE_DEFINED
357#if KMP_STATIC_STEAL_ENABLED
358 kmp_sched_static_steal = 102,
// Helpers to query/manipulate the monotonic/nonmonotonic schedule modifier
// bits that are OR-ed into the upper bits of enum sched_type values.
#define SCHEDULE_HAS_MONOTONIC(s) (((s)&kmp_sch_modifier_monotonic) != 0)
#define SCHEDULE_HAS_NONMONOTONIC(s) (((s)&kmp_sch_modifier_nonmonotonic) != 0)
#define SCHEDULE_HAS_NO_MODIFIERS(s)                                           \
  (((s) & (kmp_sch_modifier_nonmonotonic | kmp_sch_modifier_monotonic)) == 0)
// Extract just the modifier bits from a schedule value.
#define SCHEDULE_GET_MODIFIERS(s)                                              \
  ((enum sched_type)(                                                          \
      (s) & (kmp_sch_modifier_nonmonotonic | kmp_sch_modifier_monotonic)))
// OR the modifier bits of m into the schedule lvalue s (assigns back to s).
#define SCHEDULE_SET_MODIFIERS(s, m)                                           \
  (s = (enum sched_type)((kmp_int32)s | (kmp_int32)m))
// Symbolic results for monotonicity decisions.
#define SCHEDULE_NONMONOTONIC 0
#define SCHEDULE_MONOTONIC 1
495 *internal_kind = (
enum sched_type)((
int)*internal_kind |
525enum clock_function_type {
526 clock_function_gettimeofday,
527 clock_function_clock_gettime
532enum mic_type { non_mic, mic1, mic2, mic3, dummy };
537#undef KMP_FAST_REDUCTION_BARRIER
538#define KMP_FAST_REDUCTION_BARRIER 1
540#undef KMP_FAST_REDUCTION_CORE_DUO
541#if KMP_ARCH_X86 || KMP_ARCH_X86_64
542#define KMP_FAST_REDUCTION_CORE_DUO 1
565#if KMP_FAST_REDUCTION_BARRIER
566#define PACK_REDUCTION_METHOD_AND_BARRIER(reduction_method, barrier_type) \
567 ((reduction_method) | (barrier_type))
569#define UNPACK_REDUCTION_METHOD(packed_reduction_method) \
570 ((enum _reduction_method)((packed_reduction_method) & (0x0000FF00)))
572#define UNPACK_REDUCTION_BARRIER(packed_reduction_method) \
573 ((enum barrier_type)((packed_reduction_method) & (0x000000FF)))
575#define PACK_REDUCTION_METHOD_AND_BARRIER(reduction_method, barrier_type) \
578#define UNPACK_REDUCTION_METHOD(packed_reduction_method) \
579 (packed_reduction_method)
581#define UNPACK_REDUCTION_BARRIER(packed_reduction_method) (bs_plain_barrier)
584#define TEST_REDUCTION_METHOD(packed_reduction_method, which_reduction_block) \
585 ((UNPACK_REDUCTION_METHOD(packed_reduction_method)) == \
586 (which_reduction_block))
588#if KMP_FAST_REDUCTION_BARRIER
589#define TREE_REDUCE_BLOCK_WITH_REDUCTION_BARRIER \
590 (PACK_REDUCTION_METHOD_AND_BARRIER(tree_reduce_block, bs_reduction_barrier))
592#define TREE_REDUCE_BLOCK_WITH_PLAIN_BARRIER \
593 (PACK_REDUCTION_METHOD_AND_BARRIER(tree_reduce_block, bs_plain_barrier))
604#pragma warning(disable : 271 310)
638#if KMP_ARCH_X86 || KMP_ARCH_X86_64
639 KMP_HW_CORE_TYPE_ATOM = 0x20,
640 KMP_HW_CORE_TYPE_CORE = 0x40,
647#define KMP_HW_MAX_NUM_CORE_EFFS 8
649#define KMP_DEBUG_ASSERT_VALID_HW_TYPE(type) \
650 KMP_DEBUG_ASSERT(type >= (kmp_hw_t)0 && type < KMP_HW_LAST)
651#define KMP_ASSERT_VALID_HW_TYPE(type) \
652 KMP_ASSERT(type >= (kmp_hw_t)0 && type < KMP_HW_LAST)
654#define KMP_FOREACH_HW_TYPE(type) \
655 for (kmp_hw_t type = (kmp_hw_t)0; type < KMP_HW_LAST; \
656 type = (kmp_hw_t)((int)type + 1))
663#if KMP_AFFINITY_SUPPORTED
667#if _MSC_VER < 1600 && KMP_MSVC_COMPAT
668typedef struct GROUP_AFFINITY {
674#if KMP_GROUP_AFFINITY
675extern int __kmp_num_proc_groups;
677static const int __kmp_num_proc_groups = 1;
679typedef DWORD (*kmp_GetActiveProcessorCount_t)(WORD);
680extern kmp_GetActiveProcessorCount_t __kmp_GetActiveProcessorCount;
682typedef WORD (*kmp_GetActiveProcessorGroupCount_t)(
void);
683extern kmp_GetActiveProcessorGroupCount_t __kmp_GetActiveProcessorGroupCount;
685typedef BOOL (*kmp_GetThreadGroupAffinity_t)(HANDLE, GROUP_AFFINITY *);
686extern kmp_GetThreadGroupAffinity_t __kmp_GetThreadGroupAffinity;
688typedef BOOL (*kmp_SetThreadGroupAffinity_t)(HANDLE,
const GROUP_AFFINITY *,
690extern kmp_SetThreadGroupAffinity_t __kmp_SetThreadGroupAffinity;
693#if KMP_USE_HWLOC && !defined(OMPD_SKIP_HWLOC)
694extern hwloc_topology_t __kmp_hwloc_topology;
695extern int __kmp_hwloc_error;
698extern size_t __kmp_affin_mask_size;
// Affinity is usable iff a mask size has been determined (> 0); ENABLE /
// DISABLE simply record or clear that size.
699#define KMP_AFFINITY_CAPABLE() (__kmp_affin_mask_size > 0)
700#define KMP_AFFINITY_DISABLE() (__kmp_affin_mask_size = 0)
701#define KMP_AFFINITY_ENABLE(mask_size) (__kmp_affin_mask_size = mask_size)
// Iterate i over every bit set in the mask object (via begin/end/next).
702#define KMP_CPU_SET_ITERATE(i, mask) \
703 for (i = (mask)->begin(); (int)i != (mask)->end(); i = (mask)->next(i))
// Thin wrappers forwarding to the mask object's virtual interface.
704#define KMP_CPU_SET(i, mask) (mask)->set(i)
705#define KMP_CPU_ISSET(i, mask) (mask)->is_set(i)
706#define KMP_CPU_CLR(i, mask) (mask)->clear(i)
707#define KMP_CPU_ZERO(mask) (mask)->zero()
708#define KMP_CPU_ISEMPTY(mask) (mask)->empty()
709#define KMP_CPU_COPY(dest, src) (dest)->copy(src)
710#define KMP_CPU_AND(dest, src) (dest)->bitwise_and(src)
// NOTE: max_bit_number is ignored here; the mask object knows its own size.
711#define KMP_CPU_COMPLEMENT(max_bit_number, mask) (mask)->bitwise_not()
712#define KMP_CPU_UNION(dest, src) (dest)->bitwise_or(src)
713#define KMP_CPU_EQUAL(dest, src) (dest)->is_equal(src)
// Mask allocation goes through the global __kmp_affinity_dispatch factory;
// the *_ON_STACK / *_INTERNAL variants are aliases of the same calls here.
714#define KMP_CPU_ALLOC(ptr) (ptr = __kmp_affinity_dispatch->allocate_mask())
715#define KMP_CPU_FREE(ptr) __kmp_affinity_dispatch->deallocate_mask(ptr)
716#define KMP_CPU_ALLOC_ON_STACK(ptr) KMP_CPU_ALLOC(ptr)
717#define KMP_CPU_FREE_FROM_STACK(ptr) KMP_CPU_FREE(ptr)
718#define KMP_CPU_INTERNAL_ALLOC(ptr) KMP_CPU_ALLOC(ptr)
719#define KMP_CPU_INTERNAL_FREE(ptr) KMP_CPU_FREE(ptr)
720#define KMP_CPU_INDEX(arr, i) __kmp_affinity_dispatch->index_mask_array(arr, i)
721#define KMP_CPU_ALLOC_ARRAY(arr, n) \
722 (arr = __kmp_affinity_dispatch->allocate_mask_array(n))
// NOTE: n is unused by deallocate_mask_array in this implementation.
723#define KMP_CPU_FREE_ARRAY(arr, n) \
724 __kmp_affinity_dispatch->deallocate_mask_array(arr)
725#define KMP_CPU_INTERNAL_ALLOC_ARRAY(arr, n) KMP_CPU_ALLOC_ARRAY(arr, n)
726#define KMP_CPU_INTERNAL_FREE_ARRAY(arr, n) KMP_CPU_FREE_ARRAY(arr, n)
// Get/set the OS-level affinity through the mask object; abort_bool is
// forwarded as the mask's abort_on_error argument.
727#define __kmp_get_system_affinity(mask, abort_bool) \
728 (mask)->get_system_affinity(abort_bool)
729#define __kmp_set_system_affinity(mask, abort_bool) \
730 (mask)->set_system_affinity(abort_bool)
731#define __kmp_get_proc_group(mask) (mask)->get_proc_group()
737 void *
operator new(
size_t n);
738 void operator delete(
void *
p);
739 void *
operator new[](
size_t n);
740 void operator delete[](
void *
p);
743 virtual void set(
int i) {}
745 virtual bool is_set(
int i)
const {
return false; }
747 virtual void clear(
int i) {}
749 virtual void zero() {}
751 virtual bool empty()
const {
return true; }
753 virtual void copy(
const Mask *src) {}
755 virtual void bitwise_and(
const Mask *rhs) {}
757 virtual void bitwise_or(
const Mask *rhs) {}
759 virtual void bitwise_not() {}
761 virtual bool is_equal(
const Mask *rhs)
const {
return false; }
764 virtual int begin()
const {
return 0; }
765 virtual int end()
const {
return 0; }
766 virtual int next(
int previous)
const {
return 0; }
768 virtual int set_process_affinity(
bool abort_on_error)
const {
return -1; }
771 virtual int set_system_affinity(
bool abort_on_error)
const {
return -1; }
773 virtual int get_system_affinity(
bool abort_on_error) {
return -1; }
776 virtual int get_proc_group()
const {
return -1; }
777 int get_max_cpu()
const {
780 KMP_CPU_SET_ITERATE(cpu,
this) {
787 void *
operator new(
size_t n);
788 void operator delete(
void *
p);
790 virtual ~KMPAffinity() =
default;
792 virtual void determine_capable(
const char *env_var) {}
794 virtual void bind_thread(
int proc) {}
796 virtual Mask *allocate_mask() {
return nullptr; }
797 virtual void deallocate_mask(Mask *m) {}
798 virtual Mask *allocate_mask_array(
int num) {
return nullptr; }
799 virtual void deallocate_mask_array(Mask *m) {}
800 virtual Mask *index_mask_array(Mask *m,
int index) {
return nullptr; }
801 static void pick_api();
802 static void destroy_api();
810 virtual api_type get_api_type()
const {
816 static bool picked_api;
819typedef KMPAffinity::Mask kmp_affin_mask_t;
820extern KMPAffinity *__kmp_affinity_dispatch;
823class kmp_affinity_raii_t {
824 kmp_affin_mask_t *
mask;
828 kmp_affinity_raii_t(
const kmp_affin_mask_t *new_mask =
nullptr)
829 :
mask(nullptr), restored(false) {
830 if (KMP_AFFINITY_CAPABLE()) {
833 __kmp_get_system_affinity(
mask,
true);
835 __kmp_set_system_affinity(new_mask,
true);
839 if (
mask && KMP_AFFINITY_CAPABLE() && !restored) {
840 __kmp_set_system_affinity(
mask,
true);
845 ~kmp_affinity_raii_t() { restore(); }
851#define KMP_AFFIN_MASK_PRINT_LEN 1024
865enum affinity_top_method {
866 affinity_top_method_all = 0,
867#if KMP_ARCH_X86 || KMP_ARCH_X86_64
868 affinity_top_method_apicid,
869 affinity_top_method_x2apicid,
870 affinity_top_method_x2apicid_1f,
872 affinity_top_method_cpuinfo,
873#if KMP_GROUP_AFFINITY
874 affinity_top_method_group,
876 affinity_top_method_flat,
878 affinity_top_method_hwloc,
880 affinity_top_method_default
883#define affinity_respect_mask_default (2)
885typedef struct kmp_affinity_flags_t {
887 unsigned verbose : 1;
888 unsigned warnings : 1;
889 unsigned respect : 2;
892 unsigned core_types_gran : 1;
893 unsigned core_effs_gran : 1;
894 unsigned omp_places : 1;
895 unsigned reserved : 22;
896} kmp_affinity_flags_t;
899typedef struct kmp_affinity_ids_t {
904typedef struct kmp_affinity_attrs_t {
908 unsigned reserved : 15;
909} kmp_affinity_attrs_t;
910#define KMP_AFFINITY_ATTRS_UNKNOWN \
911 { KMP_HW_CORE_TYPE_UNKNOWN, kmp_hw_attr_t::UNKNOWN_CORE_EFF, 0, 0 }
913typedef struct kmp_affinity_t {
915 enum affinity_type
type;
918 kmp_affinity_attrs_t core_attr_gran;
921 kmp_affinity_flags_t flags;
923 kmp_affin_mask_t *masks;
924 kmp_affinity_ids_t *ids;
925 kmp_affinity_attrs_t *attrs;
926 unsigned num_os_id_masks;
927 kmp_affin_mask_t *os_id_masks;
931#define KMP_AFFINITY_INIT(env) \
933 nullptr, affinity_default, KMP_HW_UNKNOWN, -1, KMP_AFFINITY_ATTRS_UNKNOWN, \
935 {TRUE, FALSE, TRUE, affinity_respect_mask_default, FALSE, FALSE, \
936 FALSE, FALSE, FALSE}, \
937 0, nullptr, nullptr, nullptr, 0, nullptr, env \
940extern enum affinity_top_method __kmp_affinity_top_method;
941extern kmp_affinity_t __kmp_affinity;
942extern kmp_affinity_t __kmp_hh_affinity;
943extern kmp_affinity_t *__kmp_affinities[2];
947extern kmp_affin_mask_t *__kmp_affin_fullMask;
948extern kmp_affin_mask_t *__kmp_affin_origMask;
949extern char *__kmp_cpuinfo_file;
951#if KMP_WEIGHTED_ITERATIONS_SUPPORTED
952extern int __kmp_first_osid_with_ecore;
981extern int __kmp_tool;
982extern char *__kmp_tool_libraries;
985#if KMP_AFFINITY_SUPPORTED
986#define KMP_PLACE_ALL (-1)
987#define KMP_PLACE_UNDEFINED (-2)
989#define KMP_AFFINITY_NON_PROC_BIND \
990 ((__kmp_nested_proc_bind.bind_types[0] == proc_bind_false || \
991 __kmp_nested_proc_bind.bind_types[0] == proc_bind_intel) && \
992 (__kmp_affinity.num_masks > 0 || __kmp_affinity.type == affinity_balanced))
// Round sizeof(type) up to the next multiple of sz bytes (cache-line
// padding helper). Both operands are evaluated more than once.
#define KMP_PAD(type, sz)                                                      \
  (sizeof(type) + (sz - ((sizeof(type) - 1) % (sz)) - 1))

// Reserved global-thread-id sentinel values. Real gtids are >= 0; these
// negative codes mark special states of a thread's gtid slot.
#define KMP_GTID_DNE (-2) // gtid does not exist
#define KMP_GTID_SHUTDOWN (-3) // runtime is shutting down
#define KMP_GTID_MONITOR (-4) // the monitor thread
#define KMP_GTID_UNKNOWN (-5) // gtid not (yet) known
#define KMP_GTID_MIN (-6) // smallest reserved gtid value
1071#define omp_atv_default ((omp_uintptr_t)-1)
1133extern void *
__kmpc_calloc(
int gtid,
size_t nmemb,
size_t sz,
1140extern void *
__kmp_alloc(
int gtid,
size_t align,
size_t sz,
1142extern void *
__kmp_calloc(
int gtid,
size_t align,
size_t nmemb,
size_t sz,
1155#if ENABLE_LIBOMPTARGET
1156extern void __kmp_init_target_task();
1161#define KMP_UINT64_MAX \
1162 (~((kmp_uint64)1 << ((sizeof(kmp_uint64) * (1 << 3)) - 1)))
1164#define KMP_MIN_NTH 1
1167#if defined(PTHREAD_THREADS_MAX) && PTHREAD_THREADS_MAX < INT_MAX
1168#define KMP_MAX_NTH PTHREAD_THREADS_MAX
1174#define KMP_MAX_NTH 64
1176#define KMP_MAX_NTH INT_MAX
1181#ifdef PTHREAD_STACK_MIN
1182#define KMP_MIN_STKSIZE ((size_t)PTHREAD_STACK_MIN)
1184#define KMP_MIN_STKSIZE ((size_t)(32 * 1024))
1187#if KMP_OS_AIX && KMP_ARCH_PPC
1188#define KMP_MAX_STKSIZE 0x10000000
1190#define KMP_MAX_STKSIZE (~((size_t)1 << ((sizeof(size_t) * (1 << 3)) - 1)))
1194#define KMP_DEFAULT_STKSIZE ((size_t)(2 * 1024 * 1024))
1195#elif KMP_ARCH_X86_64
1196#define KMP_DEFAULT_STKSIZE ((size_t)(4 * 1024 * 1024))
1197#define KMP_BACKUP_STKSIZE ((size_t)(2 * 1024 * 1024))
1201#define KMP_DEFAULT_STKSIZE ((size_t)(4 * 1024 * 1024))
1204#define KMP_DEFAULT_STKSIZE ((size_t)(4 * 1024 * 1024))
1206#define KMP_DEFAULT_STKSIZE ((size_t)(1024 * 1024))
1209#define KMP_DEFAULT_MALLOC_POOL_INCR ((size_t)(1024 * 1024))
1210#define KMP_MIN_MALLOC_POOL_INCR ((size_t)(4 * 1024))
1211#define KMP_MAX_MALLOC_POOL_INCR \
1212 (~((size_t)1 << ((sizeof(size_t) * (1 << 3)) - 1)))
1214#define KMP_MIN_STKOFFSET (0)
1215#define KMP_MAX_STKOFFSET KMP_MAX_STKSIZE
1217#define KMP_DEFAULT_STKOFFSET KMP_MIN_STKOFFSET
1219#define KMP_DEFAULT_STKOFFSET CACHE_LINE
1222#define KMP_MIN_STKPADDING (0)
1223#define KMP_MAX_STKPADDING (2 * 1024 * 1024)
1225#define KMP_BLOCKTIME_MULTIPLIER \
1227#define KMP_MIN_BLOCKTIME (0)
1228#define KMP_MAX_BLOCKTIME \
1232#define KMP_DEFAULT_BLOCKTIME (__kmp_is_hybrid_cpu() ? (0) : (200000))
1235#define KMP_DEFAULT_MONITOR_STKSIZE ((size_t)(64 * 1024))
1236#define KMP_MIN_MONITOR_WAKEUPS (1)
1237#define KMP_MAX_MONITOR_WAKEUPS (1000)
// Map a blocktime setting to a monitor wakeup count. The two extreme
// settings are special-cased; otherwise the result is the larger of the
// caller's monitor_wakeups and the rate implied by the blocktime
// (KMP_BLOCKTIME_MULTIPLIER / blocktime). Arguments are evaluated more
// than once.
1241#define KMP_WAKEUPS_FROM_BLOCKTIME(blocktime, monitor_wakeups) \
1242 (((blocktime) == KMP_MAX_BLOCKTIME) ? (monitor_wakeups) \
1243 : ((blocktime) == KMP_MIN_BLOCKTIME) ? KMP_MAX_MONITOR_WAKEUPS \
1244 : ((monitor_wakeups) > (KMP_BLOCKTIME_MULTIPLIER / (blocktime))) \
1245 ? (monitor_wakeups) \
1246 : (KMP_BLOCKTIME_MULTIPLIER) / (blocktime))
// Number of monitor intervals covering a blocktime, rounded up (ceiling
// division by the interval length KMP_BLOCKTIME_MULTIPLIER/monitor_wakeups).
1250#define KMP_INTERVALS_FROM_BLOCKTIME(blocktime, monitor_wakeups) \
1251 (((blocktime) + (KMP_BLOCKTIME_MULTIPLIER / (monitor_wakeups)) - 1) / \
1252 (KMP_BLOCKTIME_MULTIPLIER / (monitor_wakeups)))
// Effective blocktime for a thread: the per-thread ICV when it was
// explicitly set (bt_set), else the global default __kmp_dflt_blocktime.
1254#define KMP_BLOCKTIME(team, tid) \
1255 (get__bt_set(team, tid) ? get__blocktime(team, tid) : __kmp_dflt_blocktime)
1256#if KMP_OS_UNIX && (KMP_ARCH_X86 || KMP_ARCH_X86_64)
1260#if KMP_COMPILER_ICC || KMP_COMPILER_ICX
1261#define KMP_NOW() ((kmp_uint64)_rdtsc())
1263#define KMP_NOW() __kmp_hardware_timestamp()
1265#define KMP_BLOCKTIME_INTERVAL(team, tid) \
1266 ((kmp_uint64)KMP_BLOCKTIME(team, tid) * __kmp_ticks_per_usec)
1267#define KMP_BLOCKING(goal, count) ((goal) > KMP_NOW())
1271#define KMP_NOW() __kmp_now_nsec()
1272#define KMP_BLOCKTIME_INTERVAL(team, tid) \
1273 ((kmp_uint64)KMP_BLOCKTIME(team, tid) * (kmp_uint64)KMP_NSEC_PER_USEC)
1274#define KMP_BLOCKING(goal, count) ((count) % 1000 != 0 || (goal) > KMP_NOW())
// Bounds and defaults used when validating user-supplied runtime settings.
// Statistics output width (columns).
#define KMP_MIN_STATSCOLS 40
#define KMP_MAX_STATSCOLS 4096
#define KMP_DEFAULT_STATSCOLS 80

#define KMP_MIN_INTERVAL 0
#define KMP_MAX_INTERVAL (INT_MAX - 1)
#define KMP_DEFAULT_INTERVAL 0

// Loop-schedule chunk size bounds.
#define KMP_MIN_CHUNK 1
#define KMP_MAX_CHUNK (INT_MAX - 1)
#define KMP_DEFAULT_CHUNK 1

// Number of dispatch buffers.
#define KMP_MIN_DISP_NUM_BUFF 1
#define KMP_DFLT_DISP_NUM_BUFF 7
#define KMP_MAX_DISP_NUM_BUFF 4096

#define KMP_MAX_ORDERED 8

#define KMP_MAX_FIELDS 32

#define KMP_MAX_BRANCH_BITS 31

// Limits capped at INT_MAX (max-active-levels, default-device, task
// priority).
#define KMP_MAX_ACTIVE_LEVELS_LIMIT INT_MAX

#define KMP_MAX_DEFAULT_DEVICE_LIMIT INT_MAX

#define KMP_MAX_TASK_PRIORITY_LIMIT INT_MAX
1309#if KMP_ARCH_X86 || KMP_ARCH_X86_64
1310#define KMP_TLS_GTID_MIN 5
1312#define KMP_TLS_GTID_MIN INT_MAX
// Predicates on team-local thread ids (tid) and global thread ids (gtid).
// The primary thread of a team has tid 0; the initial thread of the process
// has gtid 0.
#define KMP_MASTER_TID(tid) (0 == (tid))
#define KMP_WORKER_TID(tid) (0 != (tid))

// gtid-based variants translate the gtid to a team-local tid first.
#define KMP_MASTER_GTID(gtid) (0 == __kmp_tid_from_gtid((gtid)))
#define KMP_WORKER_GTID(gtid) (0 != __kmp_tid_from_gtid((gtid)))
#define KMP_INITIAL_GTID(gtid) (0 == (gtid))
1324#define TRUE (!FALSE)
1330#define KMP_INIT_WAIT 64U
1331#define KMP_NEXT_WAIT 32U
1333#define KMP_INIT_WAIT 1024U
1334#define KMP_NEXT_WAIT 512U
1337#define KMP_INIT_WAIT 1024U
1338#define KMP_NEXT_WAIT 512U
1339#elif KMP_OS_DRAGONFLY
1341#define KMP_INIT_WAIT 1024U
1342#define KMP_NEXT_WAIT 512U
1345#define KMP_INIT_WAIT 1024U
1346#define KMP_NEXT_WAIT 512U
1349#define KMP_INIT_WAIT 1024U
1350#define KMP_NEXT_WAIT 512U
1353#define KMP_INIT_WAIT 1024U
1354#define KMP_NEXT_WAIT 512U
1357#define KMP_INIT_WAIT 1024U
1358#define KMP_NEXT_WAIT 512U
1361#define KMP_INIT_WAIT 1024U
1362#define KMP_NEXT_WAIT 512U
1365#define KMP_INIT_WAIT 1024U
1366#define KMP_NEXT_WAIT 512U
1369#define KMP_INIT_WAIT 1024U
1370#define KMP_NEXT_WAIT 512U
1373#if KMP_ARCH_X86 || KMP_ARCH_X86_64
1374typedef struct kmp_cpuid {
1381typedef struct kmp_cpuinfo_flags_t {
1384 unsigned hybrid : 1;
1385 unsigned reserved : 29;
1386} kmp_cpuinfo_flags_t;
1388typedef struct kmp_cpuinfo {
1395 kmp_cpuinfo_flags_t flags;
1398 char name[3 *
sizeof(kmp_cpuid_t)];
1401extern void __kmp_query_cpuid(kmp_cpuinfo_t *
p);
1406static inline void __kmp_x86_cpuid(
int leaf,
int subleaf,
struct kmp_cpuid *
p) {
1407 __asm__ __volatile__(
"cpuid"
1408 :
"=a"(
p->eax),
"=b"(
p->ebx),
"=c"(
p->ecx),
"=d"(
p->edx)
1409 :
"a"(leaf),
"c"(subleaf));
1412static inline void __kmp_load_x87_fpu_control_word(
const kmp_int16 *
p) {
1413 __asm__ __volatile__(
"fldcw %0" : :
"m"(*
p));
1416static inline void __kmp_store_x87_fpu_control_word(
kmp_int16 *
p) {
1417 __asm__ __volatile__(
"fstcw %0" :
"=m"(*
p));
1419static inline void __kmp_clear_x87_fpu_status_word() {
1422 struct x87_fpu_state {
1431 struct x87_fpu_state fpu_state = {0, 0, 0, 0, 0, 0, 0};
1432 __asm__ __volatile__(
"fstenv %0\n\t"
1433 "andw $0x7f00, %1\n\t"
1435 :
"+m"(fpu_state),
"+m"(fpu_state.sw));
1437 __asm__ __volatile__(
"fnclex");
1441static inline void __kmp_load_mxcsr(
const kmp_uint32 *
p) { _mm_setcsr(*
p); }
1442static inline void __kmp_store_mxcsr(
kmp_uint32 *
p) { *
p = _mm_getcsr(); }
1444static inline void __kmp_load_mxcsr(
const kmp_uint32 *
p) {}
1445static inline void __kmp_store_mxcsr(
kmp_uint32 *
p) { *
p = 0; }
1449extern void __kmp_x86_cpuid(
int mode,
int mode2,
struct kmp_cpuid *
p);
1450extern void __kmp_load_x87_fpu_control_word(
const kmp_int16 *
p);
1451extern void __kmp_store_x87_fpu_control_word(
kmp_int16 *
p);
1452extern void __kmp_clear_x87_fpu_status_word();
1453static inline void __kmp_load_mxcsr(
const kmp_uint32 *
p) { _mm_setcsr(*
p); }
1454static inline void __kmp_store_mxcsr(
kmp_uint32 *
p) { *
p = _mm_getcsr(); }
1457#define KMP_X86_MXCSR_MASK 0xffffffc0
1462#if KMP_HAVE_WAITPKG_INTRINSICS
1463#if KMP_HAVE_IMMINTRIN_H
1464#include <immintrin.h>
1465#elif KMP_HAVE_INTRIN_H
1471static inline int __kmp_tpause(uint32_t hint, uint64_t
counter) {
1472#if !KMP_HAVE_WAITPKG_INTRINSICS
1473 uint32_t timeHi = uint32_t(
counter >> 32);
1474 uint32_t timeLo = uint32_t(
counter & 0xffffffff);
1476 __asm__
volatile(
"#tpause\n.byte 0x66, 0x0F, 0xAE, 0xF1\n"
1482 :
"a"(timeLo),
"d"(timeHi),
"c"(hint)
1486 return _tpause(hint,
counter);
1490static inline void __kmp_umonitor(
void *cacheline) {
1491#if !KMP_HAVE_WAITPKG_INTRINSICS
1492 __asm__
volatile(
"# umonitor\n.byte 0xF3, 0x0F, 0xAE, 0x01 "
1497 _umonitor(cacheline);
1501static inline int __kmp_umwait(uint32_t hint, uint64_t
counter) {
1502#if !KMP_HAVE_WAITPKG_INTRINSICS
1503 uint32_t timeHi = uint32_t(
counter >> 32);
1504 uint32_t timeLo = uint32_t(
counter & 0xffffffff);
1506 __asm__
volatile(
"#umwait\n.byte 0xF2, 0x0F, 0xAE, 0xF1\n"
1512 :
"a"(timeLo),
"d"(timeHi),
"c"(hint)
1516 return _umwait(hint,
counter);
1521#include <pmmintrin.h>
1529__kmp_mm_monitor(
void *cacheline,
unsigned extensions,
unsigned hints) {
1530 _mm_monitor(cacheline, extensions, hints);
1536__kmp_mm_mwait(
unsigned extensions,
unsigned hints) {
1537 _mm_mwait(extensions, hints);
1542extern void __kmp_x86_pause(
void);
1548static inline void __kmp_x86_pause(
void) { _mm_delay_32(300); }
1550static inline void __kmp_x86_pause(
void) { _mm_pause(); }
1552#define KMP_CPU_PAUSE() __kmp_x86_pause()
1554#define KMP_PPC64_PRI_LOW() __asm__ volatile("or 1, 1, 1")
1555#define KMP_PPC64_PRI_MED() __asm__ volatile("or 2, 2, 2")
1556#define KMP_PPC64_PRI_LOC_MB() __asm__ volatile("" : : : "memory")
1557#define KMP_CPU_PAUSE() \
1559 KMP_PPC64_PRI_LOW(); \
1560 KMP_PPC64_PRI_MED(); \
1561 KMP_PPC64_PRI_LOC_MB(); \
1564#define KMP_CPU_PAUSE()
1567#define KMP_INIT_YIELD(count) \
1568 { (count) = __kmp_yield_init; }
1570#define KMP_INIT_BACKOFF(time) \
1571 { (time) = __kmp_pause_init; }
1573#define KMP_OVERSUBSCRIBED \
1574 (TCR_4(__kmp_nth) > (__kmp_avail_proc ? __kmp_avail_proc : __kmp_xproc))
1576#define KMP_TRY_YIELD \
1577 ((__kmp_use_yield == 1) || (__kmp_use_yield == 2 && (KMP_OVERSUBSCRIBED)))
1579#define KMP_TRY_YIELD_OVERSUB \
1580 ((__kmp_use_yield == 1 || __kmp_use_yield == 2) && (KMP_OVERSUBSCRIBED))
1582#define KMP_YIELD(cond) \
1585 if ((cond) && (KMP_TRY_YIELD)) \
1589#define KMP_YIELD_OVERSUB() \
1592 if ((KMP_TRY_YIELD_OVERSUB)) \
1598#define KMP_YIELD_SPIN(count) \
1601 if (KMP_TRY_YIELD) { \
1605 (count) = __kmp_yield_next; \
1616#define KMP_TPAUSE_MAX_MASK ((kmp_uint64)0xFFFF)
1617#define KMP_YIELD_OVERSUB_ELSE_SPIN(count, time) \
1619 if (__kmp_tpause_enabled) { \
1620 if (KMP_OVERSUBSCRIBED) { \
1621 __kmp_tpause(0, (time)); \
1623 __kmp_tpause(__kmp_tpause_hint, (time)); \
1625 (time) = (time << 1 | 1) & KMP_TPAUSE_MAX_MASK; \
1628 if ((KMP_TRY_YIELD_OVERSUB)) { \
1630 } else if (__kmp_use_yield == 1) { \
1634 (count) = __kmp_yield_next; \
1640#define KMP_YIELD_OVERSUB_ELSE_SPIN(count, time) \
1643 if ((KMP_TRY_YIELD_OVERSUB)) \
1645 else if (__kmp_use_yield == 1) { \
1649 (count) = __kmp_yield_next; \
1677#define IS_CONS_TYPE_ORDERED(ct) ((ct) == ct_pdo_ordered)
1703typedef HANDLE kmp_thread_t;
1704typedef DWORD kmp_key_t;
1708typedef pthread_t kmp_thread_t;
1709typedef pthread_key_t kmp_key_t;
1728typedef int kmp_itt_mark_t;
1729#define KMP_ITT_DEBUG 0
1761typedef void *(*kmpc_ctor)(
void *);
1774typedef void *(*kmpc_cctor)(
void *,
void *);
1784typedef void *(*kmpc_ctor_vec)(
void *, size_t);
1796typedef void *(*kmpc_cctor_vec)(
void *,
void *,
// Pointer-keyed hash table parameters. The table has 2^KMP_HASH_TABLE_LOG2
// buckets; KMP_HASH drops the low KMP_HASH_SHIFT bits of the pointer
// (which carry little entropy due to alignment) and masks to a bucket index.
#define KMP_HASH_TABLE_LOG2 9 // log2 of the hash table size
#define KMP_HASH_TABLE_SIZE                                                    \
  (1 << KMP_HASH_TABLE_LOG2) // size of the hash table
#define KMP_HASH_SHIFT 3 // throw away this many low bits of the address
#define KMP_HASH(x)                                                            \
  ((((kmp_uintptr_t)x) >> KMP_HASH_SHIFT) & (KMP_HASH_TABLE_SIZE - 1))
1865#if KMP_USE_HIER_SCHED
1868typedef struct kmp_hier_private_bdata_t {
1872} kmp_hier_private_bdata_t;
1886#if KMP_STATIC_STEAL_ENABLED
1912#if KMP_WEIGHTED_ITERATIONS_SUPPORTED
1922#if CACHE_LINE <= 128
1951#if KMP_WEIGHTED_ITERATIONS_SUPPORTED
1962#if CACHE_LINE <= 128
2021#if KMP_USE_HIER_SCHED
2057#if KMP_USE_HIER_SCHED
2082#if KMP_USE_INTERNODE_ALIGNMENT
// Barrier state word layout. Bit positions first, then the derived
// single-bit state values. The #error checks that follow in this header
// require the sleep and unused bits to lie strictly below the bump bit.
#define KMP_INIT_BARRIER_STATE 0 // initial (zeroed) barrier state
#define KMP_BARRIER_SLEEP_BIT 0 // bit position of the sleep flag
#define KMP_BARRIER_UNUSED_BIT 1 // bit kept unused in the state word
#define KMP_BARRIER_BUMP_BIT 2 // lowest bit of the state counter

#define KMP_BARRIER_SLEEP_STATE (1 << KMP_BARRIER_SLEEP_BIT)
#define KMP_BARRIER_UNUSED_STATE (1 << KMP_BARRIER_UNUSED_BIT)
#define KMP_BARRIER_STATE_BUMP (1 << KMP_BARRIER_BUMP_BIT)
2100#if (KMP_BARRIER_SLEEP_BIT >= KMP_BARRIER_BUMP_BIT)
2101#error "Barrier sleep bit must be smaller than barrier bump bit"
2103#if (KMP_BARRIER_UNUSED_BIT >= KMP_BARRIER_BUMP_BIT)
2104#error "Barrier unused bit must be smaller than barrier bump bit"
2108#define KMP_BARRIER_NOT_WAITING 0
2109#define KMP_BARRIER_OWN_FLAG \
2111#define KMP_BARRIER_PARENT_FLAG \
2113#define KMP_BARRIER_SWITCH_TO_OWN_FLAG \
2115#define KMP_BARRIER_SWITCHING \
2118#define KMP_NOT_SAFE_TO_REAP \
2120#define KMP_SAFE_TO_REAP 1
2135#if KMP_FAST_REDUCTION_BARRIER
2142#if !KMP_FAST_REDUCTION_BARRIER
2143#define bs_reduction_barrier bs_plain_barrier
2158#define KMP_BARRIER_ICV_PUSH 1
2255typedef struct kmp_win32_mutex {
2257 CRITICAL_SECTION cs;
2260typedef struct kmp_win32_cond {
2265 kmp_win32_mutex_t waiters_count_lock_;
2272 int wait_generation_count_;
2284 pthread_cond_t c_cond;
2287typedef union kmp_cond_union kmp_cond_align_t;
2292 pthread_mutex_t m_mutex;
2295typedef union kmp_mutex_union kmp_mutex_align_t;
2307 volatile int ds_alive;
2336#if !USE_CMP_XCHG_FOR_BGET
2337#ifdef USE_QUEUING_LOCK_FOR_BGET
2353#define KMP_CHECK_UPDATE(a, b) \
2356#define KMP_CHECK_UPDATE_SYNC(a, b) \
2358 TCW_SYNC_PTR((a), (b))
// Read accessors for per-thread ICVs, addressed by a (team, tid) pair.
// Each resolves to the ICV block of that thread's current task:
//   (xteam)->t.t_threads[xtid]->th.th_current_task->td_icvs.<field>
2360#define get__blocktime(xteam, xtid) \
2361 ((xteam)->t.t_threads[(xtid)]->th.th_current_task->td_icvs.blocktime)
2362#define get__bt_set(xteam, xtid) \
2363 ((xteam)->t.t_threads[(xtid)]->th.th_current_task->td_icvs.bt_set)
2365#define get__bt_intervals(xteam, xtid) \
2366 ((xteam)->t.t_threads[(xtid)]->th.th_current_task->td_icvs.bt_intervals)
2369#define get__dynamic_2(xteam, xtid) \
2370 ((xteam)->t.t_threads[(xtid)]->th.th_current_task->td_icvs.dynamic)
2371#define get__nproc_2(xteam, xtid) \
2372 ((xteam)->t.t_threads[(xtid)]->th.th_current_task->td_icvs.nproc)
2373#define get__sched_2(xteam, xtid) \
2374 ((xteam)->t.t_threads[(xtid)]->th.th_current_task->td_icvs.sched)
2376#define set__blocktime_team(xteam, xtid, xval) \
2377 (((xteam)->t.t_threads[(xtid)]->th.th_current_task->td_icvs.blocktime) = \
2381#define set__bt_intervals_team(xteam, xtid, xval) \
2382 (((xteam)->t.t_threads[(xtid)]->th.th_current_task->td_icvs.bt_intervals) = \
// Setters/getters for per-thread ICVs, addressed either through a
// (team, tid) pair or directly through a thread pointer (xthread); all
// resolve to th_current_task->td_icvs.<field>.
2386#define set__bt_set_team(xteam, xtid, xval) \
2387 (((xteam)->t.t_threads[(xtid)]->th.th_current_task->td_icvs.bt_set) = (xval))
2389#define set__dynamic(xthread, xval) \
2390 (((xthread)->th.th_current_task->td_icvs.dynamic) = (xval))
// NOTE(review): returns FTN_TRUE/FTN_FALSE rather than the raw field --
// presumably for the Fortran-facing API; confirm at call sites.
2391#define get__dynamic(xthread) \
2392 (((xthread)->th.th_current_task->td_icvs.dynamic) ? (FTN_TRUE) : (FTN_FALSE))
2394#define set__nproc(xthread, xval) \
2395 (((xthread)->th.th_current_task->td_icvs.nproc) = (xval))
2397#define set__thread_limit(xthread, xval) \
2398 (((xthread)->th.th_current_task->td_icvs.thread_limit) = (xval))
2400#define set__max_active_levels(xthread, xval) \
2401 (((xthread)->th.th_current_task->td_icvs.max_active_levels) = (xval))
2403#define get__max_active_levels(xthread) \
2404 ((xthread)->th.th_current_task->td_icvs.max_active_levels)
2406#define set__sched(xthread, xval) \
2407 (((xthread)->th.th_current_task->td_icvs.sched) = (xval))
2409#define set__proc_bind(xthread, xval) \
2410 (((xthread)->th.th_current_task->td_icvs.proc_bind) = (xval))
2411#define get__proc_bind(xthread) \
2412 ((xthread)->th.th_current_task->td_icvs.proc_bind)
2436#define KMP_TASK_TO_TASKDATA(task) (((kmp_taskdata_t *)task) - 1)
2437#define KMP_TASKDATA_TO_TASK(taskdata) (kmp_task_t *)(taskdata + 1)
2441#define KMP_TASKING_ENABLED(task_team) \
2442 (TRUE == TCR_SYNC_4((task_team)->tt.tt_found_tasks))
2481 std::atomic<kmp_int32>
// Task-dependence type flags (bitmask values).
#define KMP_DEP_IN 0x1
#define KMP_DEP_OUT 0x2
#define KMP_DEP_INOUT 0x3 // == (KMP_DEP_IN | KMP_DEP_OUT)
#define KMP_DEP_MTX 0x4 // NOTE(review): presumably mutexinoutset -- confirm
#define KMP_DEP_SET 0x8
#define KMP_DEP_ALL 0x80
2510#if defined(__BYTE_ORDER__) && (__BYTE_ORDER__ == __ORDER_BIG_ENDIAN__)
2537#define MAX_MTX_DEPS 4
2545#if KMP_SUPPORT_GRAPH_OUTPUT
2602#define INIT_MAPSIZE 50
2604typedef struct kmp_taskgraph_flags {
2605 unsigned nowait : 1;
2606 unsigned re_record : 1;
2607 unsigned reserved : 30;
2608} kmp_taskgraph_flags_t;
2611typedef struct kmp_node_info {
2615 std::atomic<kmp_int32>
2616 npredecessors_counter;
2623typedef enum kmp_tdg_status {
2625 KMP_TDG_RECORDING = 1,
2630typedef struct kmp_tdg_info {
2632 kmp_taskgraph_flags_t tdg_flags;
2636 kmp_node_info_t *record_map;
2637 kmp_tdg_status_t tdg_status =
2639 std::atomic<kmp_int32> num_tasks;
2643 void *rec_taskred_data;
2648extern int __kmp_tdg_dot;
2650extern kmp_tdg_info_t **__kmp_global_tdgs;
2653extern std::atomic<kmp_int32> __kmp_tdg_task_id;
2657#ifdef BUILD_TIED_TASK_STACK
2660typedef struct kmp_stack_block {
2662 struct kmp_stack_block *sb_next;
2663 struct kmp_stack_block *sb_prev;
2666typedef struct kmp_task_stack {
2667 kmp_stack_block_t ts_first_block;
2675#if defined(__BYTE_ORDER__) && (__BYTE_ORDER__ == __ORDER_BIG_ENDIAN__)
2763 std::atomic<kmp_int32>
2773#if defined(KMP_GOMP_COMPAT)
2778#if defined(KMP_GOMP_COMPAT)
2780 void (*td_copy_func)(
void *,
void *);
2787 bool is_taskgraph = 0;
2788 kmp_tdg_info_t *tdg;
2810#ifdef BUILD_TIED_TASK_STACK
2811 kmp_task_stack_t td_susp_tied_tasks;
// Task deques start at 2^TASK_DEQUE_BITS entries; a deque's size stays a
// power of two so that (size - 1) can serve as an index mask.
#define TASK_DEQUE_BITS 8 // used only to compute the initial size below
#define INITIAL_TASK_DEQUE_SIZE (1 << TASK_DEQUE_BITS)
#define TASK_DEQUE_SIZE(td) ((td).td_deque_size)
#define TASK_DEQUE_MASK(td) ((td).td_deque_size - 1)
2879#if (USE_FAST_MEMORY == 3) || (USE_FAST_MEMORY == 5)
2882typedef struct kmp_free_list {
2883 void *th_free_list_self;
2884 void *th_free_list_sync;
2886 void *th_free_list_other;
2890#if KMP_NESTED_HOT_TEAMS
2893typedef struct kmp_hot_team_ptr {
2896} kmp_hot_team_ptr_t;
2951 int th_team_bt_intervals;
2957#if KMP_AFFINITY_SUPPORTED
2958 kmp_affin_mask_t *th_affin_mask;
2959 kmp_affinity_ids_t th_topology_ids;
2960 kmp_affinity_attrs_t th_topology_attrs;
2966#if KMP_NESTED_HOT_TEAMS
2967 kmp_hot_team_ptr_t *th_hot_teams;
2973#if KMP_AFFINITY_SUPPORTED
2974 int th_current_place;
3027#if KMP_USE_HIER_SCHED
3029 kmp_hier_private_bdata_t *th_hier_bar_data;
3038#if (USE_FAST_MEMORY == 3) || (USE_FAST_MEMORY == 5)
3040 kmp_free_list_t th_free_lists[NUM_LISTS];
3045 kmp_win32_cond_t th_suspend_cv;
3046 kmp_win32_mutex_t th_suspend_mx;
3047 std::atomic<int> th_suspend_init;
3050 kmp_cond_align_t th_suspend_cv;
3051 kmp_mutex_align_t th_suspend_mx;
3052 std::atomic<int> th_suspend_init_count;
3056 kmp_itt_mark_t th_itt_mark_single;
3059#if KMP_STATS_ENABLED
3060 kmp_stats_list *th_stats;
3063 std::atomic<bool> th_blocking;
3095#define KMP_MIN_MALLOC_ARGV_ENTRIES 100
3101#if KMP_ARCH_X86 || KMP_ARCH_X86_64
3102#define KMP_INLINE_ARGV_BYTES \
3104 ((3 * KMP_PTR_SKIP + 2 * sizeof(int) + 2 * sizeof(kmp_int8) + \
3105 sizeof(kmp_int16) + sizeof(kmp_uint32)) % \
3108#define KMP_INLINE_ARGV_BYTES \
3109 (2 * CACHE_LINE - ((3 * KMP_PTR_SKIP + 2 * sizeof(int)) % CACHE_LINE))
3111#define KMP_INLINE_ARGV_ENTRIES (int)(KMP_INLINE_ARGV_BYTES / KMP_PTR_SKIP)
3122 std::atomic<void *> t_tg_reduce_data[2];
3123 std::atomic<int> t_tg_fini_counter[2];
3155#if KMP_ARCH_X86 || KMP_ARCH_X86_64
3176#if KMP_AFFINITY_SUPPORTED
3186#if (KMP_ARCH_X86 || KMP_ARCH_X86_64)
3191 char dummy_padding[1024];
3201 std::atomic<kmp_uint32> t_copyin_counter;
3260#if KMP_AFFINITY_SUPPORTED
3261 int r_affinity_assigned;
3289extern int __kmp_forkjoin_frames;
3290extern int __kmp_forkjoin_frames_mode;
3296extern int kmp_a_debug;
3297extern int kmp_b_debug;
3298extern int kmp_c_debug;
3299extern int kmp_d_debug;
3300extern int kmp_e_debug;
3301extern int kmp_f_debug;
3305#define KMP_DEBUG_BUF_LINES_INIT 512
3306#define KMP_DEBUG_BUF_LINES_MIN 1
3308#define KMP_DEBUG_BUF_CHARS_INIT 128
3309#define KMP_DEBUG_BUF_CHARS_MIN 2
3327extern int __kmp_par_range;
3329#define KMP_PAR_RANGE_ROUTINE_LEN 1024
3330extern char __kmp_par_range_routine[KMP_PAR_RANGE_ROUTINE_LEN];
3331#define KMP_PAR_RANGE_FILENAME_LEN 1024
3332extern char __kmp_par_range_filename[KMP_PAR_RANGE_FILENAME_LEN];
3333extern int __kmp_par_range_lb;
3334extern int __kmp_par_range_ub;
3344#if KMP_ARCH_X86 || KMP_ARCH_X86_64
3345extern kmp_cpuinfo_t __kmp_cpuinfo;
3347#elif KMP_OS_DARWIN && KMP_ARCH_AARCH64
3360extern volatile int __kmp_init_monitor;
3414extern size_t __kmp_monitor_stksize;
3429extern int __kmp_suspend_count;
3471 if (*bt > INT_MAX / 1000) {
3472 *bt = INT_MAX / 1000;
3473 KMP_INFORM(MaxValueUsing,
"kmp_set_blocktime(ms)", bt);
3481 __kmp_monitor_wakeups;
3482extern int __kmp_bt_intervals;
3485#ifdef KMP_ADJUST_BLOCKTIME
3486extern int __kmp_zero_bt;
3488#ifdef KMP_DFLT_NTH_CORES
3506#ifdef KMP_TDATA_GTID
3511#if KMP_ARCH_X86 || KMP_ARCH_X86_64
3512extern int __kmp_inherit_fp_control;
3513extern kmp_int16 __kmp_init_x87_fpu_control_word;
3525#if KMP_NESTED_HOT_TEAMS
3526extern int __kmp_hot_teams_mode;
3527extern int __kmp_hot_teams_max_level;
3531extern enum clock_function_type __kmp_clock_function;
3532extern int __kmp_clock_function_param;
3535#if KMP_MIC_SUPPORTED
3536extern enum mic_type __kmp_mic_type;
3539#ifdef USE_LOAD_BALANCE
3540extern double __kmp_load_balance_interval;
3552#if KMP_USE_ADAPTIVE_LOCKS
3555struct kmp_adaptive_backoff_params_t {
3563extern kmp_adaptive_backoff_params_t __kmp_adaptive_backoff_params;
3565#if KMP_DEBUG_ADAPTIVE_LOCKS
3566extern const char *__kmp_speculative_statsfile;
// Shorthand accessors for the current thread's global thread id (gtid) and
// the structures reachable from it. __kmp_entry_gtid() uses the registering
// variant of the lookup (presumably for library entry points -- confirm).
#define __kmp_get_gtid() __kmp_get_global_thread_id()
#define __kmp_entry_gtid() __kmp_get_global_thread_id_reg()
#define __kmp_get_tid() (__kmp_tid_from_gtid(__kmp_get_gtid()))
#define __kmp_get_team() (__kmp_threads[(__kmp_get_gtid())]->th.th_team)
#define __kmp_get_thread() (__kmp_thread_from_gtid(__kmp_get_gtid()))
// Number of threads in the team the given gtid belongs to.
#define __kmp_get_team_num_threads(gtid) \
  (__kmp_threads[(gtid)]->th.th_team->t.t_nproc)
3626 return team->
t.t_threads[tid]->th.th_info.ds.ds_gtid;
3631 return thr->th.th_info.ds.ds_gtid;
3649#if KMP_HAVE_MWAIT || KMP_HAVE_UMWAIT
3650extern int __kmp_user_level_mwait;
3651extern int __kmp_umwait_enabled;
3652extern int __kmp_mwait_enabled;
3653extern int __kmp_mwait_hints;
3657extern int __kmp_waitpkg_enabled;
3658extern int __kmp_tpause_state;
3659extern int __kmp_tpause_hint;
3660extern int __kmp_tpause_enabled;
// Generate a unique id by atomically incrementing a counter when debugging
// is enabled; otherwise every id collapses to ~0 (no bookkeeping cost).
#define _KMP_GEN_ID(counter) \
  (__kmp_debugging ? KMP_ATOMIC_INC(&counter) + 1 : ~0)
#define _KMP_GEN_ID(counter) (~0)
// Unique ids for tasks and teams (meaningful only when __kmp_debugging).
#define KMP_GEN_TASK_ID() _KMP_GEN_ID(__kmp_task_counter)
#define KMP_GEN_TEAM_ID() _KMP_GEN_ID(__kmp_team_counter)
3686 size_t size,
char const *format, ...);
3712#ifdef USE_LOAD_BALANCE
3722extern void __kmp_warn(
char const *format, ...);
3754#define __kmp_allocate(size) ___kmp_allocate((size)KMP_SRC_LOC_CURR)
3755#define __kmp_page_allocate(size) ___kmp_page_allocate((size)KMP_SRC_LOC_CURR)
3756#define __kmp_free(ptr) ___kmp_free((ptr)KMP_SRC_LOC_CURR)
3759extern void *___kmp_fast_allocate(
kmp_info_t *this_thr,
3762extern void __kmp_free_fast_memory(
kmp_info_t *this_thr);
3763extern void __kmp_initialize_fast_memory(
kmp_info_t *this_thr);
3764#define __kmp_fast_allocate(this_thr, size) \
3765 ___kmp_fast_allocate((this_thr), (size)KMP_SRC_LOC_CURR)
3766#define __kmp_fast_free(this_thr, ptr) \
3767 ___kmp_fast_free((this_thr), (ptr)KMP_SRC_LOC_CURR)
3776#define __kmp_thread_malloc(th, size) \
3777 ___kmp_thread_malloc((th), (size)KMP_SRC_LOC_CURR)
3778#define __kmp_thread_calloc(th, nelem, elsize) \
3779 ___kmp_thread_calloc((th), (nelem), (elsize)KMP_SRC_LOC_CURR)
3780#define __kmp_thread_realloc(th, ptr, size) \
3781 ___kmp_thread_realloc((th), (ptr), (size)KMP_SRC_LOC_CURR)
3782#define __kmp_thread_free(th, ptr) \
3783 ___kmp_thread_free((th), (ptr)KMP_SRC_LOC_CURR)
3792 int num_teams_ub,
int num_threads);
3829#ifdef KMP_GOMP_COMPAT
3878#if KMP_HANDLE_SIGNALS
3879extern int __kmp_handle_signals;
3880extern void __kmp_install_signals(
int parallel_init);
3881extern void __kmp_remove_signals(
void);
3892#if KMP_ARCH_X86 || KMP_ARCH_X86_64 || (KMP_OS_WINDOWS && (KMP_ARCH_AARCH64 || KMP_ARCH_ARM))
3901#if KMP_AFFINITY_SUPPORTED
3902extern char *__kmp_affinity_print_mask(
char *
buf,
int buf_len,
3903 kmp_affin_mask_t *
mask);
3905 kmp_affin_mask_t *
mask);
3906extern void __kmp_affinity_initialize(kmp_affinity_t &affinity);
3907extern void __kmp_affinity_uninitialize(
void);
3908extern void __kmp_affinity_set_init_mask(
3909 int gtid,
int isa_root);
3910void __kmp_affinity_bind_init_mask(
int gtid);
3911extern void __kmp_affinity_bind_place(
int gtid);
3913extern int __kmp_aux_set_affinity(
void **
mask);
3914extern int __kmp_aux_get_affinity(
void **
mask);
3915extern int __kmp_aux_get_affinity_max_proc();
3916extern int __kmp_aux_set_affinity_mask_proc(
int proc,
void **
mask);
3917extern int __kmp_aux_unset_affinity_mask_proc(
int proc,
void **
mask);
3918extern int __kmp_aux_get_affinity_mask_proc(
int proc,
void **
mask);
3919extern void __kmp_balanced_affinity(
kmp_info_t *th,
int team_size);
3920#if KMP_WEIGHTED_ITERATIONS_SUPPORTED
3921extern int __kmp_get_first_osid_with_ecore(
void);
3923#if KMP_OS_LINUX || KMP_OS_FREEBSD || KMP_OS_NETBSD || KMP_OS_DRAGONFLY || \
3925extern int kmp_set_thread_affinity_mask_initial(
void);
3930 if (
r->r.r_uber_thread ==
__kmp_threads[gtid] && !
r->r.r_affinity_assigned) {
3931 __kmp_affinity_set_init_mask(gtid,
TRUE);
3932 __kmp_affinity_bind_init_mask(gtid);
3933 r->r.r_affinity_assigned =
TRUE;
3937 if (!KMP_AFFINITY_CAPABLE())
3941 if (
r->r.r_uber_thread == th &&
r->r.r_affinity_assigned) {
3942 __kmp_set_system_affinity(__kmp_affin_origMask,
FALSE);
3943 KMP_CPU_COPY(th->th.th_affin_mask, __kmp_affin_origMask);
3944 r->r.r_affinity_assigned =
FALSE;
3948#define __kmp_assign_root_init_mask()
3963extern int __kmp_futex_determine_capable(
void);
3975extern void __kmp_create_monitor(
kmp_info_t *th);
4022 ompt_data_t ompt_parallel_data,
4045 size_t reduce_size,
void *reduce_data,
4046 void (*reduce)(
void *,
void *));
4071 int exit_teams = 0);
4108 size_t sizeof_kmp_task_t,
4109 size_t sizeof_shareds,
4140#define KMP_DEBUG_ASSERT_TASKTEAM_INVARIANT(team, thr) \
4142 __kmp_tasking_mode != tskm_task_teams || team->t.t_nproc == 1 || \
4143 thr->th.th_task_team == team->t.t_task_team[thr->th.th_task_state])
4145#define KMP_DEBUG_ASSERT_TASKTEAM_INVARIANT(team, thr)
4164 void **exit_frame_ptr
4177 size_t vector_length);
4238 size_t cpy_size,
void *cpy_data,
4239 void (*cpy_func)(
void *,
void *),
4254 size_t sizeof_kmp_task_t,
4255 size_t sizeof_shareds,
4305 bool serialize_immediate);
4334 int num,
void *
data);
4370 void **user_lock, uintptr_t hint);
4379static inline bool __kmp_tdg_is_recording(kmp_tdg_status_t
status) {
4380 return status == KMP_TDG_RECORDING;
4393 void *reduce_data,
void (*reduce_func)(
void *lhs_data,
void *rhs_data),
4399 void *reduce_data,
void (*reduce_func)(
void *lhs_data,
void *rhs_data),
4408 void *reduce_data,
void (*reduce_func)(
void *lhs_data,
void *rhs_data),
4460 void *data_addr,
size_t pc_size);
4469#define KMPC_CONVENTION __cdecl
4471#define KMPC_CONVENTION
4504 char const *format);
4517#define KMP_DEVICE_DEFAULT -1
4518#define KMP_DEVICE_ALL -11
// Hidden helper threads occupy gtids 1..__kmp_hidden_helper_threads_num:
// gtid 1 is the hidden helper main thread, gtids > 1 are its workers.
#define KMP_HIDDEN_HELPER_THREAD(gtid) \
  ((gtid) >= 1 && (gtid) <= __kmp_hidden_helper_threads_num)
#define KMP_HIDDEN_HELPER_WORKER_THREAD(gtid) \
  ((gtid) > 1 && (gtid) <= __kmp_hidden_helper_threads_num)
#define KMP_HIDDEN_HELPER_MAIN_THREAD(gtid) \
  ((gtid) == 1 && (gtid) <= __kmp_hidden_helper_threads_num)
// A team is a hidden helper team iff its primary thread is the hidden
// helper main thread.
#define KMP_HIDDEN_HELPER_TEAM(team) \
  (team->t.t_threads[0] == __kmp_hidden_helper_main_thread)
// Map a regular gtid onto a hidden helper worker gtid in [2, num],
// skipping the hidden helper main thread (gtid 1).
#define KMP_GTID_TO_SHADOW_GTID(gtid) \
  ((gtid) % (__kmp_hidden_helper_threads_num - 1) + 2)
4596 int adjusted_gtid = gtid;
4601 return adjusted_gtid;
4619template <
bool C,
bool S>
4621template <
bool C,
bool S>
4623template <
bool C,
bool S>
4627#if KMP_HAVE_MWAIT || KMP_HAVE_UMWAIT
4628template <
bool C,
bool S>
4630template <
bool C,
bool S>
4632template <
bool C,
bool S>
4636template <
bool C,
bool S>
4638template <
bool C,
bool S>
4640template <
bool C,
bool S>
4645template <
bool C,
bool S>
4648 int *thread_finished,
4653template <
bool C,
bool S>
4656 int *thread_finished,
4661template <
bool C,
bool S>
4664 int final_spin,
int *thread_finished,
4671 int *thread_finished,
4693 if (
f &&
f != stdout &&
f != stderr) {
4702 const char *env_var =
nullptr)
4712 const char *env_var =
nullptr) {
4714 f = fopen(filename,
mode);
4730 f = fopen(filename,
mode);
4748 operator FILE *() {
return f; }
4751template <
typename SourceType,
typename TargetType,
4752 bool isSourceSmaller = (
sizeof(SourceType) <
sizeof(TargetType)),
4753 bool isSourceEqual = (
sizeof(SourceType) ==
sizeof(TargetType)),
4754 bool isSourceSigned = std::is_signed<SourceType>::value,
4755 bool isTargetSigned = std::is_signed<TargetType>::value>
4759template <
typename SourceType,
typename TargetType>
4761 static TargetType
to(SourceType src) {
return (TargetType)src; }
4764template <
typename SourceType,
typename TargetType>
4766 static TargetType
to(SourceType src) {
return src; }
4769template <
typename SourceType,
typename TargetType>
4771 static TargetType
to(SourceType src) {
4773 (std::numeric_limits<TargetType>::max)()));
4775 (std::numeric_limits<TargetType>::min)()));
4776 return (TargetType)src;
4782template <
typename SourceType,
typename TargetType>
4784 static TargetType
to(SourceType src) {
4786 return (TargetType)src;
4790template <
typename SourceType,
typename TargetType>
4792 static TargetType
to(SourceType src) {
4794 return (TargetType)src;
4798template <
typename SourceType,
typename TargetType>
4799struct kmp_convert<SourceType, TargetType, false, false, true, false> {
4800 static TargetType
to(SourceType src) {
4803 (std::numeric_limits<TargetType>::max)()));
4804 return (TargetType)src;
4810template <
typename SourceType,
typename TargetType>
4812 static TargetType
to(SourceType src) {
return (TargetType)src; }
4815template <
typename SourceType,
typename TargetType>
4817 static TargetType
to(SourceType src) {
4819 (std::numeric_limits<TargetType>::max)()));
4820 return (TargetType)src;
4824template <
typename SourceType,
typename TargetType>
4825struct kmp_convert<SourceType, TargetType, false, false, false, true> {
4826 static TargetType
to(SourceType src) {
4828 (std::numeric_limits<TargetType>::max)()));
4829 return (TargetType)src;
4835template <
typename SourceType,
typename TargetType>
4836struct kmp_convert<SourceType, TargetType, true, false, false, false> {
4837 static TargetType
to(SourceType src) {
return (TargetType)src; }
4840template <
typename SourceType,
typename TargetType>
4841struct kmp_convert<SourceType, TargetType, false, true, false, false> {
4842 static TargetType
to(SourceType src) {
return src; }
4845template <
typename SourceType,
typename TargetType>
4846struct kmp_convert<SourceType, TargetType, false, false, false, false> {
4847 static TargetType
to(SourceType src) {
4849 (std::numeric_limits<TargetType>::max)()));
4850 return (TargetType)src;
4854template <
typename T1,
typename T2>
void * target(void *task)
int task_entry(kmp_int32 gtid, kmp_task_t *task)
This class safely opens and closes a C-style FILE* object using RAII semantics.
void set_stdout()
Set the FILE* object to stdout and output there. No open call should happen before this call.
void set_stderr()
Set the FILE* object to stderr and output there. No open call should happen before this call.
int try_open(const char *filename, const char *mode)
Instead of erroring out, return non-zero when fopen() is unsuccessful for any reason.
kmp_safe_raii_file_t(const char *filename, const char *mode, const char *env_var=nullptr)
void open(const char *filename, const char *mode, const char *env_var=nullptr)
Open filename using mode.
kmp_int32(*)(kmp_int32, void *) kmp_routine_entry_t
struct kmp_task kmp_task_t
struct ident ident_t
The ident structure that describes a source location.
union kmp_cmplrdata kmp_cmplrdata_t
@ KMP_IDENT_BARRIER_IMPL_FOR
@ KMP_IDENT_KMPC
Use c-style ident structure.
@ KMP_IDENT_ATOMIC_HINT_CONTENDED
@ KMP_IDENT_BARRIER_IMPL_MASK
@ KMP_IDENT_BARRIER_IMPL_SECTIONS
@ KMP_IDENT_IMB
Use trampoline for internal microtasks.
@ KMP_IDENT_BARRIER_IMPL_WORKSHARE
@ KMP_IDENT_WORK_LOOP
To mark a static loop in OMPT callbacks.
@ KMP_IDENT_BARRIER_IMPL
To Mark implicit barriers.
@ KMP_IDENT_ATOMIC_HINT_UNCONTENDED
@ KMP_IDENT_WORK_SECTIONS
To mark a sections directive in OMPT callbacks.
@ KMP_IDENT_AUTOPAR
Entry point generated by auto-parallelization.
@ KMP_IDENT_ATOMIC_HINT_SPECULATIVE
@ KMP_IDENT_BARRIER_IMPL_SINGLE
@ KMP_IDENT_ATOMIC_HINT_MASK
Atomic hint; bottom four bits as omp_sync_hint_t.
@ KMP_IDENT_WORK_DISTRIBUTE
To mark a distribute construct in OMPT callbacks.
@ KMP_IDENT_OPENMP_SPEC_VERSION_MASK
@ KMP_IDENT_BARRIER_EXPL
To mark a 'barrier' directive in user code.
@ KMP_IDENT_ATOMIC_HINT_NONSPECULATIVE
@ KMP_IDENT_ATOMIC_REDUCE
Compiler generates atomic reduction option for kmpc_reduce*.
KMP_EXPORT kmp_int32 __kmpc_ok_to_fork(ident_t *)
KMP_EXPORT void __kmpc_fork_teams(ident_t *loc, kmp_int32 argc, kmpc_micro microtask,...)
KMP_EXPORT void __kmpc_fork_call_if(ident_t *loc, kmp_int32 nargs, kmpc_micro microtask, kmp_int32 cond, void *args)
KMP_EXPORT void __kmpc_push_num_threads(ident_t *loc, kmp_int32 global_tid, kmp_int32 num_threads)
KMP_EXPORT void __kmpc_set_thread_limit(ident_t *loc, kmp_int32 global_tid, kmp_int32 thread_limit)
KMP_EXPORT void __kmpc_serialized_parallel(ident_t *, kmp_int32 global_tid)
KMP_EXPORT void __kmpc_push_num_teams(ident_t *loc, kmp_int32 global_tid, kmp_int32 num_teams, kmp_int32 num_threads)
KMP_EXPORT void __kmpc_fork_call(ident_t *, kmp_int32 nargs, kmpc_micro microtask,...)
KMP_EXPORT void __kmpc_end_serialized_parallel(ident_t *, kmp_int32 global_tid)
KMP_EXPORT void __kmpc_push_num_teams_51(ident_t *loc, kmp_int32 global_tid, kmp_int32 num_teams_lb, kmp_int32 num_teams_ub, kmp_int32 num_threads)
KMP_EXPORT void __kmpc_begin(ident_t *, kmp_int32 flags)
KMP_EXPORT void __kmpc_end(ident_t *)
KMP_EXPORT void __kmpc_end_reduce(ident_t *loc, kmp_int32 global_tid, kmp_critical_name *lck)
KMP_EXPORT void __kmpc_end_barrier_master(ident_t *, kmp_int32 global_tid)
KMP_EXPORT kmp_int32 __kmpc_barrier_master_nowait(ident_t *, kmp_int32 global_tid)
KMP_EXPORT void __kmpc_end_reduce_nowait(ident_t *loc, kmp_int32 global_tid, kmp_critical_name *lck)
KMP_EXPORT kmp_int32 __kmpc_reduce(ident_t *loc, kmp_int32 global_tid, kmp_int32 num_vars, size_t reduce_size, void *reduce_data, void(*reduce_func)(void *lhs_data, void *rhs_data), kmp_critical_name *lck)
KMP_EXPORT void __kmpc_barrier(ident_t *, kmp_int32 global_tid)
KMP_EXPORT void __kmpc_flush(ident_t *)
KMP_EXPORT kmp_int32 __kmpc_barrier_master(ident_t *, kmp_int32 global_tid)
KMP_EXPORT kmp_int32 __kmpc_reduce_nowait(ident_t *loc, kmp_int32 global_tid, kmp_int32 num_vars, size_t reduce_size, void *reduce_data, void(*reduce_func)(void *lhs_data, void *rhs_data), kmp_critical_name *lck)
KMP_EXPORT void * __kmpc_task_reduction_get_th_data(int gtid, void *tg, void *d)
KMP_EXPORT void * __kmpc_task_reduction_modifier_init(ident_t *loc, int gtid, int is_ws, int num, void *data)
KMP_EXPORT void * __kmpc_taskred_modifier_init(ident_t *loc, int gtid, int is_ws, int num, void *data)
KMP_EXPORT bool __kmpc_omp_has_task_team(kmp_int32 gtid)
KMP_EXPORT void __kmpc_proxy_task_completed_ooo(kmp_task_t *ptask)
KMP_EXPORT void __kmpc_task_reduction_modifier_fini(ident_t *loc, int gtid, int is_ws)
KMP_EXPORT kmp_int32 __kmpc_omp_task_with_deps(ident_t *loc_ref, kmp_int32 gtid, kmp_task_t *new_task, kmp_int32 ndeps, kmp_depend_info_t *dep_list, kmp_int32 ndeps_noalias, kmp_depend_info_t *noalias_dep_list)
KMP_EXPORT kmp_int32 __kmpc_omp_reg_task_with_affinity(ident_t *loc_ref, kmp_int32 gtid, kmp_task_t *new_task, kmp_int32 naffins, kmp_task_affinity_info_t *affin_list)
KMP_EXPORT void * __kmpc_task_reduction_init(int gtid, int num_data, void *data)
KMP_EXPORT void __kmpc_proxy_task_completed(kmp_int32 gtid, kmp_task_t *ptask)
KMP_EXPORT void __kmpc_omp_wait_deps(ident_t *loc_ref, kmp_int32 gtid, kmp_int32 ndeps, kmp_depend_info_t *dep_list, kmp_int32 ndeps_noalias, kmp_depend_info_t *noalias_dep_list)
KMP_EXPORT void * __kmpc_taskred_init(int gtid, int num_data, void *data)
KMP_EXPORT void ** __kmpc_omp_get_target_async_handle_ptr(kmp_int32 gtid)
void(* kmpc_dtor)(void *)
Pointer to the destructor function.
void *(* kmpc_cctor)(void *, void *)
Pointer to an alternate constructor.
KMP_EXPORT void __kmpc_threadprivate_register(ident_t *, void *data, kmpc_ctor ctor, kmpc_cctor cctor, kmpc_dtor dtor)
KMP_EXPORT void __kmpc_copyprivate(ident_t *loc, kmp_int32 global_tid, size_t cpy_size, void *cpy_data, void(*cpy_func)(void *, void *), kmp_int32 didit)
void *(* kmpc_cctor_vec)(void *, void *, size_t)
Array constructor.
void *(* kmpc_ctor)(void *)
Pointer to the constructor function.
KMP_EXPORT void * __kmpc_copyprivate_light(ident_t *loc, kmp_int32 gtid, void *cpy_data)
void *(* kmpc_ctor_vec)(void *, size_t)
Array constructor.
KMP_EXPORT void * __kmpc_threadprivate_cached(ident_t *loc, kmp_int32 global_tid, void *data, size_t size, void ***cache)
void(* kmpc_dtor_vec)(void *, size_t)
Pointer to the array destructor function.
KMP_EXPORT void __kmpc_threadprivate_register_vec(ident_t *, void *data, kmpc_ctor_vec ctor, kmpc_cctor_vec cctor, kmpc_dtor_vec dtor, size_t vector_length)
KMP_EXPORT kmp_int32 __kmpc_global_num_threads(ident_t *)
KMP_EXPORT kmp_int32 __kmpc_global_thread_num(ident_t *)
KMP_EXPORT kmp_int32 __kmpc_in_parallel(ident_t *loc)
KMP_EXPORT kmp_int32 __kmpc_bound_thread_num(ident_t *)
KMP_EXPORT kmp_int32 __kmpc_bound_num_threads(ident_t *)
KMP_EXPORT void __kmpc_end_ordered(ident_t *, kmp_int32 global_tid)
KMP_EXPORT void __kmpc_end_critical(ident_t *, kmp_int32 global_tid, kmp_critical_name *)
KMP_EXPORT void __kmpc_for_static_fini(ident_t *loc, kmp_int32 global_tid)
sched_type
Describes the loop schedule to be used for a parallel for loop.
KMP_EXPORT void __kmpc_end_masked(ident_t *, kmp_int32 global_tid)
KMP_EXPORT kmp_int32 __kmpc_master(ident_t *, kmp_int32 global_tid)
KMP_EXPORT kmp_int32 __kmpc_single(ident_t *, kmp_int32 global_tid)
KMP_EXPORT kmp_int32 __kmpc_next_section(ident_t *loc, kmp_int32 global_tid, kmp_int32 numberOfSections)
KMP_EXPORT void __kmpc_end_master(ident_t *, kmp_int32 global_tid)
KMP_EXPORT void __kmpc_end_sections(ident_t *loc, kmp_int32 global_tid)
KMP_EXPORT void __kmpc_end_single(ident_t *, kmp_int32 global_tid)
KMP_EXPORT kmp_int32 __kmpc_sections_init(ident_t *loc, kmp_int32 global_tid)
KMP_EXPORT void __kmpc_ordered(ident_t *, kmp_int32 global_tid)
KMP_EXPORT kmp_int32 __kmpc_masked(ident_t *, kmp_int32 global_tid, kmp_int32 filter)
void __kmpc_dispatch_init_4(ident_t *loc, kmp_int32 gtid, enum sched_type schedule, kmp_int32 lb, kmp_int32 ub, kmp_int32 st, kmp_int32 chunk)
KMP_EXPORT void __kmpc_critical(ident_t *, kmp_int32 global_tid, kmp_critical_name *)
@ kmp_nm_guided_chunked
guided unspecialized
@ kmp_sch_runtime_simd
runtime with chunk adjustment
@ kmp_nm_ord_dynamic_chunked
@ kmp_distribute_static_chunked
distribute static chunked
@ kmp_sch_static
static unspecialized
@ kmp_sch_guided_simd
guided with chunk adjustment
@ kmp_ord_dynamic_chunked
@ kmp_sch_modifier_monotonic
Set if the monotonic schedule modifier was present.
@ kmp_sch_default
default scheduling algorithm
@ kmp_sch_modifier_nonmonotonic
Set if the nonmonotonic schedule modifier was present.
@ kmp_nm_ord_static
ordered static unspecialized
@ kmp_distribute_static
distribute static unspecialized
@ kmp_sch_guided_chunked
guided unspecialized
@ kmp_sch_dynamic_chunked
@ kmp_sch_guided_analytical_chunked
@ kmp_sch_static_balanced
@ kmp_nm_static
static unspecialized
@ kmp_sch_lower
lower bound for unordered values
@ kmp_nm_guided_analytical_chunked
@ kmp_nm_upper
upper bound for nomerge values
@ kmp_ord_lower
lower bound for ordered values, must be power of 2
@ kmp_ord_static
ordered static unspecialized
@ kmp_sch_guided_iterative_chunked
@ kmp_sch_static_balanced_chunked
@ kmp_sch_upper
upper bound for unordered values
@ kmp_ord_upper
upper bound for ordered values
@ kmp_nm_lower
lower bound for nomerge values
@ kmp_nm_guided_iterative_chunked
@ kmp_ord_auto
ordered auto
@ kmp_nm_ord_static_chunked
@ kmp_nm_ord_guided_chunked
__itt_string_handle * name
void const char const char int ITT_FORMAT __itt_group_sync x void const char ITT_FORMAT __itt_group_sync s void ITT_FORMAT __itt_group_sync p void ITT_FORMAT p void ITT_FORMAT p no args __itt_suppress_mode_t unsigned int void size_t ITT_FORMAT d void ITT_FORMAT p void ITT_FORMAT p __itt_model_site __itt_model_site_instance ITT_FORMAT p __itt_model_task __itt_model_task_instance ITT_FORMAT p void ITT_FORMAT p void ITT_FORMAT p void size_t ITT_FORMAT d void ITT_FORMAT p const wchar_t ITT_FORMAT s const char ITT_FORMAT s const char ITT_FORMAT s const char ITT_FORMAT s no args void ITT_FORMAT p size_t ITT_FORMAT d no args const wchar_t const wchar_t ITT_FORMAT s __itt_heap_function void size_t int ITT_FORMAT d __itt_heap_function void ITT_FORMAT p __itt_heap_function void void size_t int ITT_FORMAT d no args no args unsigned int ITT_FORMAT u const __itt_domain __itt_id ITT_FORMAT lu const __itt_domain __itt_id __itt_id __itt_string_handle ITT_FORMAT p const __itt_domain __itt_id ITT_FORMAT p const __itt_domain __itt_id __itt_timestamp __itt_timestamp ITT_FORMAT lu const __itt_domain __itt_id __itt_id __itt_string_handle ITT_FORMAT p const __itt_domain ITT_FORMAT p const __itt_domain __itt_string_handle unsigned long long ITT_FORMAT lu const __itt_domain __itt_string_handle unsigned long long ITT_FORMAT lu const __itt_domain __itt_id __itt_string_handle __itt_metadata_type size_t void * data
void const char const char int ITT_FORMAT __itt_group_sync s
void const char const char int ITT_FORMAT __itt_group_sync x void const char ITT_FORMAT __itt_group_sync s void ITT_FORMAT __itt_group_sync p void ITT_FORMAT p void ITT_FORMAT p no args __itt_suppress_mode_t unsigned int mask
void const char const char int ITT_FORMAT __itt_group_sync x void const char ITT_FORMAT __itt_group_sync s void ITT_FORMAT __itt_group_sync p void ITT_FORMAT p void ITT_FORMAT p no args __itt_suppress_mode_t unsigned int void size_t ITT_FORMAT d void ITT_FORMAT p void ITT_FORMAT p __itt_model_site __itt_model_site_instance ITT_FORMAT p __itt_model_task __itt_model_task_instance ITT_FORMAT p void ITT_FORMAT p void ITT_FORMAT p void size_t ITT_FORMAT d void ITT_FORMAT p const wchar_t ITT_FORMAT s const char ITT_FORMAT s const char ITT_FORMAT s const char ITT_FORMAT s no args void ITT_FORMAT p size_t ITT_FORMAT d no args const wchar_t const wchar_t ITT_FORMAT s __itt_heap_function void size_t int ITT_FORMAT d __itt_heap_function void ITT_FORMAT p __itt_heap_function void void size_t int ITT_FORMAT d no args no args unsigned int ITT_FORMAT u const __itt_domain __itt_id ITT_FORMAT lu const __itt_domain __itt_id __itt_id __itt_string_handle ITT_FORMAT p const __itt_domain __itt_id ITT_FORMAT p const __itt_domain __itt_id __itt_timestamp __itt_timestamp end
void const char const char int ITT_FORMAT __itt_group_sync x void const char ITT_FORMAT __itt_group_sync s void ITT_FORMAT __itt_group_sync p void ITT_FORMAT p void ITT_FORMAT p no args __itt_suppress_mode_t unsigned int void size_t ITT_FORMAT d void ITT_FORMAT p void ITT_FORMAT p __itt_model_site __itt_model_site_instance ITT_FORMAT p __itt_model_task __itt_model_task_instance ITT_FORMAT p void ITT_FORMAT p void ITT_FORMAT p void size_t ITT_FORMAT d void ITT_FORMAT p const wchar_t ITT_FORMAT s const char ITT_FORMAT s const char ITT_FORMAT s const char ITT_FORMAT s no args void ITT_FORMAT p size_t ITT_FORMAT d no args const wchar_t const wchar_t ITT_FORMAT s __itt_heap_function void size_t int initialized
void const char const char int ITT_FORMAT __itt_group_sync x void const char ITT_FORMAT __itt_group_sync s void ITT_FORMAT __itt_group_sync p void ITT_FORMAT p void ITT_FORMAT p no args __itt_suppress_mode_t unsigned int void size_t ITT_FORMAT d void ITT_FORMAT p void ITT_FORMAT p __itt_model_site __itt_model_site_instance ITT_FORMAT p __itt_model_task __itt_model_task_instance ITT_FORMAT p void ITT_FORMAT p void ITT_FORMAT p void size_t ITT_FORMAT d void ITT_FORMAT p const wchar_t ITT_FORMAT s const char ITT_FORMAT s const char ITT_FORMAT s const char ITT_FORMAT s no args void ITT_FORMAT p size_t ITT_FORMAT d no args const wchar_t const wchar_t ITT_FORMAT s __itt_heap_function void size_t int ITT_FORMAT d __itt_heap_function void ITT_FORMAT p __itt_heap_function void void size_t int ITT_FORMAT d no args no args unsigned int ITT_FORMAT u const __itt_domain __itt_id ITT_FORMAT lu const __itt_domain __itt_id __itt_id __itt_string_handle ITT_FORMAT p const __itt_domain __itt_id ITT_FORMAT p const __itt_domain __itt_id __itt_timestamp begin
void const char const char int ITT_FORMAT __itt_group_sync x void const char ITT_FORMAT __itt_group_sync s void ITT_FORMAT __itt_group_sync p void ITT_FORMAT p void ITT_FORMAT p no args __itt_suppress_mode_t unsigned int void size_t ITT_FORMAT d
void const char const char int ITT_FORMAT __itt_group_sync x void const char ITT_FORMAT __itt_group_sync s void ITT_FORMAT __itt_group_sync p void ITT_FORMAT p void ITT_FORMAT p no args __itt_suppress_mode_t unsigned int void size_t ITT_FORMAT d void ITT_FORMAT p void ITT_FORMAT p __itt_model_site __itt_model_site_instance ITT_FORMAT p __itt_model_task __itt_model_task_instance ITT_FORMAT p void ITT_FORMAT p void ITT_FORMAT p void size_t ITT_FORMAT d void ITT_FORMAT p const wchar_t ITT_FORMAT s const char ITT_FORMAT s const char ITT_FORMAT s const char ITT_FORMAT s no args void ITT_FORMAT p size_t ITT_FORMAT d no args const wchar_t const wchar_t ITT_FORMAT s __itt_heap_function void size_t int ITT_FORMAT d __itt_heap_function void ITT_FORMAT p __itt_heap_function void void size_t int ITT_FORMAT d no args no args unsigned int ITT_FORMAT u const __itt_domain __itt_id ITT_FORMAT lu const __itt_domain __itt_id __itt_id __itt_string_handle ITT_FORMAT p const __itt_domain __itt_id ITT_FORMAT p const __itt_domain __itt_id __itt_timestamp __itt_timestamp ITT_FORMAT lu const __itt_domain __itt_id __itt_id __itt_string_handle ITT_FORMAT p const __itt_domain ITT_FORMAT p const __itt_domain __itt_string_handle unsigned long long ITT_FORMAT lu const __itt_domain __itt_string_handle unsigned long long ITT_FORMAT lu const __itt_domain __itt_id __itt_string_handle __itt_metadata_type size_t void ITT_FORMAT p const __itt_domain __itt_id __itt_string_handle const wchar_t size_t ITT_FORMAT lu const __itt_domain __itt_id __itt_relation __itt_id ITT_FORMAT p const wchar_t int ITT_FORMAT __itt_group_mark d __itt_event ITT_FORMAT __itt_group_mark d void const wchar_t const wchar_t int ITT_FORMAT __itt_group_sync __itt_group_fsync x void const wchar_t int const wchar_t int int ITT_FORMAT __itt_group_sync __itt_group_fsync x void ITT_FORMAT __itt_group_sync __itt_group_fsync p void ITT_FORMAT __itt_group_sync __itt_group_fsync p void size_t ITT_FORMAT lu no args __itt_obj_prop_t 
__itt_obj_state_t ITT_FORMAT d const char ITT_FORMAT s const char ITT_FORMAT s __itt_frame ITT_FORMAT p __itt_counter ITT_FORMAT p __itt_counter unsigned long long ITT_FORMAT lu __itt_counter unsigned long long ITT_FORMAT lu __itt_counter __itt_clock_domain unsigned long long void ITT_FORMAT p const wchar_t ITT_FORMAT S __itt_mark_type const wchar_t ITT_FORMAT S __itt_mark_type const char ITT_FORMAT s __itt_mark_type ITT_FORMAT d __itt_caller ITT_FORMAT p __itt_caller ITT_FORMAT p no args const __itt_domain __itt_clock_domain unsigned long long __itt_id ITT_FORMAT lu const __itt_domain __itt_clock_domain unsigned long long __itt_id __itt_id void ITT_FORMAT p const __itt_domain __itt_id __itt_id __itt_string_handle ITT_FORMAT p const __itt_domain __itt_id ITT_FORMAT lu const __itt_domain __itt_clock_domain unsigned long long __itt_id __itt_string_handle __itt_scope ITT_FORMAT d const __itt_domain __itt_scope __itt_string_handle const char size_t ITT_FORMAT lu const __itt_domain __itt_clock_domain unsigned long long __itt_relation __itt_id ITT_FORMAT lu __itt_track_group __itt_string_handle __itt_track_group_type ITT_FORMAT d __itt_track ITT_FORMAT p void int const int int const char int ITT_FORMAT d void void const char * path
void const char const char int ITT_FORMAT __itt_group_sync x void const char ITT_FORMAT __itt_group_sync s void ITT_FORMAT __itt_group_sync p void ITT_FORMAT p void ITT_FORMAT p no args __itt_suppress_mode_t unsigned int void size_t ITT_FORMAT d void ITT_FORMAT p void ITT_FORMAT p __itt_model_site __itt_model_site_instance ITT_FORMAT p __itt_model_task __itt_model_task_instance ITT_FORMAT p void ITT_FORMAT p void ITT_FORMAT p void size_t ITT_FORMAT d void ITT_FORMAT p const wchar_t ITT_FORMAT s const char ITT_FORMAT s const char ITT_FORMAT s const char ITT_FORMAT s no args void ITT_FORMAT p size_t count
void const char const char int ITT_FORMAT __itt_group_sync x void const char ITT_FORMAT __itt_group_sync s void ITT_FORMAT __itt_group_sync p void ITT_FORMAT p void ITT_FORMAT p no args __itt_suppress_mode_t unsigned int void size_t ITT_FORMAT d void ITT_FORMAT p void ITT_FORMAT p __itt_model_site __itt_model_site_instance ITT_FORMAT p __itt_model_task __itt_model_task_instance ITT_FORMAT p void ITT_FORMAT p void ITT_FORMAT p void size_t ITT_FORMAT d void ITT_FORMAT p const wchar_t ITT_FORMAT s const char ITT_FORMAT s const char ITT_FORMAT s const char ITT_FORMAT s no args void ITT_FORMAT p size_t ITT_FORMAT d no args const wchar_t const wchar_t ITT_FORMAT s __itt_heap_function void size_t int ITT_FORMAT d __itt_heap_function void ITT_FORMAT p __itt_heap_function void void size_t int ITT_FORMAT d no args no args unsigned int ITT_FORMAT u const __itt_domain __itt_id ITT_FORMAT lu const __itt_domain __itt_id __itt_id __itt_string_handle ITT_FORMAT p const __itt_domain __itt_id ITT_FORMAT p const __itt_domain __itt_id __itt_timestamp __itt_timestamp ITT_FORMAT lu const __itt_domain __itt_id __itt_id __itt_string_handle ITT_FORMAT p const __itt_domain ITT_FORMAT p const __itt_domain __itt_string_handle unsigned long long ITT_FORMAT lu const __itt_domain __itt_string_handle unsigned long long ITT_FORMAT lu const __itt_domain __itt_id __itt_string_handle __itt_metadata_type size_t void ITT_FORMAT p const __itt_domain __itt_id __itt_string_handle const wchar_t size_t ITT_FORMAT lu const __itt_domain __itt_id __itt_relation __itt_id ITT_FORMAT p const wchar_t int ITT_FORMAT __itt_group_mark d __itt_event event
void const char const char int ITT_FORMAT __itt_group_sync x void const char ITT_FORMAT __itt_group_sync s void ITT_FORMAT __itt_group_sync p void ITT_FORMAT p void ITT_FORMAT p no args __itt_suppress_mode_t unsigned int void size_t ITT_FORMAT d void ITT_FORMAT p void ITT_FORMAT p __itt_model_site __itt_model_site_instance ITT_FORMAT p __itt_model_task __itt_model_task_instance ITT_FORMAT p void ITT_FORMAT p void ITT_FORMAT p void size_t ITT_FORMAT d void ITT_FORMAT p const wchar_t ITT_FORMAT s const char ITT_FORMAT s const char ITT_FORMAT s const char ITT_FORMAT s no args void ITT_FORMAT p size_t ITT_FORMAT d no args const wchar_t const wchar_t ITT_FORMAT s __itt_heap_function void size_t int ITT_FORMAT d __itt_heap_function void ITT_FORMAT p __itt_heap_function void void size_t int ITT_FORMAT d no args no args unsigned int ITT_FORMAT u const __itt_domain __itt_id ITT_FORMAT lu const __itt_domain __itt_id __itt_id parent
void const char const char int ITT_FORMAT __itt_group_sync x void const char ITT_FORMAT __itt_group_sync s void ITT_FORMAT __itt_group_sync p void ITT_FORMAT p void ITT_FORMAT p no args __itt_suppress_mode_t unsigned int void size_t ITT_FORMAT d void ITT_FORMAT p void ITT_FORMAT p __itt_model_site __itt_model_site_instance ITT_FORMAT p __itt_model_task __itt_model_task_instance ITT_FORMAT p void ITT_FORMAT p void ITT_FORMAT p void size_t ITT_FORMAT d void ITT_FORMAT p const wchar_t ITT_FORMAT s const char ITT_FORMAT s const char ITT_FORMAT s const char ITT_FORMAT s no args void ITT_FORMAT p size_t ITT_FORMAT d no args const wchar_t const wchar_t ITT_FORMAT s __itt_heap_function void size_t int ITT_FORMAT d __itt_heap_function void ITT_FORMAT p __itt_heap_function void void size_t int ITT_FORMAT d no args no args unsigned int ITT_FORMAT u const __itt_domain __itt_id ITT_FORMAT lu const __itt_domain __itt_id __itt_id __itt_string_handle ITT_FORMAT p const __itt_domain __itt_id ITT_FORMAT p const __itt_domain __itt_id __itt_timestamp __itt_timestamp ITT_FORMAT lu const __itt_domain __itt_id __itt_id __itt_string_handle ITT_FORMAT p const __itt_domain ITT_FORMAT p const __itt_domain __itt_string_handle unsigned long long value
void const char const char int ITT_FORMAT __itt_group_sync x void const char ITT_FORMAT __itt_group_sync s void ITT_FORMAT __itt_group_sync p void ITT_FORMAT p void ITT_FORMAT p no args __itt_suppress_mode_t mode
void const char const char int ITT_FORMAT __itt_group_sync x void const char ITT_FORMAT __itt_group_sync s void ITT_FORMAT __itt_group_sync p void ITT_FORMAT p void ITT_FORMAT p no args __itt_suppress_mode_t unsigned int void size_t size
void const char const char int ITT_FORMAT __itt_group_sync p
void const char const char int ITT_FORMAT __itt_group_sync x void const char ITT_FORMAT __itt_group_sync s void ITT_FORMAT __itt_group_sync p void ITT_FORMAT p void ITT_FORMAT p no args __itt_suppress_mode_t unsigned int void size_t ITT_FORMAT d void ITT_FORMAT p void ITT_FORMAT p __itt_model_site __itt_model_site_instance ITT_FORMAT p __itt_model_task __itt_model_task_instance ITT_FORMAT p void ITT_FORMAT p void ITT_FORMAT p void size_t ITT_FORMAT d void ITT_FORMAT p const wchar_t ITT_FORMAT s const char ITT_FORMAT s const char ITT_FORMAT s const char ITT_FORMAT s no args void ITT_FORMAT p size_t ITT_FORMAT d no args const wchar_t const wchar_t ITT_FORMAT s __itt_heap_function void size_t int ITT_FORMAT d __itt_heap_function void ITT_FORMAT p __itt_heap_function void void size_t int ITT_FORMAT d no args no args unsigned int ITT_FORMAT u const __itt_domain __itt_id ITT_FORMAT lu const __itt_domain __itt_id __itt_id __itt_string_handle ITT_FORMAT p const __itt_domain __itt_id ITT_FORMAT p const __itt_domain __itt_id __itt_timestamp __itt_timestamp ITT_FORMAT lu const __itt_domain __itt_id __itt_id __itt_string_handle ITT_FORMAT p const __itt_domain ITT_FORMAT p const __itt_domain __itt_string_handle unsigned long long ITT_FORMAT lu const __itt_domain __itt_string_handle unsigned long long ITT_FORMAT lu const __itt_domain __itt_id __itt_string_handle __itt_metadata_type size_t void ITT_FORMAT p const __itt_domain __itt_id __itt_string_handle const wchar_t size_t ITT_FORMAT lu const __itt_domain __itt_id __itt_relation __itt_id ITT_FORMAT p const wchar_t int ITT_FORMAT __itt_group_mark d int
void const char const char int ITT_FORMAT __itt_group_sync x void const char ITT_FORMAT __itt_group_sync s void ITT_FORMAT __itt_group_sync p void ITT_FORMAT p void ITT_FORMAT p no args __itt_suppress_mode_t unsigned int void size_t ITT_FORMAT d void ITT_FORMAT p void ITT_FORMAT p __itt_model_site __itt_model_site_instance ITT_FORMAT p __itt_model_task __itt_model_task_instance ITT_FORMAT p void ITT_FORMAT p void ITT_FORMAT p void size_t ITT_FORMAT d void ITT_FORMAT p const wchar_t ITT_FORMAT s const char ITT_FORMAT s const char ITT_FORMAT s const char ITT_FORMAT s no args void ITT_FORMAT p size_t ITT_FORMAT d no args const wchar_t const wchar_t ITT_FORMAT s __itt_heap_function void size_t int ITT_FORMAT d __itt_heap_function void ITT_FORMAT p __itt_heap_function void void size_t int ITT_FORMAT d no args no args unsigned int ITT_FORMAT u const __itt_domain __itt_id ITT_FORMAT lu const __itt_domain __itt_id __itt_id __itt_string_handle ITT_FORMAT p const __itt_domain __itt_id ITT_FORMAT p const __itt_domain __itt_id __itt_timestamp __itt_timestamp ITT_FORMAT lu const __itt_domain __itt_id __itt_id __itt_string_handle ITT_FORMAT p const __itt_domain ITT_FORMAT p const __itt_domain __itt_string_handle unsigned long long ITT_FORMAT lu const __itt_domain __itt_string_handle unsigned long long ITT_FORMAT lu const __itt_domain __itt_id __itt_string_handle __itt_metadata_type type
void __kmp_free_task_team(kmp_info_t *thread, kmp_task_team_t *task_team)
struct kmp_disp kmp_disp_t
int __kmp_memkind_available
omp_memspace_handle_t const omp_default_mem_space
KMP_EXPORT void __kmpc_destroy_nest_lock(ident_t *loc, kmp_int32 gtid, void **user_lock)
void __kmp_finish_implicit_task(kmp_info_t *this_thr)
void * omp_memspace_handle_t
volatile kmp_team_t * __kmp_team_pool
KMP_EXPORT kmp_int32 __kmpc_cancel_barrier(ident_t *loc_ref, kmp_int32 gtid)
int __kmp_pause_resource(kmp_pause_status_t level)
void * omp_allocator_handle_t
void __kmp_warn(char const *format,...)
void ___kmp_thread_free(kmp_info_t *th, void *ptr KMP_SRC_LOC_DECL)
void KMPC_SET_DYNAMIC(int flag)
kmp_bar_pat_e __kmp_barrier_release_pat_dflt
struct kmp_dephash kmp_dephash_t
kmp_info_t * __kmp_hidden_helper_main_thread
int __kmp_generate_warnings
volatile int __kmp_init_user_locks
int __kmp_debug_buf_lines
omp_allocator_handle_t __kmpc_init_allocator(int gtid, omp_memspace_handle_t, int ntraits, omp_alloctrait_t traits[])
kmp_proc_bind_t __kmp_teams_proc_bind
KMP_EXPORT void KMPC_CONVENTION kmpc_set_library(int)
kmp_uint32 __kmp_ge_4(kmp_uint32 value, kmp_uint32 checker)
void ompc_set_dynamic(int flag)
kmp_bootstrap_lock_t __kmp_initz_lock
void __kmp_aux_set_defaults(char const *str, size_t len)
int __kmp_display_env_verbose
omp_allocator_handle_t const omp_cgroup_mem_alloc
kmp_global_t __kmp_global
void __kmp_set_schedule(int gtid, kmp_sched_t new_sched, int chunk)
void __kmp_init_target_mem()
KMP_EXPORT void * kmpc_malloc(size_t size)
void __kmp_hidden_helper_worker_thread_signal()
void __kmp_teams_master(int gtid)
void __kmp_elapsed_tick(double *)
void __kmp_common_destroy(void)
void __kmp_common_initialize(void)
#define KMP_HASH_TABLE_SIZE
void __kmp_release_64(kmp_flag_64<> *flag)
void __kmpc_free(int gtid, void *ptr, omp_allocator_handle_t al)
kmp_pause_status_t __kmp_pause_status
struct kmp_teams_size kmp_teams_size_t
kmp_lock_t __kmp_debug_lock
enum kmp_target_offload_kind kmp_target_offload_kind_t
void __kmp_read_system_time(double *delta)
KMP_NORETURN void __kmp_abort_process(void)
void __kmp_free_thread(kmp_info_t *)
KMP_EXPORT kmp_int32 __kmpc_omp_task(ident_t *loc_ref, kmp_int32 gtid, kmp_task_t *new_task)
void __kmp_enable(int old_state)
kmp_bootstrap_lock_t __kmp_tp_cached_lock
void __kmp_check_stack_overlap(kmp_info_t *thr)
struct kmp_base_root kmp_base_root_t
void * __kmpc_alloc(int gtid, size_t sz, omp_allocator_handle_t al)
void __kmp_infinite_loop(void)
kmp_info_t * __kmp_allocate_thread(kmp_root_t *root, kmp_team_t *team, int tid)
KMP_EXPORT void __kmpc_init_nest_lock(ident_t *loc, kmp_int32 gtid, void **user_lock)
kmp_team_t * __kmp_allocate_team(kmp_root_t *root, int new_nproc, int max_nproc, kmp_proc_bind_t proc_bind, kmp_internal_control_t *new_icvs, int argc USE_NESTED_HOT_ARG(kmp_info_t *thr))
void __kmp_reap_task_teams(void)
omp_memspace_handle_t const llvm_omp_target_host_mem_space
kmp_int32 __kmp_use_yield
char const * __kmp_barrier_type_name[bs_last_barrier]
char const * __kmp_barrier_pattern_name[bp_last_bar]
int __kmp_dflt_team_nth_ub
void __kmp_hidden_helper_threads_initz_wait()
void __kmp_pop_task_team_node(kmp_info_t *thread, kmp_team_t *team)
int __kmp_aux_get_num_teams()
KMP_EXPORT int __kmpc_test_nest_lock(ident_t *loc, kmp_int32 gtid, void **user_lock)
struct dispatch_shared_info dispatch_shared_info_t
struct kmp_taskgroup kmp_taskgroup_t
struct kmp_hws_item kmp_hws_item_t
void __kmp_run_before_invoked_task(int gtid, int tid, kmp_info_t *this_thr, kmp_team_t *team)
void __kmp_wait_to_unref_task_teams(void)
struct KMP_ALIGN_CACHE kmp_base_info kmp_base_info_t
void __kmp_wait_4_ptr(void *spinner, kmp_uint32 checker, kmp_uint32(*pred)(void *, kmp_uint32), void *obj)
void __kmp_task_team_sync(kmp_info_t *this_thr, kmp_team_t *team)
int __kmp_barrier(enum barrier_type bt, int gtid, int is_split, size_t reduce_size, void *reduce_data, void(*reduce)(void *, void *))
struct KMP_ALIGN_CACHE dispatch_private_info dispatch_private_info_t
int __kmp_get_max_active_levels(int gtid)
KMP_EXPORT void * kmpc_aligned_malloc(size_t size, size_t alignment)
void ___kmpc_free(int gtid, void *ptr, omp_allocator_handle_t al)
void __kmp_aux_set_library(enum library_type arg)
void __kmp_print_storage_map_gtid(int gtid, void *p1, void *p2, size_t size, char const *format,...)
void __kmp_push_num_teams_51(ident_t *loc, int gtid, int num_teams_lb, int num_teams_ub, int num_threads)
#define __kmp_assign_root_init_mask()
int __kmp_dflt_max_active_levels
KMP_EXPORT void __kmpc_doacross_init(ident_t *loc, kmp_int32 gtid, kmp_int32 num_dims, const struct kmp_dim *dims)
void __kmp_unlock_suspend_mx(kmp_info_t *th)
kmp_bar_pat_e __kmp_barrier_gather_pat_dflt
unsigned short __kmp_get_random(kmp_info_t *thread)
void * __kmpc_calloc(int gtid, size_t nmemb, size_t sz, omp_allocator_handle_t al)
static kmp_team_t * __kmp_team_from_gtid(int gtid)
int __kmp_register_root(int initial_thread)
int __kmp_execute_tasks_oncore(kmp_info_t *thread, kmp_int32 gtid, kmp_flag_oncore *flag, int final_spin, int *thread_finished, kmp_int32 is_constrained)
omp_memspace_handle_t const omp_low_lat_mem_space
void __kmp_do_initialize_hidden_helper_threads()
int __kmp_storage_map_verbose_specified
struct kmp_local kmp_local_t
omp_allocator_handle_t __kmpc_get_default_allocator(int gtid)
kmp_bar_pat_e __kmp_barrier_gather_pattern[bs_last_barrier]
void __kmp_thread_sleep(int millis)
KMP_EXPORT kmp_int32 __kmpc_omp_taskyield(ident_t *loc_ref, kmp_int32 gtid, int end_part)
kmp_tasking_mode_t __kmp_tasking_mode
char * __kmp_affinity_format
void __kmp_abort_thread(void)
volatile kmp_info_t * __kmp_thread_pool
void __kmp_internal_end_atexit(void)
kmp_hws_item_t __kmp_hws_die
KMP_EXPORT void __kmpc_set_lock(ident_t *loc, kmp_int32 gtid, void **user_lock)
void __kmp_get_hierarchy(kmp_uint32 nproc, kmp_bstate_t *thr_bar)
volatile int __kmp_init_gtid
omp_allocator_handle_t __kmp_def_allocator
kmp_hws_item_t __kmp_hws_node
union KMP_ALIGN_CACHE kmp_sleep_team kmp_sleep_team_t
kmp_bootstrap_lock_t __kmp_task_team_lock
void * __kmp_alloc(int gtid, size_t align, size_t sz, omp_allocator_handle_t al)
int __kmp_omp_cancellation
void __kmp_suspend_64(int th_gtid, kmp_flag_64< C, S > *flag)
static void __kmp_resume_if_hard_paused()
kmp_hws_item_t __kmp_hws_tile
void * __kmp_calloc(int gtid, size_t align, size_t nmemb, size_t sz, omp_allocator_handle_t al)
kmp_nested_proc_bind_t __kmp_nested_proc_bind
void __kmp_free_implicit_task(kmp_info_t *this_thr)
KMP_EXPORT void __kmpc_destroy_lock(ident_t *loc, kmp_int32 gtid, void **user_lock)
void KMP_EXPAND_NAME() ompc_set_affinity_format(char const *format)
void __kmp_hidden_helper_main_thread_release()
void kmp_threadprivate_insert_private_data(int gtid, void *pc_addr, void *data_addr, size_t pc_size)
fork_context_e
Tell the fork call which compiler generated the fork call, and therefore how to deal with the call.
@ fork_context_gnu
Called from GNU generated code, so must not invoke the microtask internally.
@ fork_context_intel
Called from Intel generated code.
void __kmp_exit_single(int gtid)
struct KMP_ALIGN_CACHE dispatch_private_info32 dispatch_private_info32_t
void __kmp_suspend_initialize(void)
int __kmp_get_team_size(int gtid, int level)
kmp_nested_nthreads_t __kmp_nested_nth
KMP_EXPORT void __kmpc_push_proc_bind(ident_t *loc, kmp_int32 global_tid, int proc_bind)
omp_allocator_handle_t const omp_default_mem_alloc
kmp_uint32 __kmp_eq_4(kmp_uint32 value, kmp_uint32 checker)
kmp_cached_addr_t * __kmp_threadpriv_cache_list
@ atomic_flag64
atomic 64 bit flags
@ flag_oncore
special 64-bit flag for on-core barrier (hierarchical)
@ flag32
atomic 32 bit flags
void __kmp_internal_end_dtor(void)
kmp_uint64 __kmp_now_nsec()
KMP_EXPORT int __kmp_get_cancellation_status(int cancel_kind)
void KMP_EXPAND_NAME() ompc_display_affinity(char const *format)
volatile int __kmp_all_nth
void __kmp_check_stksize(size_t *val)
kmp_target_offload_kind_t __kmp_target_offload
int __kmp_debug_buf_chars
int __kmpc_get_target_offload()
void __kmp_parallel_dxo(int *gtid_ref, int *cid_ref, ident_t *loc_ref)
KMP_EXPORT void __kmpc_critical_with_hint(ident_t *, kmp_int32 global_tid, kmp_critical_name *, uint32_t hint)
int __kmp_get_global_thread_id_reg(void)
void __kmp_push_num_threads(ident_t *loc, int gtid, int num_threads)
KMP_EXPORT void KMPC_CONVENTION kmpc_set_stacksize(int)
#define SCHEDULE_HAS_MONOTONIC(s)
int __kmpc_dispatch_next_4(ident_t *loc, kmp_int32 gtid, kmp_int32 *p_last, kmp_int32 *p_lb, kmp_int32 *p_ub, kmp_int32 *p_st)
int __kmp_is_address_mapped(void *addr)
kmp_lock_t __kmp_global_lock
int __kmp_barrier_gomp_cancel(int gtid)
double __kmp_read_cpu_time(void)
void __kmpc_destroy_allocator(int gtid, omp_allocator_handle_t al)
union KMP_ALIGN_CACHE kmp_root kmp_root_t
int __kmp_adjust_gtid_mode
#define __kmp_entry_gtid()
kmp_old_threads_list_t * __kmp_old_threads_list
void __kmp_internal_end_library(int gtid)
void __kmp_suspend_32(int th_gtid, kmp_flag_32< C, S > *flag)
struct kmp_internal_control kmp_internal_control_t
void __kmp_hidden_helper_worker_thread_wait()
void __kmpc_dispatch_fini_4(ident_t *loc, kmp_int32 gtid)
volatile int __kmp_init_common
void __kmp_set_max_active_levels(int gtid, int new_max_active_levels)
enum sched_type __kmp_auto
void __kmp_init_random(kmp_info_t *thread)
static int __kmp_tid_from_gtid(int gtid)
static bool KMP_UBER_GTID(int gtid)
kmp_int32 __kmp_use_yield_exp_set
kmp_event_t * __kmpc_task_allow_completion_event(ident_t *loc_ref, int gtid, kmp_task_t *task)
void __kmp_internal_end_thread(int gtid)
struct kmp_sys_info kmp_sys_info_t
KMP_EXPORT void __kmp_set_num_teams(int num_teams)
void __kmp_disable(int *old_state)
omp_allocator_handle_t const omp_large_cap_mem_alloc
volatile int __kmp_init_hidden_helper
struct kmp_depend_info kmp_depend_info_t
void __kmp_user_set_library(enum library_type arg)
const char * __kmp_hw_get_catalog_string(kmp_hw_t type, bool plural=false)
KMP_EXPORT void __kmpc_init_nest_lock_with_hint(ident_t *loc, kmp_int32 gtid, void **user_lock, uintptr_t hint)
void __kmpc_set_default_allocator(int gtid, omp_allocator_handle_t al)
omp_allocator_handle_t const omp_low_lat_mem_alloc
@ KMP_EVENT_UNINITIALIZED
@ KMP_EVENT_ALLOW_COMPLETION
void __kmp_elapsed(double *)
KMP_EXPORT void KMPC_CONVENTION kmpc_set_disp_num_buffers(int)
int __kmp_gtid_get_specific(void)
int __kmp_aux_get_team_num()
struct KMP_ALIGN_CACHE dispatch_private_info64 dispatch_private_info64_t
KMP_EXPORT void __kmpc_taskloop(ident_t *loc, kmp_int32 gtid, kmp_task_t *task, kmp_int32 if_val, kmp_uint64 *lb, kmp_uint64 *ub, kmp_int64 st, kmp_int32 nogroup, kmp_int32 sched, kmp_uint64 grainsize, void *task_dup)
volatile int __kmp_init_middle
void __kmp_hidden_helper_threads_deinitz_wait()
omp_allocator_handle_t const omp_high_bw_mem_alloc
void __kmp_set_num_threads(int new_nth, int gtid)
std::atomic< kmp_int32 > __kmp_task_counter
void __kmpc_error(ident_t *loc, int severity, const char *message)
static kmp_sched_t __kmp_sched_without_mods(kmp_sched_t kind)
KMP_EXPORT kmp_int32 __kmpc_omp_task_parts(ident_t *loc_ref, kmp_int32 gtid, kmp_task_t *new_task)
KMP_EXPORT kmp_task_t * __kmpc_omp_task_alloc(ident_t *loc_ref, kmp_int32 gtid, kmp_int32 flags, size_t sizeof_kmp_task_t, size_t sizeof_shareds, kmp_routine_entry_t task_entry)
KMP_EXPORT void __kmpc_doacross_fini(ident_t *loc, kmp_int32 gtid)
kmp_r_sched_t __kmp_get_schedule_global(void)
int __kmp_storage_map_verbose
int __kmp_allThreadsSpecified
enum sched_type __kmp_static
int __kmp_execute_tasks_64(kmp_info_t *thread, kmp_int32 gtid, kmp_flag_64< C, S > *flag, int final_spin, int *thread_finished, kmp_int32 is_constrained)
void kmpc_set_blocktime(int arg)
KMP_EXPORT void __kmpc_taskloop_5(ident_t *loc, kmp_int32 gtid, kmp_task_t *task, kmp_int32 if_val, kmp_uint64 *lb, kmp_uint64 *ub, kmp_int64 st, kmp_int32 nogroup, kmp_int32 sched, kmp_uint64 grainsize, kmp_int32 modifier, void *task_dup)
KMP_EXPORT void __kmpc_omp_task_complete_if0(ident_t *loc_ref, kmp_int32 gtid, kmp_task_t *task)
PACKED_REDUCTION_METHOD_T __kmp_determine_reduction_method(ident_t *loc, kmp_int32 global_tid, kmp_int32 num_vars, size_t reduce_size, void *reduce_data, void(*reduce_func)(void *lhs_data, void *rhs_data), kmp_critical_name *lck)
enum kmp_tasking_mode kmp_tasking_mode_t
void * __kmp_realloc(int gtid, void *ptr, size_t sz, omp_allocator_handle_t al, omp_allocator_handle_t free_al)
void __kmp_end_split_barrier(enum barrier_type bt, int gtid)
int PACKED_REDUCTION_METHOD_T
std::atomic< int > __kmp_thread_pool_active_nth
void __kmp_hidden_helper_threads_initz_routine()
KMP_EXPORT void __kmpc_pop_num_threads(ident_t *loc, kmp_int32 global_tid)
const char * __kmp_hw_get_keyword(kmp_hw_t type, bool plural=false)
union KMP_ALIGN_CACHE kmp_thread_data kmp_thread_data_t
kmp_uint32 __kmp_lt_4(kmp_uint32 value, kmp_uint32 checker)
KMP_EXPORT void __kmpc_doacross_post(ident_t *loc, kmp_int32 gtid, const kmp_int64 *vec)
int __kmp_affinity_num_places
int __kmp_enter_single(int gtid, ident_t *id_ref, int push_ws)
int __kmp_duplicate_library_ok
void * ___kmp_thread_malloc(kmp_info_t *th, size_t size KMP_SRC_LOC_DECL)
struct kmp_base_data kmp_base_data_t
struct kmp_base_thread_data kmp_base_thread_data_t
volatile int __kmp_need_register_serial
#define KMP_PAD(type, sz)
void __kmp_internal_join(ident_t *id, int gtid, kmp_team_t *team)
kmp_bootstrap_lock_t __kmp_forkjoin_lock
KMP_EXPORT kmp_uint64 __kmpc_get_taskid()
omp_memspace_handle_t const omp_const_mem_space
struct kmp_cg_root kmp_cg_root_t
kmp_uint32 __kmp_barrier_release_branch_bits[bs_last_barrier]
KMP_EXPORT int KMPC_CONVENTION kmpc_get_affinity_mask_proc(int, kmp_affinity_mask_t *)
static kmp_info_t * __kmp_entry_thread()
KMP_EXPORT kmp_int32 __kmpc_omp_taskwait(ident_t *loc_ref, kmp_int32 gtid)
void __kmp_init_memkind()
struct kmp_task_affinity_info kmp_task_affinity_info_t
int __kmp_get_ancestor_thread_num(int gtid, int level)
void __kmp_hidden_helper_main_thread_wait()
void * __kmp_launch_thread(kmp_info_t *thr)
void __kmp_init_implicit_task(ident_t *loc_ref, kmp_info_t *this_thr, kmp_team_t *team, int tid, int set_curr_task)
kmp_int32 __kmp_default_device
void __kmp_omp_display_env(int verbose)
void __kmp_cleanup_threadprivate_caches()
void __kmp_middle_initialize(void)
static void copy_icvs(kmp_internal_control_t *dst, kmp_internal_control_t *src)
KMP_EXPORT void __kmpc_end_taskgroup(ident_t *loc, int gtid)
kmp_bootstrap_lock_t __kmp_exit_lock
KMP_EXPORT void __kmpc_omp_taskwait_deps_51(ident_t *loc_ref, kmp_int32 gtid, kmp_int32 ndeps, kmp_depend_info_t *dep_list, kmp_int32 ndeps_noalias, kmp_depend_info_t *noalias_dep_list, kmp_int32 has_no_wait)
omp_memspace_handle_t const omp_large_cap_mem_space
int __kmp_force_monotonic
kmp_info_t ** __kmp_threads
void __kmp_abort(char const *format,...)
void __kmp_hidden_helper_initz_release()
enum sched_type __kmp_sched
void __kmp_suspend_uninitialize_thread(kmp_info_t *th)
void * ___kmp_page_allocate(size_t size KMP_SRC_LOC_DECL)
struct kmp_cached_addr kmp_cached_addr_t
int __kmp_enable_task_throttling
void __kmp_unregister_root(int gtid)
void __kmp_finalize_bget(kmp_info_t *th)
static void __kmp_reset_root_init_mask(int gtid)
kmp_uint32 __kmp_barrier_gather_bb_dflt
KMP_EXPORT void __kmpc_omp_task_begin_if0(ident_t *loc_ref, kmp_int32 gtid, kmp_task_t *task)
kmp_uint32 __kmp_barrier_release_bb_dflt
struct dispatch_shared_info32 dispatch_shared_info32_t
int __kmp_task_stealing_constraint
int __kmp_need_register_atfork
struct private_common * kmp_threadprivate_insert(int gtid, void *pc_addr, void *data_addr, size_t pc_size)
void __kmp_parallel_deo(int *gtid_ref, int *cid_ref, ident_t *loc_ref)
struct kmp_target_data kmp_target_data_t
int __kmp_dispatch_num_buffers
KMP_EXPORT void __kmpc_unset_nest_lock(ident_t *loc, kmp_int32 gtid, void **user_lock)
#define SCHEDULE_WITHOUT_MODIFIERS(s)
kmp_uint32 __kmp_yield_init
KMP_EXPORT void __kmp_set_teams_thread_limit(int limit)
void __kmp_internal_end_dest(void *)
void * __kmpc_realloc(int gtid, void *ptr, size_t sz, omp_allocator_handle_t al, omp_allocator_handle_t free_al)
void __kmp_suspend_oncore(int th_gtid, kmp_flag_oncore *flag)
bool __kmp_dflt_max_active_levels_set
void __kmp_run_after_invoked_task(int gtid, int tid, kmp_info_t *this_thr, kmp_team_t *team)
size_t KMP_EXPAND_NAME() ompc_get_affinity_format(char *buffer, size_t size)
KMP_EXPORT void KMPC_CONVENTION kmpc_set_defaults(char const *)
void __kmp_lock_suspend_mx(kmp_info_t *th)
struct dispatch_shared_info64 dispatch_shared_info64_t
omp_memspace_handle_t const llvm_omp_target_shared_mem_space
char * __kmp_debug_buffer
omp_memspace_handle_t const omp_high_bw_mem_space
void __kmp_parallel_initialize(void)
void __kmp_terminate_thread(int gtid)
int __kmp_nesting_mode_nlevels
void __kmp_set_nesting_mode_threads()
void __kmp_unregister_library(void)
int __kmpc_dispatch_next_4u(ident_t *loc, kmp_int32 gtid, kmp_int32 *p_last, kmp_uint32 *p_lb, kmp_uint32 *p_ub, kmp_int32 *p_st)
See __kmpc_dispatch_next_4.
int(* launch_t)(int gtid)
int __kmp_ignore_mppbeg(void)
void __kmp_create_worker(int gtid, kmp_info_t *th, size_t stack_size)
int * __kmp_nesting_nth_level
KMP_EXPORT void __kmpc_doacross_wait(ident_t *loc, kmp_int32 gtid, const kmp_int64 *vec)
omp_allocator_handle_t const omp_const_mem_alloc
KMP_EXPORT void __kmpc_set_nest_lock(ident_t *loc, kmp_int32 gtid, void **user_lock)
volatile int __kmp_init_parallel
omp_allocator_handle_t const omp_pteam_mem_alloc
kmp_queuing_lock_t __kmp_dispatch_lock
KMP_EXPORT int KMPC_CONVENTION kmpc_set_affinity_mask_proc(int, kmp_affinity_mask_t *)
omp_allocator_handle_t const llvm_omp_target_host_mem_alloc
int __kmp_need_register_atfork_specified
omp_allocator_handle_t const kmp_max_mem_alloc
kmp_int32 __kmp_enable_hidden_helper
struct kmp_desc_base kmp_desc_base_t
enum kmp_sched kmp_sched_t
void __kmp_internal_fork(ident_t *id, int gtid, kmp_team_t *team)
void __kmp_aux_set_stacksize(size_t arg)
static const size_t KMP_AFFINITY_FORMAT_SIZE
enum library_type __kmp_library
void __kmp_tasking_barrier(kmp_team_t *team, kmp_info_t *thread, int gtid)
void __kmp_push_num_teams(ident_t *loc, int gtid, int num_teams, int num_threads)
struct kmp_tasking_flags kmp_tasking_flags_t
omp_memspace_handle_t kmp_memspace_t
static bool __kmp_is_hybrid_cpu()
void __kmp_clear_system_time(void)
KMP_EXPORT kmp_uint64 __kmpc_get_parent_taskid()
struct KMP_ALIGN_CACHE kmp_base_team kmp_base_team_t
size_t __kmp_aux_capture_affinity(int gtid, const char *format, kmp_str_buf_t *buffer)
KMP_EXPORT int __kmp_get_max_teams(void)
void KMPC_SET_NESTED(int flag)
void(* kmpc_micro_bound)(kmp_int32 *bound_tid, kmp_int32 *bound_nth,...)
void __kmp_get_schedule(int gtid, kmp_sched_t *sched, int *chunk)
kmp_team_t * __kmp_reap_team(kmp_team_t *)
kmp_key_t __kmp_gtid_threadprivate_key
KMP_EXPORT void * __kmpc_threadprivate(ident_t *, kmp_int32 global_tid, void *data, size_t size)
struct kmp_task_pri kmp_task_pri_t
kmp_hws_item_t __kmp_hws_socket
KMP_EXPORT void KMPC_CONVENTION ompc_set_schedule(omp_sched_t, int)
int __kmp_fork_call(ident_t *loc, int gtid, enum fork_context_e fork_context, kmp_int32 argc, microtask_t microtask, launch_t invoker, kmp_va_list ap)
kmp_info_t * __kmp_thread_pool_insert_pt
KMP_EXPORT kmp_int32 __kmpc_cancel(ident_t *loc_ref, kmp_int32 gtid, kmp_int32 cncl_kind)
KMP_EXPORT void * kmpc_calloc(size_t nelem, size_t elsize)
int __kmpc_dispatch_next_8u(ident_t *loc, kmp_int32 gtid, kmp_int32 *p_last, kmp_uint64 *p_lb, kmp_uint64 *p_ub, kmp_int64 *p_st)
See __kmpc_dispatch_next_4.
int __kmp_get_global_thread_id(void)
#define USE_NESTED_HOT_ARG(x)
int __kmp_env_consistency_check
#define bs_reduction_barrier
void __kmp_runtime_destroy(void)
kmp_uint64 __kmp_pause_init
kmp_uint64 __kmp_taskloop_min_tasks
KMP_EXPORT int KMPC_CONVENTION ompc_get_ancestor_thread_num(int)
union KMP_ALIGN_CACHE kmp_desc kmp_desc_t
char const * __kmp_barrier_branch_bit_env_name[bs_last_barrier]
kmp_hws_item_t __kmp_hws_proc
void __kmp_aux_display_affinity(int gtid, const char *format)
static void __kmp_sched_apply_mods_intkind(kmp_sched_t kind, enum sched_type *internal_kind)
void __kmp_fulfill_event(kmp_event_t *event)
KMP_EXPORT void __kmpc_taskgroup(ident_t *loc, int gtid)
int __kmp_read_system_info(struct kmp_sys_info *info)
void * ___kmp_thread_realloc(kmp_info_t *th, void *ptr, size_t size KMP_SRC_LOC_DECL)
kmp_uint32 __kmp_neq_4(kmp_uint32 value, kmp_uint32 checker)
volatile int __kmp_hidden_helper_team_done
KMP_EXPORT kmp_depnode_list_t * __kmpc_task_get_successors(kmp_task_t *task)
void __kmp_push_proc_bind(ident_t *loc, int gtid, kmp_proc_bind_t proc_bind)
static void __kmp_sched_apply_mods_stdkind(kmp_sched_t *kind, enum sched_type internal_kind)
struct kmp_base_depnode kmp_base_depnode_t
void __kmp_init_nesting_mode()
void __kmp_free_team(kmp_root_t *, kmp_team_t *USE_NESTED_HOT_ARG(kmp_info_t *))
std::atomic< kmp_int32 > __kmp_unexecuted_hidden_helper_tasks
KMP_EXPORT int KMPC_CONVENTION kmpc_unset_affinity_mask_proc(int, kmp_affinity_mask_t *)
KMP_EXPORT void __kmpc_end_scope(ident_t *loc, kmp_int32 gtid, void *reserved)
KMP_EXPORT int __kmpc_invoke_task_func(int gtid)
void * __kmpc_aligned_alloc(int gtid, size_t align, size_t sz, omp_allocator_handle_t al)
size_t __kmp_sys_min_stksize
char __kmp_blocktime_units
void * ___kmp_allocate(size_t size KMP_SRC_LOC_DECL)
KMP_EXPORT void KMPC_CONVENTION ompc_set_max_active_levels(int)
struct kmp_sched_flags kmp_sched_flags_t
kmp_hws_item_t __kmp_hws_core
union KMP_ALIGN_CACHE kmp_ordered_team kmp_ordered_team_t
int __kmp_invoke_task_func(int gtid)
struct kmp_base_global kmp_base_global_t
void ompc_set_nested(int flag)
kmp_uint32 __kmp_barrier_gather_branch_bits[bs_last_barrier]
KMP_EXPORT void __kmpc_scope(ident_t *loc, kmp_int32 gtid, void *reserved)
KMP_EXPORT void KMPC_CONVENTION kmpc_set_stacksize_s(size_t)
size_t __kmp_malloc_pool_incr
static int __kmp_adjust_gtid_for_hidden_helpers(int gtid)
kmp_task_t * __kmp_task_alloc(ident_t *loc_ref, kmp_int32 gtid, kmp_tasking_flags_t *flags, size_t sizeof_kmp_task_t, size_t sizeof_shareds, kmp_routine_entry_t task_entry)
void __kmp_adjust_num_threads(int new_nproc)
void __kmpc_dispatch_fini_8(ident_t *loc, kmp_int32 gtid)
See __kmpc_dispatch_fini_4.
KMP_EXPORT void kmpc_free(void *ptr)
int __kmp_threads_capacity
KMP_EXPORT int __kmpc_test_lock(ident_t *loc, kmp_int32 gtid, void **user_lock)
kmp_info_t ** __kmp_hidden_helper_threads
kmp_uint32 __kmp_le_4(kmp_uint32 value, kmp_uint32 checker)
void __kmp_push_current_task_to_thread(kmp_info_t *this_thr, kmp_team_t *team, int tid)
int __kmp_debug_buf_warn_chars
static int __kmp_gtid_from_tid(int tid, const kmp_team_t *team)
KMP_EXPORT void KMPC_FOR_STATIC_INIT(ident_t *loc, kmp_int32 global_tid, kmp_int32 schedtype, kmp_int32 *plastiter, kmp_int *plower, kmp_int *pupper, kmp_int *pstride, kmp_int incr, kmp_int chunk)
void __kmp_resume_oncore(int target_gtid, kmp_flag_oncore *flag)
bool __kmp_wpolicy_passive
void __kmp_save_internal_controls(kmp_info_t *thread)
size_t KMP_EXPAND_NAME() ompc_capture_affinity(char *buffer, size_t buf_size, char const *format)
void __kmp_push_task_team_node(kmp_info_t *thread, kmp_team_t *team)
void __kmp_threadprivate_resize_cache(int newCapacity)
union kmp_r_sched kmp_r_sched_t
void __kmp_runtime_initialize(void)
int __kmp_invoke_teams_master(int gtid)
void __kmp_hidden_helper_initialize()
volatile int __kmp_init_hidden_helper_threads
void KMPC_SET_NUM_THREADS(int arg)
KMP_EXPORT kmp_int32 __kmpc_cancellationpoint(ident_t *loc_ref, kmp_int32 gtid, kmp_int32 cncl_kind)
void __kmp_common_destroy_gtid(int gtid)
int __kmp_try_suspend_mx(kmp_info_t *th)
static void __kmp_aux_convert_blocktime(int *bt)
int __kmpc_dispatch_next_8(ident_t *loc, kmp_int32 gtid, kmp_int32 *p_last, kmp_int64 *p_lb, kmp_int64 *p_ub, kmp_int64 *p_st)
See __kmpc_dispatch_next_4.
int __kmp_display_affinity
enum sched_type __kmp_guided
void __kmp_resume_64(int target_gtid, kmp_flag_64< C, S > *flag)
void __kmp_resume_32(int target_gtid, kmp_flag_32< C, S > *flag)
KMP_EXPORT int __kmp_get_teams_thread_limit(void)
#define KMP_INLINE_ARGV_ENTRIES
int __kmp_execute_tasks_32(kmp_info_t *thread, kmp_int32 gtid, kmp_flag_32< C, S > *flag, int final_spin, int *thread_finished, kmp_int32 is_constrained)
const char * __kmp_hw_get_core_type_string(kmp_hw_core_type_t type)
void __kmpc_dispatch_fini_8u(ident_t *loc, kmp_int32 gtid)
See __kmpc_dispatch_fini_4.
PACKED_REDUCTION_METHOD_T __kmp_force_reduction_method
void __kmp_atomic_suspend_64(int th_gtid, kmp_atomic_flag_64< C, S > *flag)
KMP_EXPORT int KMPC_CONVENTION ompc_get_team_size(int)
void * kmp_affinity_mask_t
void __kmp_serial_initialize(void)
omp_allocator_handle_t const omp_thread_mem_alloc
kmp_uint32 __kmp_wait_4(kmp_uint32 volatile *spinner, kmp_uint32 checker, kmp_uint32(*pred)(kmp_uint32, kmp_uint32), void *obj)
void __kmp_resume_if_soft_paused()
KMP_EXPORT void __kmpc_init_lock_with_hint(ident_t *loc, kmp_int32 gtid, void **user_lock, uintptr_t hint)
kmp_int32 __kmp_max_task_priority
void __kmp_initialize_bget(kmp_info_t *th)
static void __kmp_assert_valid_gtid(kmp_int32 gtid)
int __kmp_teams_thread_limit
KMP_EXPORT void * kmpc_realloc(void *ptr, size_t size)
void __kmp_cleanup_hierarchy()
KMP_EXPORT kmp_task_t * __kmpc_omp_target_task_alloc(ident_t *loc_ref, kmp_int32 gtid, kmp_int32 flags, size_t sizeof_kmp_task_t, size_t sizeof_shareds, kmp_routine_entry_t task_entry, kmp_int64 device_id)
KMP_EXPORT void __kmpc_init_lock(ident_t *loc, kmp_int32 gtid, void **user_lock)
void __kmp_serialized_parallel(ident_t *id, kmp_int32 gtid)
void ompc_set_num_threads(int arg)
kmp_int32 __kmp_omp_task(kmp_int32 gtid, kmp_task_t *new_task, bool serialize_immediate)
struct kmp_base_task_team kmp_base_task_team_t
void __kmp_pop_current_task_from_thread(kmp_info_t *this_thr)
void __kmp_gtid_set_specific(int gtid)
char const * __kmp_barrier_pattern_env_name[bs_last_barrier]
void __kmp_internal_begin(void)
std::atomic< int > __kmp_debug_count
void __kmp_task_team_setup(kmp_info_t *this_thr, kmp_team_t *team)
int __kmp_atomic_execute_tasks_64(kmp_info_t *thread, kmp_int32 gtid, kmp_atomic_flag_64< C, S > *flag, int final_spin, int *thread_finished, kmp_int32 is_constrained)
static kmp_info_t * __kmp_thread_from_gtid(int gtid)
void __kmp_expand_file_name(char *result, size_t rlen, char *pattern)
void __kmp_atomic_resume_64(int target_gtid, kmp_atomic_flag_64< C, S > *flag)
void * ___kmp_thread_calloc(kmp_info_t *th, size_t nelem, size_t elsize KMP_SRC_LOC_DECL)
static int __kmp_gtid_from_thread(const kmp_info_t *thr)
omp_allocator_handle_t const llvm_omp_target_shared_mem_alloc
@ KMP_HW_MAX_NUM_CORE_TYPES
@ KMP_HW_CORE_TYPE_UNKNOWN
void __kmpc_dispatch_init_4u(ident_t *loc, kmp_int32 gtid, enum sched_type schedule, kmp_uint32 lb, kmp_uint32 ub, kmp_int32 st, kmp_int32 chunk)
See __kmpc_dispatch_init_4.
void __kmp_suspend_initialize_thread(kmp_info_t *th)
volatile int __kmp_init_serial
@ reduction_method_not_defined
void __kmpc_dispatch_init_8u(ident_t *loc, kmp_int32 gtid, enum sched_type schedule, kmp_uint64 lb, kmp_uint64 ub, kmp_int64 st, kmp_int64 chunk)
See __kmpc_dispatch_init_4.
void __kmp_exit_thread(int exit_status)
KMP_EXPORT kmp_base_depnode_t * __kmpc_task_get_depnode(kmp_task_t *task)
int __kmp_invoke_microtask(microtask_t pkfn, int gtid, int npr, int argc, void *argv[])
kmp_int32 __kmp_hidden_helper_threads_num
void __kmpc_dispatch_fini_4u(ident_t *loc, kmp_int32 gtid)
See __kmpc_dispatch_fini_4.
static void __kmp_type_convert(T1 src, T2 *dest)
void __kmp_join_call(ident_t *loc, int gtid, int exit_teams=0)
enum kmp_bar_pat kmp_bar_pat_e
void __kmp_fini_memkind()
KMP_EXPORT kmp_int32 __kmp_get_reduce_method(void)
omp_memspace_handle_t const llvm_omp_target_device_mem_space
int __kmp_ignore_mppend(void)
void __kmp_wait_64(kmp_info_t *this_thr, kmp_flag_64<> *flag, int final_spin)
int __kmp_debug_buf_atomic
struct KMP_ALIGN_CACHE kmp_bstate kmp_bstate_t
KMP_EXPORT void __kmpc_unset_lock(ident_t *loc, kmp_int32 gtid, void **user_lock)
kmp_bar_pat_e __kmp_barrier_release_pattern[bs_last_barrier]
std::atomic< kmp_int32 > __kmp_team_counter
void __kmp_reap_worker(kmp_info_t *th)
void __kmpc_dispatch_init_8(ident_t *loc, kmp_int32 gtid, enum sched_type schedule, kmp_int64 lb, kmp_int64 ub, kmp_int64 st, kmp_int64 chunk)
See __kmpc_dispatch_init_4.
void __kmp_hidden_helper_threads_deinitz_release()
void __kmp_expand_host_name(char *buffer, size_t size)
int __kmpc_pause_resource(kmp_pause_status_t level)
union KMP_ALIGN_CACHE kmp_info kmp_info_t
enum sched_type __kmp_sch_map[]
void __kmp_task_team_wait(kmp_info_t *this_thr, kmp_team_t *team, int wait=1)
kmp_uint64 __kmp_hardware_timestamp(void)
void ___kmp_free(void *ptr KMP_SRC_LOC_DECL)
union KMP_ALIGN_CACHE kmp_time_global kmp_time_global_t
omp_allocator_handle_t const llvm_omp_target_device_mem_alloc
union KMP_ALIGN_CACHE kmp_global kmp_global_t
omp_allocator_handle_t const omp_null_allocator
kmp_uint32 __kmp_yield_next
void __kmp_aux_set_blocktime(int arg, kmp_info_t *thread, int tid)
KMP_ARCH_X86 KMP_ARCH_X86 KMP_ARCH_X86 KMP_ARCH_X86 KMP_ARCH_X86 KMP_ARCH_X86 kmp_int16
KMP_ARCH_X86 KMP_ARCH_X86 KMP_ARCH_X86 kmp_int8
#define KMP_BUILD_ASSERT(expr)
#define KMP_DEBUG_ASSERT(cond)
unsigned long long kmp_uint64
static volatile kmp_i18n_cat_status_t status
void __kmp_fatal(kmp_msg_t message,...)
kmp_ticket_lock_t kmp_lock_t
void(* microtask_t)(int *gtid, int *npr,...)
#define INTERNODE_CACHE_LINE
#define KMP_ATTRIBUTE_TARGET_WAITPKG
#define KMP_EXPAND_NAME(api_name)
__attribute__((noinline))
void microtask(int *global_tid, int *bound_tid)
struct private_common * data[KMP_HASH_TABLE_SIZE]
struct dispatch_private_info * next
std::atomic< kmp_uint32 > steal_flag
kmp_int32 ordered_dummy[KMP_MAX_ORDERED - 1]
volatile kmp_uint32 ordered_iteration
volatile kmp_uint32 iteration
volatile kmp_int32 num_done
volatile kmp_int64 num_done
volatile kmp_uint64 iteration
kmp_int64 ordered_dummy[KMP_MAX_ORDERED - 3]
volatile kmp_uint64 ordered_iteration
volatile kmp_uint32 buffer_index
kmp_int32 doacross_num_done
union dispatch_shared_info::shared_info u
volatile kmp_int32 doacross_buf_idx
volatile kmp_uint32 * doacross_flags
The ident structure that describes a source location.
kmp_int32 get_openmp_version()
char const * psource
String describing the source location.
kmp_int32 reserved_1
might be used in Fortran; see above
kmp_int32 reserved_2
not really used in Fortran any more; see above
kmp_int32 reserved_3
source[4] in Fortran, do not use for C++
kmp_int32 flags
also f.flags; KMP_IDENT_xxx flags; KMP_IDENT_KMPC identifies this union member
kmp_allocator_t * fb_data
omp_alloctrait_value_t fb
omp_memspace_handle_t memspace
volatile kmp_uint32 t_value
std::atomic< kmp_int32 > npredecessors
std::atomic< kmp_int32 > nrefs
kmp_lock_t * mtx_locks[MAX_MTX_DEPS]
kmp_depnode_list_t * successors
enum dynamic_mode g_dynamic_mode
KMP_ALIGN_CACHE int th_set_nproc
kmp_cg_root_t * th_cg_roots
kmp_taskdata_t * th_current_task
KMP_ALIGN_CACHE kmp_team_p * th_serial_team
kmp_task_team_t * th_task_team
kmp_info_p * th_next_pool
kmp_uint64 th_team_bt_intervals
microtask_t th_teams_microtask
KMP_ALIGN_CACHE volatile kmp_int32 th_next_waiting
struct cons_header * th_cons
struct private_common * th_pri_head
omp_allocator_handle_t th_def_allocator
kmp_uint8 th_active_in_pool
std::atomic< kmp_uint32 > th_used_in_team
struct common_table * th_pri_common
kmp_teams_size_t th_teams_size
volatile void * th_sleep_loc
volatile kmp_uint32 th_spin_here
flag_type th_sleep_loc_type
kmp_proc_bind_t th_set_proc_bind
kmp_info_p * th_team_master
kmp_info_t * r_uber_thread
std::atomic< int > r_in_parallel
kmp_int32 tt_found_proxy_tasks
KMP_ALIGN_CACHE std::atomic< kmp_int32 > tt_unfinished_threads
kmp_bootstrap_lock_t tt_task_pri_lock
std::atomic< kmp_int32 > tt_num_task_pri
kmp_bootstrap_lock_t tt_threads_lock
kmp_int32 tt_untied_task_encountered
kmp_task_pri_t * tt_task_pri_list
kmp_int32 tt_hidden_helper_task_encountered
kmp_thread_data_t * tt_threads_data
KMP_ALIGN_CACHE volatile kmp_uint32 tt_active
kmp_task_team_t * tt_next
omp_allocator_handle_t t_def_allocator
kmp_proc_bind_t t_proc_bind
KMP_ALIGN_CACHE void ** t_argv
kmp_taskdata_t * t_implicit_task_taskdata
std::atomic< kmp_int32 > t_cancel_request
KMP_ALIGN_CACHE kmp_info_t ** t_threads
dispatch_shared_info_t * t_disp_buffer
KMP_ALIGN_CACHE kmp_internal_control_t * t_control_stack_top
KMP_ALIGN_CACHE int t_max_argc
std::atomic< int > t_construct
KMP_ALIGN_CACHE kmp_ordered_team_t t_ordered
KMP_ALIGN_CACHE int t_master_tid
kmp_int32 td_deque_ntasks
kmp_taskdata_t ** td_deque
kmp_int32 td_deque_last_stolen
kmp_bootstrap_lock_t td_deque_lock
kmp_uint32 * skip_per_level
KMP_ALIGN_CACHE volatile kmp_uint64 b_arrived
kmp_uint8 use_oncore_barrier
struct kmp_bstate * parent_bar
kmp_internal_control_t th_fixed_icvs
struct kmp_cached_addr * next
kmp_int32 cg_thread_limit
static TargetType to(SourceType src)
static TargetType to(SourceType src)
static TargetType to(SourceType src)
static TargetType to(SourceType src)
static TargetType to(SourceType src)
static TargetType to(SourceType src)
static TargetType to(SourceType src)
static TargetType to(SourceType src)
static TargetType to(SourceType src)
static TargetType to(SourceType src)
static TargetType to(SourceType src)
static TargetType to(SourceType src)
struct kmp_depend_info::@8::@10 flags
kmp_dephash_entry_t * next_in_bucket
kmp_depnode_list_t * last_set
kmp_depnode_list_t * prev_set
kmp_dephash_entry_t ** buckets
kmp_depnode_list_t * next
void(* th_dxo_fcn)(int *gtid, int *cid, ident_t *)
kmp_int32 th_doacross_buf_idx
volatile kmp_uint32 * th_doacross_flags
dispatch_private_info_t * th_dispatch_pr_current
kmp_int64 * th_doacross_info
dispatch_private_info_t * th_disp_buffer
void(* th_deo_fcn)(int *gtid, int *cid, ident_t *)
dispatch_shared_info_t * th_dispatch_sh_current
kmp_proc_bind_t proc_bind
struct kmp_internal_control * next
PACKED_REDUCTION_METHOD_T packed_reduction_method
volatile int this_construct
kmp_proc_bind_t * bind_types
struct kmp_old_threads_list_t * next
int length[KMP_MAX_FIELDS]
int offset[KMP_MAX_FIELDS]
struct kmp_task_affinity_info::@11 flags
kmp_task_team_list_t * next
kmp_task_team_t * task_team
void * shareds
pointer to block of pointers to shared vars
kmp_int32 part_id
part id for the task
kmp_routine_entry_t routine
pointer to routine to call for executing task
kmp_uint32 td_taskwait_counter
ident_t * td_taskwait_ident
kmp_task_team_t * td_task_team
kmp_dephash_t * td_dephash
kmp_taskdata_t * td_parent
std::atomic< kmp_int32 > td_incomplete_child_tasks
std::atomic< kmp_int32 > td_untied_count
kmp_taskgroup_t * td_taskgroup
kmp_info_p * td_alloc_thread
kmp_depnode_t * td_depnode
kmp_int32 td_taskwait_thread
kmp_tasking_flags_t td_flags
kmp_taskdata_t * td_last_tied
KMP_ALIGN_CACHE kmp_internal_control_t td_icvs
kmp_event_t td_allow_completion_event
kmp_target_data_t td_target_data
KMP_ALIGN_CACHE std::atomic< kmp_int32 > td_allocated_child_tasks
std::atomic< kmp_int32 > cancel_request
std::atomic< kmp_int32 > count
struct kmp_taskgroup * parent
kmp_int32 reduce_num_data
unsigned priority_specified
unsigned destructors_thunk
struct private_common * next
struct private_common * link
struct private_data * next
union shared_common::@4 cct
union shared_common::@3 ct
union shared_common::@5 dt
struct private_data * pod_init
struct shared_common * next
struct shared_common * data[KMP_HASH_TABLE_SIZE]
dispatch_private_info64_t p64
dispatch_private_info32_t p32
dispatch_shared_info64_t s64
dispatch_shared_info32_t s32
kmp_routine_entry_t destructors
kmp_int32 priority
priority specified by user for the task
enum sched_type r_sched_type
kmp_base_thread_data_t td
kmp_uint64 __kmp_ticks_per_usec
void __kmp_reap_monitor(kmp_info_t *th)
kmp_uint64 __kmp_ticks_per_msec
int __kmp_read_from_file(char const *path, char const *format,...)
void __kmp_register_atfork(void)
void __kmp_free_handle(kmp_thread_t tHandle)
void __kmp_affinity_determine_capable(const char *env_var)
void __kmp_affinity_bind_thread(int proc)
int __kmp_get_load_balance(int max)
int __kmp_still_running(kmp_info_t *th)
void __kmp_initialize_system_tick(void)
int __kmp_is_thread_alive(kmp_info_t *th, DWORD *exit_val)