#include "kmp_config.h"
#ifndef KMP_STATIC_STEAL_ENABLED
#define KMP_STATIC_STEAL_ENABLED 1
#define KMP_WEIGHTED_ITERATIONS_SUPPORTED \
  (KMP_AFFINITY_SUPPORTED && KMP_STATIC_STEAL_ENABLED && \
   (KMP_ARCH_X86 || KMP_ARCH_X86_64))
#define TASK_CURRENT_NOT_QUEUED 0
#define TASK_CURRENT_QUEUED 1
#define TASK_NOT_PUSHED 1
#define TASK_SUCCESSFULLY_PUSHED 0
#define TASK_EXPLICIT 1
#define TASK_IMPLICIT 0
#define TASK_DETACHABLE 1
#define TASK_UNDETACHABLE 0
#define KMP_CANCEL_THREADS
#define KMP_THREAD_ATTR
#if defined(__ANDROID__)
#undef KMP_CANCEL_THREADS
#undef KMP_CANCEL_THREADS
#undef KMP_USE_HIER_SCHED
#define KMP_USE_HIER_SCHED KMP_AFFINITY_SUPPORTED
#if KMP_USE_HWLOC && KMP_AFFINITY_SUPPORTED && !defined(OMPD_SKIP_HWLOC)
#define KMP_HWLOC_ENABLED 1
#ifndef HWLOC_OBJ_NUMANODE
#define HWLOC_OBJ_NUMANODE HWLOC_OBJ_NODE
#ifndef HWLOC_OBJ_PACKAGE
#define HWLOC_OBJ_PACKAGE HWLOC_OBJ_SOCKET
#define KMP_HWLOC_ENABLED 0
#if KMP_ARCH_X86 || KMP_ARCH_X86_64
#include <xmmintrin.h>
#define KMP_INTERNAL_MALLOC(sz) malloc(sz)
#define KMP_INTERNAL_FREE(p) free(p)
#define KMP_INTERNAL_REALLOC(p, sz) realloc((p), (sz))
#define KMP_INTERNAL_CALLOC(n, sz) calloc((n), (sz))
#define KMP_HANDLE_SIGNALS ((KMP_OS_UNIX && !KMP_OS_WASI) || KMP_OS_WINDOWS)
#if !defined NSIG && defined _NSIG
#pragma weak clock_gettime
#define UNLIKELY(x) (x)
#ifndef USE_FAST_MEMORY
#define USE_FAST_MEMORY 3
#ifndef USE_CMP_XCHG_FOR_BGET
#define USE_CMP_XCHG_FOR_BGET 1
#define KMP_NSEC_PER_SEC 1000000000L
#define KMP_USEC_PER_SEC 1000000L
#define KMP_NSEC_PER_USEC 1000L
template <bool C = false, bool S = true> class kmp_flag_32;
template <bool C = false, bool S = true> class kmp_flag_64;
#define KMP_PACK_64(HIGH_32, LOW_32) \
  ((kmp_int64)((((kmp_uint64)(HIGH_32)) << 32) | (kmp_uint64)(LOW_32)))
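// Illustrative sketch (not part of the original header): KMP_PACK_64 packs two
// 32-bit values into one kmp_int64; a shift recovers the high word and a
// truncating cast recovers the low word.
//   kmp_int64 packed = KMP_PACK_64(7, 42);
//   kmp_int32 high = (kmp_int32)(((kmp_uint64)packed) >> 32); // 7
//   kmp_int32 low = (kmp_int32)packed;                        // 42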
#define SKIP_WS(_x) \
  while (*(_x) == ' ' || *(_x) == '\t') \
#define SKIP_DIGITS(_x) \
  while (*(_x) >= '0' && *(_x) <= '9') \
#define SKIP_TOKEN(_x) \
  while ((*(_x) >= '0' && *(_x) <= '9') || (*(_x) >= 'a' && *(_x) <= 'z') || \
         (*(_x) >= 'A' && *(_x) <= 'Z') || *(_x) == '_') \
#define SKIP_TO(_x, _c) \
  while (*(_x) != '\0' && *(_x) != (_c)) \
#define KMP_MAX(x, y) ((x) > (y) ? (x) : (y))
#define KMP_MIN(x, y) ((x) < (y) ? (x) : (y))
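// Sketch of how the string-parsing helpers above are used, assuming the
// elided macro bodies advance the pointer one character per iteration:
//   char buf[] = "  1234 rest";
//   char *p = buf;
//   SKIP_WS(p);     // p now points at "1234 rest"
//   SKIP_DIGITS(p); // p now points at " rest"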
#ifdef USE_LOAD_BALANCE
  dynamic_load_balance,
#ifndef KMP_SCHED_TYPE_DEFINED
#define KMP_SCHED_TYPE_DEFINED
#if KMP_STATIC_STEAL_ENABLED
  kmp_sched_static_steal = 102,
#define SCHEDULE_HAS_MONOTONIC(s) (((s)&kmp_sch_modifier_monotonic) != 0)
#define SCHEDULE_HAS_NONMONOTONIC(s) (((s)&kmp_sch_modifier_nonmonotonic) != 0)
#define SCHEDULE_HAS_NO_MODIFIERS(s) \
  (((s) & (kmp_sch_modifier_nonmonotonic | kmp_sch_modifier_monotonic)) == 0)
#define SCHEDULE_GET_MODIFIERS(s) \
  ((enum sched_type)( \
      (s) & (kmp_sch_modifier_nonmonotonic | kmp_sch_modifier_monotonic)))
#define SCHEDULE_SET_MODIFIERS(s, m) \
  (s = (enum sched_type)((kmp_int32)s | (kmp_int32)m))
#define SCHEDULE_NONMONOTONIC 0
#define SCHEDULE_MONOTONIC 1
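// Sketch: querying and setting the monotonic/nonmonotonic schedule modifiers.
//   enum sched_type s = kmp_sch_dynamic_chunked;
//   SCHEDULE_SET_MODIFIERS(s, kmp_sch_modifier_nonmonotonic);
//   if (SCHEDULE_HAS_NONMONOTONIC(s) && !SCHEDULE_HAS_MONOTONIC(s)) {
//     // nonmonotonic dispatch path
//   }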
  *internal_kind = (enum sched_type)((int)*internal_kind |
enum mic_type { non_mic, mic1, mic2, mic3, dummy };
#undef KMP_FAST_REDUCTION_BARRIER
#define KMP_FAST_REDUCTION_BARRIER 1
#undef KMP_FAST_REDUCTION_CORE_DUO
#if KMP_ARCH_X86 || KMP_ARCH_X86_64
#define KMP_FAST_REDUCTION_CORE_DUO 1
#if KMP_FAST_REDUCTION_BARRIER
#define PACK_REDUCTION_METHOD_AND_BARRIER(reduction_method, barrier_type) \
  ((reduction_method) | (barrier_type))
#define UNPACK_REDUCTION_METHOD(packed_reduction_method) \
  ((enum _reduction_method)((packed_reduction_method) & (0x0000FF00)))
#define UNPACK_REDUCTION_BARRIER(packed_reduction_method) \
  ((enum barrier_type)((packed_reduction_method) & (0x000000FF)))
#define PACK_REDUCTION_METHOD_AND_BARRIER(reduction_method, barrier_type) \
#define UNPACK_REDUCTION_METHOD(packed_reduction_method) \
  (packed_reduction_method)
#define UNPACK_REDUCTION_BARRIER(packed_reduction_method) (bs_plain_barrier)
#define TEST_REDUCTION_METHOD(packed_reduction_method, which_reduction_block) \
  ((UNPACK_REDUCTION_METHOD(packed_reduction_method)) == \
   (which_reduction_block))
#if KMP_FAST_REDUCTION_BARRIER
#define TREE_REDUCE_BLOCK_WITH_REDUCTION_BARRIER \
  (PACK_REDUCTION_METHOD_AND_BARRIER(tree_reduce_block, bs_reduction_barrier))
#define TREE_REDUCE_BLOCK_WITH_PLAIN_BARRIER \
  (PACK_REDUCTION_METHOD_AND_BARRIER(tree_reduce_block, bs_plain_barrier))
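// Sketch of the pack/unpack round trip when KMP_FAST_REDUCTION_BARRIER is
// enabled: the reduction method occupies bits 8..15 and the barrier type
// bits 0..7, so both survive a pack/unpack cycle.
//   int packed = PACK_REDUCTION_METHOD_AND_BARRIER(tree_reduce_block,
//                                                  bs_reduction_barrier);
//   // UNPACK_REDUCTION_METHOD(packed)  == tree_reduce_block
//   // UNPACK_REDUCTION_BARRIER(packed) == bs_reduction_barrier
//   // TEST_REDUCTION_METHOD(packed, tree_reduce_block) holds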
#pragma warning(disable : 271 310)
#if KMP_ARCH_X86 || KMP_ARCH_X86_64
  KMP_HW_CORE_TYPE_ATOM = 0x20,
  KMP_HW_CORE_TYPE_CORE = 0x40,
#define KMP_HW_MAX_NUM_CORE_EFFS 8
#define KMP_DEBUG_ASSERT_VALID_HW_TYPE(type) \
  KMP_DEBUG_ASSERT(type >= (kmp_hw_t)0 && type < KMP_HW_LAST)
#define KMP_ASSERT_VALID_HW_TYPE(type) \
  KMP_ASSERT(type >= (kmp_hw_t)0 && type < KMP_HW_LAST)
#define KMP_FOREACH_HW_TYPE(type) \
  for (kmp_hw_t type = (kmp_hw_t)0; type < KMP_HW_LAST; \
       type = (kmp_hw_t)((int)type + 1))
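// Sketch: iterating every topology layer type with the helper above.
//   KMP_FOREACH_HW_TYPE(type) {
//     KMP_DEBUG_ASSERT_VALID_HW_TYPE(type);
//     // ... inspect this layer of the machine topology ...
//   }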
#if KMP_AFFINITY_SUPPORTED
#if _MSC_VER < 1600 && KMP_MSVC_COMPAT
typedef struct GROUP_AFFINITY {
#if KMP_GROUP_AFFINITY
extern int __kmp_num_proc_groups;
static const int __kmp_num_proc_groups = 1;
typedef DWORD (*kmp_GetActiveProcessorCount_t)(WORD);
extern kmp_GetActiveProcessorCount_t __kmp_GetActiveProcessorCount;
typedef WORD (*kmp_GetActiveProcessorGroupCount_t)(void);
extern kmp_GetActiveProcessorGroupCount_t __kmp_GetActiveProcessorGroupCount;
typedef BOOL (*kmp_GetThreadGroupAffinity_t)(HANDLE, GROUP_AFFINITY *);
extern kmp_GetThreadGroupAffinity_t __kmp_GetThreadGroupAffinity;
typedef BOOL (*kmp_SetThreadGroupAffinity_t)(HANDLE, const GROUP_AFFINITY *,
extern kmp_SetThreadGroupAffinity_t __kmp_SetThreadGroupAffinity;
extern hwloc_topology_t __kmp_hwloc_topology;
extern int __kmp_hwloc_error;
extern size_t __kmp_affin_mask_size;
#define KMP_AFFINITY_CAPABLE() (__kmp_affin_mask_size > 0)
#define KMP_AFFINITY_DISABLE() (__kmp_affin_mask_size = 0)
#define KMP_AFFINITY_ENABLE(mask_size) (__kmp_affin_mask_size = mask_size)
#define KMP_CPU_SET_ITERATE(i, mask) \
  for (i = (mask)->begin(); (int)i != (mask)->end(); i = (mask)->next(i))
#define KMP_CPU_SET(i, mask) (mask)->set(i)
#define KMP_CPU_ISSET(i, mask) (mask)->is_set(i)
#define KMP_CPU_CLR(i, mask) (mask)->clear(i)
#define KMP_CPU_ZERO(mask) (mask)->zero()
#define KMP_CPU_ISEMPTY(mask) (mask)->empty()
#define KMP_CPU_COPY(dest, src) (dest)->copy(src)
#define KMP_CPU_AND(dest, src) (dest)->bitwise_and(src)
#define KMP_CPU_COMPLEMENT(max_bit_number, mask) (mask)->bitwise_not()
#define KMP_CPU_UNION(dest, src) (dest)->bitwise_or(src)
#define KMP_CPU_EQUAL(dest, src) (dest)->is_equal(src)
#define KMP_CPU_ALLOC(ptr) (ptr = __kmp_affinity_dispatch->allocate_mask())
#define KMP_CPU_FREE(ptr) __kmp_affinity_dispatch->deallocate_mask(ptr)
#define KMP_CPU_ALLOC_ON_STACK(ptr) KMP_CPU_ALLOC(ptr)
#define KMP_CPU_FREE_FROM_STACK(ptr) KMP_CPU_FREE(ptr)
#define KMP_CPU_INTERNAL_ALLOC(ptr) KMP_CPU_ALLOC(ptr)
#define KMP_CPU_INTERNAL_FREE(ptr) KMP_CPU_FREE(ptr)
#define KMP_CPU_INDEX(arr, i) __kmp_affinity_dispatch->index_mask_array(arr, i)
#define KMP_CPU_ALLOC_ARRAY(arr, n) \
  (arr = __kmp_affinity_dispatch->allocate_mask_array(n))
#define KMP_CPU_FREE_ARRAY(arr, n) \
  __kmp_affinity_dispatch->deallocate_mask_array(arr)
#define KMP_CPU_INTERNAL_ALLOC_ARRAY(arr, n) KMP_CPU_ALLOC_ARRAY(arr, n)
#define KMP_CPU_INTERNAL_FREE_ARRAY(arr, n) KMP_CPU_FREE_ARRAY(arr, n)
#define __kmp_get_system_affinity(mask, abort_bool) \
  (mask)->get_system_affinity(abort_bool)
#define __kmp_set_system_affinity(mask, abort_bool) \
  (mask)->set_system_affinity(abort_bool)
#define __kmp_get_proc_group(mask) (mask)->get_proc_group()
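// Sketch: a typical mask workflow built from the wrappers above.
//   kmp_affin_mask_t *mask;
//   KMP_CPU_ALLOC(mask);
//   KMP_CPU_ZERO(mask);
//   KMP_CPU_SET(0, mask); // add logical CPU 0
//   if (!KMP_CPU_ISEMPTY(mask))
//     __kmp_set_system_affinity(mask, /*abort_on_error=*/TRUE);
//   KMP_CPU_FREE(mask);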
  void *operator new(size_t n);
  void operator delete(void *p);
  void *operator new[](size_t n);
  void operator delete[](void *p);
  virtual void set(int i) {}
  virtual bool is_set(int i) const { return false; }
  virtual void clear(int i) {}
  virtual void zero() {}
  virtual bool empty() const { return true; }
  virtual void copy(const Mask *src) {}
  virtual void bitwise_and(const Mask *rhs) {}
  virtual void bitwise_or(const Mask *rhs) {}
  virtual void bitwise_not() {}
  virtual bool is_equal(const Mask *rhs) const { return false; }
  virtual int begin() const { return 0; }
  virtual int end() const { return 0; }
  virtual int next(int previous) const { return 0; }
  virtual int set_process_affinity(bool abort_on_error) const { return -1; }
  virtual int set_system_affinity(bool abort_on_error) const { return -1; }
  virtual int get_system_affinity(bool abort_on_error) { return -1; }
  virtual int get_proc_group() const { return -1; }
  int get_max_cpu() const {
    KMP_CPU_SET_ITERATE(cpu, this) {
  void *operator new(size_t n);
  void operator delete(void *p);
  virtual ~KMPAffinity() = default;
  virtual void determine_capable(const char *env_var) {}
  virtual void bind_thread(int proc) {}
  virtual Mask *allocate_mask() { return nullptr; }
  virtual void deallocate_mask(Mask *m) {}
  virtual Mask *allocate_mask_array(int num) { return nullptr; }
  virtual void deallocate_mask_array(Mask *m) {}
  virtual Mask *index_mask_array(Mask *m, int index) { return nullptr; }
  static void pick_api();
  static void destroy_api();
  virtual api_type get_api_type() const {
  static bool picked_api;
typedef KMPAffinity::Mask kmp_affin_mask_t;
extern KMPAffinity *__kmp_affinity_dispatch;
class kmp_affinity_raii_t {
  kmp_affin_mask_t *mask;
  kmp_affinity_raii_t(const kmp_affin_mask_t *new_mask = nullptr)
      : mask(nullptr), restored(false) {
    if (KMP_AFFINITY_CAPABLE()) {
      __kmp_get_system_affinity(mask, true);
        __kmp_set_system_affinity(new_mask, true);
    if (mask && KMP_AFFINITY_CAPABLE() && !restored) {
      __kmp_set_system_affinity(mask, true);
  ~kmp_affinity_raii_t() { restore(); }
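// Sketch: temporarily overriding the calling thread's affinity; the saved
// mask is restored when the guard leaves scope.
//   {
//     kmp_affinity_raii_t guard(new_mask); // save current mask, apply new_mask
//     // ... code that must run with new_mask ...
//   } // previous affinity restored by the destructor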
#define KMP_AFFIN_MASK_PRINT_LEN 1024
enum affinity_top_method {
  affinity_top_method_all = 0,
#if KMP_ARCH_X86 || KMP_ARCH_X86_64
  affinity_top_method_apicid,
  affinity_top_method_x2apicid,
  affinity_top_method_x2apicid_1f,
  affinity_top_method_cpuinfo,
#if KMP_GROUP_AFFINITY
  affinity_top_method_group,
  affinity_top_method_flat,
  affinity_top_method_hwloc,
  affinity_top_method_default
#define affinity_respect_mask_default (2)
typedef struct kmp_affinity_flags_t {
  unsigned verbose : 1;
  unsigned warnings : 1;
  unsigned respect : 2;
  unsigned core_types_gran : 1;
  unsigned core_effs_gran : 1;
  unsigned omp_places : 1;
  unsigned reserved : 22;
} kmp_affinity_flags_t;
typedef struct kmp_affinity_ids_t {
typedef struct kmp_affinity_attrs_t {
  unsigned reserved : 15;
} kmp_affinity_attrs_t;
#define KMP_AFFINITY_ATTRS_UNKNOWN \
  { KMP_HW_CORE_TYPE_UNKNOWN, kmp_hw_attr_t::UNKNOWN_CORE_EFF, 0, 0 }
typedef struct kmp_affinity_t {
  enum affinity_type type;
  kmp_affinity_attrs_t core_attr_gran;
  kmp_affinity_flags_t flags;
  kmp_affin_mask_t *masks;
  kmp_affinity_ids_t *ids;
  kmp_affinity_attrs_t *attrs;
  unsigned num_os_id_masks;
  kmp_affin_mask_t *os_id_masks;
#define KMP_AFFINITY_INIT(env) \
  nullptr, affinity_default, KMP_HW_UNKNOWN, -1, KMP_AFFINITY_ATTRS_UNKNOWN, \
  {TRUE, FALSE, TRUE, affinity_respect_mask_default, FALSE, FALSE, \
   FALSE, FALSE, FALSE}, \
  0, nullptr, nullptr, nullptr, 0, nullptr, env \
extern enum affinity_top_method __kmp_affinity_top_method;
extern kmp_affinity_t __kmp_affinity;
extern kmp_affinity_t __kmp_hh_affinity;
extern kmp_affinity_t *__kmp_affinities[2];
extern kmp_affin_mask_t *__kmp_affin_fullMask;
extern kmp_affin_mask_t *__kmp_affin_origMask;
extern char *__kmp_cpuinfo_file;
#if KMP_WEIGHTED_ITERATIONS_SUPPORTED
extern int __kmp_first_osid_with_ecore;
extern int __kmp_tool;
extern char *__kmp_tool_libraries;
#if KMP_AFFINITY_SUPPORTED
#define KMP_PLACE_ALL (-1)
#define KMP_PLACE_UNDEFINED (-2)
#define KMP_AFFINITY_NON_PROC_BIND \
  ((__kmp_nested_proc_bind.bind_types[0] == proc_bind_false || \
    __kmp_nested_proc_bind.bind_types[0] == proc_bind_intel) && \
   (__kmp_affinity.num_masks > 0 || __kmp_affinity.type == affinity_balanced))
#define KMP_PAD(type, sz) \
  (sizeof(type) + (sz - ((sizeof(type) - 1) % (sz)) - 1))
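// Worked example of the padding macro: for sizeof(type) == 20 and sz == 64
// (a cache line), KMP_PAD yields 20 + (64 - ((20 - 1) % 64) - 1)
// = 20 + 44 = 64, i.e. the size rounded up to a whole multiple of sz.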
#define KMP_GTID_DNE (-2)
#define KMP_GTID_SHUTDOWN (-3)
#define KMP_GTID_MONITOR (-4)
#define KMP_GTID_UNKNOWN (-5)
#define KMP_GTID_MIN (-6)
#define omp_atv_default ((omp_uintptr_t)-1)
#if KMP_HWLOC_ENABLED
extern void *__kmpc_calloc(int gtid, size_t nmemb, size_t sz,
extern void *__kmp_alloc(int gtid, size_t align, size_t sz,
extern void *__kmp_calloc(int gtid, size_t align, size_t nmemb, size_t sz,
#if ENABLE_LIBOMPTARGET
extern void __kmp_init_target_task();
#define KMP_UINT64_MAX \
  (~((kmp_uint64)1 << ((sizeof(kmp_uint64) * (1 << 3)) - 1)))
#define KMP_MIN_NTH 1
#if defined(PTHREAD_THREADS_MAX) && PTHREAD_THREADS_MAX < INT_MAX
#define KMP_MAX_NTH PTHREAD_THREADS_MAX
#define KMP_MAX_NTH 64
#define KMP_MAX_NTH INT_MAX
#ifdef PTHREAD_STACK_MIN
#define KMP_MIN_STKSIZE ((size_t)PTHREAD_STACK_MIN)
#define KMP_MIN_STKSIZE ((size_t)(32 * 1024))
#if KMP_OS_AIX && KMP_ARCH_PPC
#define KMP_MAX_STKSIZE 0x10000000
#define KMP_MAX_STKSIZE (~((size_t)1 << ((sizeof(size_t) * (1 << 3)) - 1)))
#define KMP_DEFAULT_STKSIZE ((size_t)(2 * 1024 * 1024))
#elif KMP_ARCH_X86_64
#define KMP_DEFAULT_STKSIZE ((size_t)(4 * 1024 * 1024))
#define KMP_BACKUP_STKSIZE ((size_t)(2 * 1024 * 1024))
#define KMP_DEFAULT_STKSIZE ((size_t)(4 * 1024 * 1024))
#define KMP_DEFAULT_STKSIZE ((size_t)(4 * 1024 * 1024))
#define KMP_DEFAULT_STKSIZE ((size_t)(1024 * 1024))
#define KMP_DEFAULT_MALLOC_POOL_INCR ((size_t)(1024 * 1024))
#define KMP_MIN_MALLOC_POOL_INCR ((size_t)(4 * 1024))
#define KMP_MAX_MALLOC_POOL_INCR \
  (~((size_t)1 << ((sizeof(size_t) * (1 << 3)) - 1)))
#define KMP_MIN_STKOFFSET (0)
#define KMP_MAX_STKOFFSET KMP_MAX_STKSIZE
#define KMP_DEFAULT_STKOFFSET KMP_MIN_STKOFFSET
#define KMP_DEFAULT_STKOFFSET CACHE_LINE
#define KMP_MIN_STKPADDING (0)
#define KMP_MAX_STKPADDING (2 * 1024 * 1024)
#define KMP_BLOCKTIME_MULTIPLIER \
#define KMP_MIN_BLOCKTIME (0)
#define KMP_MAX_BLOCKTIME \
#define KMP_DEFAULT_BLOCKTIME (__kmp_is_hybrid_cpu() ? (0) : (200000))
#define KMP_DEFAULT_MONITOR_STKSIZE ((size_t)(64 * 1024))
#define KMP_MIN_MONITOR_WAKEUPS (1)
#define KMP_MAX_MONITOR_WAKEUPS (1000)
#define KMP_WAKEUPS_FROM_BLOCKTIME(blocktime, monitor_wakeups) \
  (((blocktime) == KMP_MAX_BLOCKTIME) ? (monitor_wakeups) \
   : ((blocktime) == KMP_MIN_BLOCKTIME) ? KMP_MAX_MONITOR_WAKEUPS \
   : ((monitor_wakeups) > (KMP_BLOCKTIME_MULTIPLIER / (blocktime))) \
         ? (monitor_wakeups) \
         : (KMP_BLOCKTIME_MULTIPLIER) / (blocktime))
#define KMP_INTERVALS_FROM_BLOCKTIME(blocktime, monitor_wakeups) \
  (((blocktime) + (KMP_BLOCKTIME_MULTIPLIER / (monitor_wakeups)) - 1) / \
   (KMP_BLOCKTIME_MULTIPLIER / (monitor_wakeups)))
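// Worked example, assuming KMP_BLOCKTIME_MULTIPLIER (its definition is elided
// above) counts blocktime units per second, 1000000 for microseconds: with
// blocktime = 200000 and monitor_wakeups = 10, one monitor interval is
// 1000000 / 10 = 100000 units, so KMP_INTERVALS_FROM_BLOCKTIME rounds up to
// (200000 + 100000 - 1) / 100000 = 2 intervals.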
#define KMP_BLOCKTIME(team, tid) \
  (get__bt_set(team, tid) ? get__blocktime(team, tid) : __kmp_dflt_blocktime)
#if KMP_OS_UNIX && (KMP_ARCH_X86 || KMP_ARCH_X86_64)
#if KMP_COMPILER_ICC || KMP_COMPILER_ICX
#define KMP_NOW() ((kmp_uint64)_rdtsc())
#define KMP_NOW() __kmp_hardware_timestamp()
#define KMP_BLOCKTIME_INTERVAL(team, tid) \
  ((kmp_uint64)KMP_BLOCKTIME(team, tid) * __kmp_ticks_per_usec)
#define KMP_BLOCKING(goal, count) ((goal) > KMP_NOW())
#define KMP_NOW() __kmp_now_nsec()
#define KMP_BLOCKTIME_INTERVAL(team, tid) \
  ((kmp_uint64)KMP_BLOCKTIME(team, tid) * (kmp_uint64)KMP_NSEC_PER_USEC)
#define KMP_BLOCKING(goal, count) ((count) % 1000 != 0 || (goal) > KMP_NOW())
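// Sketch of the spin loop these macros could drive; note the non-x86
// KMP_BLOCKING only consults the clock on every 1000th iteration to keep the
// common spin path cheap.
//   kmp_uint64 goal = KMP_NOW() + KMP_BLOCKTIME_INTERVAL(team, tid);
//   kmp_uint32 count = 0;
//   while (KMP_BLOCKING(goal, ++count)) {
//     KMP_CPU_PAUSE(); // defined further below
//   }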
#define KMP_MIN_STATSCOLS 40
#define KMP_MAX_STATSCOLS 4096
#define KMP_DEFAULT_STATSCOLS 80
#define KMP_MIN_INTERVAL 0
#define KMP_MAX_INTERVAL (INT_MAX - 1)
#define KMP_DEFAULT_INTERVAL 0
#define KMP_MIN_CHUNK 1
#define KMP_MAX_CHUNK (INT_MAX - 1)
#define KMP_DEFAULT_CHUNK 1
#define KMP_MIN_DISP_NUM_BUFF 1
#define KMP_DFLT_DISP_NUM_BUFF 7
#define KMP_MAX_DISP_NUM_BUFF 4096
#define KMP_MAX_ORDERED 8
#define KMP_MAX_FIELDS 32
#define KMP_MAX_BRANCH_BITS 31
#define KMP_MAX_ACTIVE_LEVELS_LIMIT INT_MAX
#define KMP_MAX_DEFAULT_DEVICE_LIMIT INT_MAX
#define KMP_MAX_TASK_PRIORITY_LIMIT INT_MAX
#if KMP_ARCH_X86 || KMP_ARCH_X86_64
#define KMP_TLS_GTID_MIN 5
#define KMP_TLS_GTID_MIN INT_MAX
#define KMP_MASTER_TID(tid) (0 == (tid))
#define KMP_WORKER_TID(tid) (0 != (tid))
#define KMP_MASTER_GTID(gtid) (0 == __kmp_tid_from_gtid((gtid)))
#define KMP_WORKER_GTID(gtid) (0 != __kmp_tid_from_gtid((gtid)))
#define KMP_INITIAL_GTID(gtid) (0 == (gtid))
#define TRUE (!FALSE)
#define KMP_INIT_WAIT 64U
#define KMP_NEXT_WAIT 32U
#define KMP_INIT_WAIT 1024U
#define KMP_NEXT_WAIT 512U
#define KMP_INIT_WAIT 1024U
#define KMP_NEXT_WAIT 512U
#elif KMP_OS_DRAGONFLY
#define KMP_INIT_WAIT 1024U
#define KMP_NEXT_WAIT 512U
#define KMP_INIT_WAIT 1024U
#define KMP_NEXT_WAIT 512U
#define KMP_INIT_WAIT 1024U
#define KMP_NEXT_WAIT 512U
#define KMP_INIT_WAIT 1024U
#define KMP_NEXT_WAIT 512U
#define KMP_INIT_WAIT 1024U
#define KMP_NEXT_WAIT 512U
#define KMP_INIT_WAIT 1024U
#define KMP_NEXT_WAIT 512U
#define KMP_INIT_WAIT 1024U
#define KMP_NEXT_WAIT 512U
#define KMP_INIT_WAIT 1024U
#define KMP_NEXT_WAIT 512U
#define KMP_INIT_WAIT 1024U
#define KMP_NEXT_WAIT 512U
#if KMP_ARCH_X86 || KMP_ARCH_X86_64
typedef struct kmp_cpuid {
typedef struct kmp_cpuinfo_flags_t {
  unsigned hybrid : 1;
  unsigned reserved : 29;
} kmp_cpuinfo_flags_t;
typedef struct kmp_cpuinfo {
  kmp_cpuinfo_flags_t flags;
  char name[3 * sizeof(kmp_cpuid_t)];
extern void __kmp_query_cpuid(kmp_cpuinfo_t *p);
static inline void __kmp_x86_cpuid(int leaf, int subleaf, struct kmp_cpuid *p) {
  __asm__ __volatile__("cpuid"
                       : "=a"(p->eax), "=b"(p->ebx), "=c"(p->ecx), "=d"(p->edx)
                       : "a"(leaf), "c"(subleaf));
static inline void __kmp_load_x87_fpu_control_word(const kmp_int16 *p) {
  __asm__ __volatile__("fldcw %0" : : "m"(*p));
static inline void __kmp_store_x87_fpu_control_word(kmp_int16 *p) {
  __asm__ __volatile__("fstcw %0" : "=m"(*p));
static inline void __kmp_clear_x87_fpu_status_word() {
  struct x87_fpu_state {
  struct x87_fpu_state fpu_state = {0, 0, 0, 0, 0, 0, 0};
  __asm__ __volatile__("fstenv %0\n\t"
                       "andw $0x7f00, %1\n\t"
                       : "+m"(fpu_state), "+m"(fpu_state.sw));
  __asm__ __volatile__("fnclex");
static inline void __kmp_load_mxcsr(const kmp_uint32 *p) { _mm_setcsr(*p); }
static inline void __kmp_store_mxcsr(kmp_uint32 *p) { *p = _mm_getcsr(); }
static inline void __kmp_load_mxcsr(const kmp_uint32 *p) {}
static inline void __kmp_store_mxcsr(kmp_uint32 *p) { *p = 0; }
extern void __kmp_x86_cpuid(int mode, int mode2, struct kmp_cpuid *p);
extern void __kmp_load_x87_fpu_control_word(const kmp_int16 *p);
extern void __kmp_store_x87_fpu_control_word(kmp_int16 *p);
extern void __kmp_clear_x87_fpu_status_word();
static inline void __kmp_load_mxcsr(const kmp_uint32 *p) { _mm_setcsr(*p); }
static inline void __kmp_store_mxcsr(kmp_uint32 *p) { *p = _mm_getcsr(); }
#define KMP_X86_MXCSR_MASK 0xffffffc0
#if KMP_HAVE_WAITPKG_INTRINSICS
#if KMP_HAVE_IMMINTRIN_H
#include <immintrin.h>
#elif KMP_HAVE_INTRIN_H
static inline int __kmp_tpause(uint32_t hint, uint64_t counter) {
#if !KMP_HAVE_WAITPKG_INTRINSICS
  uint32_t timeHi = uint32_t(counter >> 32);
  uint32_t timeLo = uint32_t(counter & 0xffffffff);
  __asm__ volatile("#tpause\n.byte 0x66, 0x0F, 0xAE, 0xF1\n"
                   : "a"(timeLo), "d"(timeHi), "c"(hint)
  return _tpause(hint, counter);
static inline void __kmp_umonitor(void *cacheline) {
#if !KMP_HAVE_WAITPKG_INTRINSICS
  __asm__ volatile("# umonitor\n.byte 0xF3, 0x0F, 0xAE, 0x01 "
  _umonitor(cacheline);
static inline int __kmp_umwait(uint32_t hint, uint64_t counter) {
#if !KMP_HAVE_WAITPKG_INTRINSICS
  uint32_t timeHi = uint32_t(counter >> 32);
  uint32_t timeLo = uint32_t(counter & 0xffffffff);
  __asm__ volatile("#umwait\n.byte 0xF2, 0x0F, 0xAE, 0xF1\n"
                   : "a"(timeLo), "d"(timeHi), "c"(hint)
  return _umwait(hint, counter);
#include <pmmintrin.h>
__kmp_mm_monitor(void *cacheline, unsigned extensions, unsigned hints) {
  _mm_monitor(cacheline, extensions, hints);
__kmp_mm_mwait(unsigned extensions, unsigned hints) {
  _mm_mwait(extensions, hints);
extern void __kmp_x86_pause(void);
static inline void __kmp_x86_pause(void) { _mm_delay_32(300); }
static inline void __kmp_x86_pause(void) { _mm_pause(); }
#define KMP_CPU_PAUSE() __kmp_x86_pause()
#define KMP_PPC64_PRI_LOW() __asm__ volatile("or 1, 1, 1")
#define KMP_PPC64_PRI_MED() __asm__ volatile("or 2, 2, 2")
#define KMP_PPC64_PRI_LOC_MB() __asm__ volatile("" : : : "memory")
#define KMP_CPU_PAUSE() \
    KMP_PPC64_PRI_LOW(); \
    KMP_PPC64_PRI_MED(); \
    KMP_PPC64_PRI_LOC_MB(); \
#define KMP_CPU_PAUSE()
#define KMP_INIT_YIELD(count) \
  { (count) = __kmp_yield_init; }
#define KMP_INIT_BACKOFF(time) \
  { (time) = __kmp_pause_init; }
#define KMP_OVERSUBSCRIBED \
  (TCR_4(__kmp_nth) > (__kmp_avail_proc ? __kmp_avail_proc : __kmp_xproc))
#define KMP_TRY_YIELD \
  ((__kmp_use_yield == 1) || (__kmp_use_yield == 2 && (KMP_OVERSUBSCRIBED)))
#define KMP_TRY_YIELD_OVERSUB \
  ((__kmp_use_yield == 1 || __kmp_use_yield == 2) && (KMP_OVERSUBSCRIBED))
#define KMP_YIELD(cond) \
    if ((cond) && (KMP_TRY_YIELD)) \
#define KMP_YIELD_OVERSUB() \
    if ((KMP_TRY_YIELD_OVERSUB)) \
#define KMP_YIELD_SPIN(count) \
    if (KMP_TRY_YIELD) { \
      (count) = __kmp_yield_next; \
#define KMP_TPAUSE_MAX_MASK ((kmp_uint64)0xFFFF)
#define KMP_YIELD_OVERSUB_ELSE_SPIN(count, time) \
    if (__kmp_tpause_enabled) { \
      if (KMP_OVERSUBSCRIBED) { \
        __kmp_tpause(0, (time)); \
        __kmp_tpause(__kmp_tpause_hint, (time)); \
      (time) = (time << 1 | 1) & KMP_TPAUSE_MAX_MASK; \
      if ((KMP_TRY_YIELD_OVERSUB)) { \
      } else if (__kmp_use_yield == 1) { \
        (count) = __kmp_yield_next; \
#define KMP_YIELD_OVERSUB_ELSE_SPIN(count, time) \
    if ((KMP_TRY_YIELD_OVERSUB)) \
    else if (__kmp_use_yield == 1) { \
      (count) = __kmp_yield_next; \
#define IS_CONS_TYPE_ORDERED(ct) ((ct) == ct_pdo_ordered)
typedef HANDLE kmp_thread_t;
typedef DWORD kmp_key_t;
typedef pthread_t kmp_thread_t;
typedef pthread_key_t kmp_key_t;
typedef int kmp_itt_mark_t;
#define KMP_ITT_DEBUG 0
typedef void *(*kmpc_ctor)(void *);
typedef void *(*kmpc_cctor)(void *, void *);
typedef void *(*kmpc_ctor_vec)(void *, size_t);
typedef void *(*kmpc_cctor_vec)(void *, void *,
#define KMP_HASH_TABLE_LOG2 9
#define KMP_HASH_TABLE_SIZE \
  (1 << KMP_HASH_TABLE_LOG2)
#define KMP_HASH_SHIFT 3
#define KMP_HASH(x) \
  ((((kmp_uintptr_t)x) >> KMP_HASH_SHIFT) & (KMP_HASH_TABLE_SIZE - 1))
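// Worked example: KMP_HASH_TABLE_SIZE is 1 << 9 = 512 buckets. For an address
// x = 0x7fff1234, KMP_HASH(x) = (0x7fff1234 >> 3) & 511 = 70; the shift drops
// the three low (alignment) bits before masking into the table.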
#if KMP_USE_HIER_SCHED
typedef struct kmp_hier_private_bdata_t {
} kmp_hier_private_bdata_t;
#if KMP_STATIC_STEAL_ENABLED
#if KMP_WEIGHTED_ITERATIONS_SUPPORTED
#if CACHE_LINE <= 128
#if KMP_WEIGHTED_ITERATIONS_SUPPORTED
#if CACHE_LINE <= 128
#if KMP_USE_HIER_SCHED
#if KMP_USE_HIER_SCHED
#if KMP_HWLOC_ENABLED
#if KMP_USE_INTERNODE_ALIGNMENT
#define KMP_INIT_BARRIER_STATE 0
#define KMP_BARRIER_SLEEP_BIT 0
#define KMP_BARRIER_UNUSED_BIT 1
#define KMP_BARRIER_BUMP_BIT 2
#define KMP_BARRIER_SLEEP_STATE (1 << KMP_BARRIER_SLEEP_BIT)
#define KMP_BARRIER_UNUSED_STATE (1 << KMP_BARRIER_UNUSED_BIT)
#define KMP_BARRIER_STATE_BUMP (1 << KMP_BARRIER_BUMP_BIT)
#if (KMP_BARRIER_SLEEP_BIT >= KMP_BARRIER_BUMP_BIT)
#error "Barrier sleep bit must be smaller than barrier bump bit"
#if (KMP_BARRIER_UNUSED_BIT >= KMP_BARRIER_BUMP_BIT)
#error "Barrier unused bit must be smaller than barrier bump bit"
#define KMP_BARRIER_NOT_WAITING 0
#define KMP_BARRIER_OWN_FLAG \
#define KMP_BARRIER_PARENT_FLAG \
#define KMP_BARRIER_SWITCH_TO_OWN_FLAG \
#define KMP_BARRIER_SWITCHING \
#define KMP_NOT_SAFE_TO_REAP \
#define KMP_SAFE_TO_REAP 1
#if KMP_FAST_REDUCTION_BARRIER
#if !KMP_FAST_REDUCTION_BARRIER
#define bs_reduction_barrier bs_plain_barrier
#define KMP_BARRIER_ICV_PUSH 1
typedef struct kmp_win32_mutex {
  CRITICAL_SECTION cs;
typedef struct kmp_win32_cond {
  kmp_win32_mutex_t waiters_count_lock_;
  int wait_generation_count_;
  pthread_cond_t c_cond;
typedef union kmp_cond_union kmp_cond_align_t;
  pthread_mutex_t m_mutex;
typedef union kmp_mutex_union kmp_mutex_align_t;
  volatile int ds_alive;
#if !USE_CMP_XCHG_FOR_BGET
#ifdef USE_QUEUING_LOCK_FOR_BGET
#define KMP_CHECK_UPDATE(a, b) \
#define KMP_CHECK_UPDATE_SYNC(a, b) \
    TCW_SYNC_PTR((a), (b))
#define get__blocktime(xteam, xtid) \
  ((xteam)->t.t_threads[(xtid)]->th.th_current_task->td_icvs.blocktime)
#define get__bt_set(xteam, xtid) \
  ((xteam)->t.t_threads[(xtid)]->th.th_current_task->td_icvs.bt_set)
#define get__bt_intervals(xteam, xtid) \
  ((xteam)->t.t_threads[(xtid)]->th.th_current_task->td_icvs.bt_intervals)
#define get__dynamic_2(xteam, xtid) \
  ((xteam)->t.t_threads[(xtid)]->th.th_current_task->td_icvs.dynamic)
#define get__nproc_2(xteam, xtid) \
  ((xteam)->t.t_threads[(xtid)]->th.th_current_task->td_icvs.nproc)
#define get__sched_2(xteam, xtid) \
  ((xteam)->t.t_threads[(xtid)]->th.th_current_task->td_icvs.sched)
#define set__blocktime_team(xteam, xtid, xval) \
  (((xteam)->t.t_threads[(xtid)]->th.th_current_task->td_icvs.blocktime) = \
#define set__bt_intervals_team(xteam, xtid, xval) \
  (((xteam)->t.t_threads[(xtid)]->th.th_current_task->td_icvs.bt_intervals) = \
#define set__bt_set_team(xteam, xtid, xval) \
  (((xteam)->t.t_threads[(xtid)]->th.th_current_task->td_icvs.bt_set) = (xval))
#define set__dynamic(xthread, xval) \
  (((xthread)->th.th_current_task->td_icvs.dynamic) = (xval))
#define get__dynamic(xthread) \
  (((xthread)->th.th_current_task->td_icvs.dynamic) ? (FTN_TRUE) : (FTN_FALSE))
#define set__nproc(xthread, xval) \
  (((xthread)->th.th_current_task->td_icvs.nproc) = (xval))
#define set__thread_limit(xthread, xval) \
  (((xthread)->th.th_current_task->td_icvs.thread_limit) = (xval))
#define set__max_active_levels(xthread, xval) \
  (((xthread)->th.th_current_task->td_icvs.max_active_levels) = (xval))
#define get__max_active_levels(xthread) \
  ((xthread)->th.th_current_task->td_icvs.max_active_levels)
#define set__sched(xthread, xval) \
  (((xthread)->th.th_current_task->td_icvs.sched) = (xval))
#define set__proc_bind(xthread, xval) \
  (((xthread)->th.th_current_task->td_icvs.proc_bind) = (xval))
#define get__proc_bind(xthread) \
  ((xthread)->th.th_current_task->td_icvs.proc_bind)
#define KMP_TASK_TO_TASKDATA(task) (((kmp_taskdata_t *)task) - 1)
#define KMP_TASKDATA_TO_TASK(taskdata) (kmp_task_t *)(taskdata + 1)
#define KMP_TASKING_ENABLED(task_team) \
  (TRUE == TCR_SYNC_4((task_team)->tt.tt_found_tasks))
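// Sketch of the layout the two conversion macros encode: each kmp_task_t is
// allocated immediately after its kmp_taskdata_t, so converting between the
// two is pointer arithmetic by one kmp_taskdata_t in either direction.
//   kmp_taskdata_t *td = KMP_TASK_TO_TASKDATA(task);
//   kmp_task_t *same = KMP_TASKDATA_TO_TASK(td); // same == task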
  std::atomic<kmp_int32>
#define KMP_DEP_IN 0x1
#define KMP_DEP_OUT 0x2
#define KMP_DEP_INOUT 0x3
#define KMP_DEP_MTX 0x4
#define KMP_DEP_SET 0x8
#define KMP_DEP_ALL 0x80
#if defined(__BYTE_ORDER__) && (__BYTE_ORDER__ == __ORDER_BIG_ENDIAN__)
#define MAX_MTX_DEPS 4
#if KMP_SUPPORT_GRAPH_OUTPUT
#define INIT_MAPSIZE 50
typedef struct kmp_taskgraph_flags {
  unsigned nowait : 1;
  unsigned re_record : 1;
  unsigned reserved : 30;
} kmp_taskgraph_flags_t;
typedef struct kmp_node_info {
  std::atomic<kmp_int32> npredecessors_counter;
typedef enum kmp_tdg_status {
  KMP_TDG_RECORDING = 1,
typedef struct kmp_tdg_info {
  kmp_taskgraph_flags_t tdg_flags;
  kmp_node_info_t *record_map;
  kmp_tdg_status_t tdg_status =
  std::atomic<kmp_int32> num_tasks;
  void *rec_taskred_data;
extern int __kmp_tdg_dot;
extern kmp_tdg_info_t **__kmp_global_tdgs;
extern std::atomic<kmp_int32> __kmp_tdg_task_id;
#if defined(__BYTE_ORDER__) && (__BYTE_ORDER__ == __ORDER_BIG_ENDIAN__)
  std::atomic<kmp_int32>
#if defined(KMP_GOMP_COMPAT)
#if defined(KMP_GOMP_COMPAT)
  void (*td_copy_func)(void *, void *);
  bool is_taskgraph = 0;
  kmp_tdg_info_t *tdg;
#define TASK_DEQUE_BITS 8
#define INITIAL_TASK_DEQUE_SIZE (1 << TASK_DEQUE_BITS)
#define TASK_DEQUE_SIZE(td) ((td).td_deque_size)
#define TASK_DEQUE_MASK(td) ((td).td_deque_size - 1)
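// Worked example: the deque size stays a power of two so indices wrap with a
// mask rather than a modulo. Initially 1 << 8 = 256 slots, mask 255:
//   next = (tail + 1) & TASK_DEQUE_MASK(td); // 255 wraps back to 0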
#if (USE_FAST_MEMORY == 3) || (USE_FAST_MEMORY == 5)
typedef struct kmp_free_list {
  void *th_free_list_self;
  void *th_free_list_sync;
  void *th_free_list_other;
#if KMP_AFFINITY_SUPPORTED
  kmp_affin_mask_t *th_affin_mask;
  kmp_affinity_ids_t th_topology_ids;
  kmp_affinity_attrs_t th_topology_attrs;
#if KMP_AFFINITY_SUPPORTED
  int th_current_place;
#if KMP_USE_HIER_SCHED
  kmp_hier_private_bdata_t *th_hier_bar_data;
#if (USE_FAST_MEMORY == 3) || (USE_FAST_MEMORY == 5)
  kmp_free_list_t th_free_lists[NUM_LISTS];
  kmp_win32_cond_t th_suspend_cv;
  kmp_win32_mutex_t th_suspend_mx;
  std::atomic<int> th_suspend_init;
  kmp_cond_align_t th_suspend_cv;
  kmp_mutex_align_t th_suspend_mx;
  std::atomic<int> th_suspend_init_count;
  kmp_itt_mark_t th_itt_mark_single;
#if KMP_STATS_ENABLED
  kmp_stats_list *th_stats;
  std::atomic<bool> th_blocking;
#define KMP_MIN_MALLOC_ARGV_ENTRIES 100
#if KMP_ARCH_X86 || KMP_ARCH_X86_64
#define KMP_INLINE_ARGV_BYTES \
  ((3 * KMP_PTR_SKIP + 2 * sizeof(int) + 2 * sizeof(kmp_int8) + \
    sizeof(kmp_int16) + sizeof(kmp_uint32)) % \
#define KMP_INLINE_ARGV_BYTES \
  (2 * CACHE_LINE - ((3 * KMP_PTR_SKIP + 2 * sizeof(int)) % CACHE_LINE))
#define KMP_INLINE_ARGV_ENTRIES (int)(KMP_INLINE_ARGV_BYTES / KMP_PTR_SKIP)
#if KMP_ARCH_X86 || KMP_ARCH_X86_64
#if KMP_AFFINITY_SUPPORTED
#if (KMP_ARCH_X86 || KMP_ARCH_X86_64)
  char dummy_padding[1024];
  std::atomic<kmp_uint32> t_copyin_counter;
#if KMP_AFFINITY_SUPPORTED
  int r_affinity_assigned;
extern int __kmp_forkjoin_frames;
extern int __kmp_forkjoin_frames_mode;
extern int kmp_a_debug;
extern int kmp_b_debug;
extern int kmp_c_debug;
extern int kmp_d_debug;
extern int kmp_e_debug;
extern int kmp_f_debug;
#define KMP_DEBUG_BUF_LINES_INIT 512
#define KMP_DEBUG_BUF_LINES_MIN 1
#define KMP_DEBUG_BUF_CHARS_INIT 128
#define KMP_DEBUG_BUF_CHARS_MIN 2
extern int __kmp_par_range;
#define KMP_PAR_RANGE_ROUTINE_LEN 1024
extern char __kmp_par_range_routine[KMP_PAR_RANGE_ROUTINE_LEN];
#define KMP_PAR_RANGE_FILENAME_LEN 1024
extern char __kmp_par_range_filename[KMP_PAR_RANGE_FILENAME_LEN];
extern int __kmp_par_range_lb;
extern int __kmp_par_range_ub;
#if KMP_ARCH_X86 || KMP_ARCH_X86_64
extern kmp_cpuinfo_t __kmp_cpuinfo;
#elif KMP_OS_DARWIN && KMP_ARCH_AARCH64
extern volatile int __kmp_init_monitor;
extern size_t __kmp_monitor_stksize;
extern int __kmp_suspend_count;
  if (*bt > INT_MAX / 1000) {
    *bt = INT_MAX / 1000;
    KMP_INFORM(MaxValueUsing, "kmp_set_blocktime(ms)", bt);
    __kmp_monitor_wakeups;
extern int __kmp_bt_intervals;
#ifdef KMP_ADJUST_BLOCKTIME
extern int __kmp_zero_bt;
#ifdef KMP_DFLT_NTH_CORES
#ifdef KMP_TDATA_GTID
#if KMP_ARCH_X86 || KMP_ARCH_X86_64
extern int __kmp_inherit_fp_control;
extern kmp_int16 __kmp_init_x87_fpu_control_word;
#if KMP_MIC_SUPPORTED
extern enum mic_type __kmp_mic_type;
#ifdef USE_LOAD_BALANCE
extern double __kmp_load_balance_interval;
#if KMP_USE_ADAPTIVE_LOCKS
struct kmp_adaptive_backoff_params_t {
extern kmp_adaptive_backoff_params_t __kmp_adaptive_backoff_params;
#if KMP_DEBUG_ADAPTIVE_LOCKS
extern const char *__kmp_speculative_statsfile;
#define __kmp_get_gtid() __kmp_get_global_thread_id()
#define __kmp_entry_gtid() __kmp_get_global_thread_id_reg()
#define __kmp_get_tid() (__kmp_tid_from_gtid(__kmp_get_gtid()))
#define __kmp_get_team() (__kmp_threads[(__kmp_get_gtid())]->th.th_team)
#define __kmp_get_thread() (__kmp_thread_from_gtid(__kmp_get_gtid()))
#define __kmp_get_team_num_threads(gtid) \
  (__kmp_threads[(gtid)]->th.th_team->t.t_nproc)
  return team->t.t_threads[tid]->th.th_info.ds.ds_gtid;
  return thr->th.th_info.ds.ds_gtid;
#if KMP_HAVE_MWAIT || KMP_HAVE_UMWAIT
extern int __kmp_user_level_mwait;
extern int __kmp_umwait_enabled;
extern int __kmp_mwait_enabled;
extern int __kmp_mwait_hints;
extern int __kmp_waitpkg_enabled;
extern int __kmp_tpause_state;
extern int __kmp_tpause_hint;
extern int __kmp_tpause_enabled;
#define _KMP_GEN_ID(counter) \
  (__kmp_debugging ? KMP_ATOMIC_INC(&counter) + 1 : ~0)
#define _KMP_GEN_ID(counter) (~0)
#define KMP_GEN_TASK_ID() _KMP_GEN_ID(__kmp_task_counter)
#define KMP_GEN_TEAM_ID() _KMP_GEN_ID(__kmp_team_counter)
  size_t size, char const *format, ...);
#ifdef USE_LOAD_BALANCE
extern void __kmp_warn(char const *format, ...);
#define __kmp_allocate(size) ___kmp_allocate((size)KMP_SRC_LOC_CURR)
#define __kmp_page_allocate(size) ___kmp_page_allocate((size)KMP_SRC_LOC_CURR)
#define __kmp_free(ptr) ___kmp_free((ptr)KMP_SRC_LOC_CURR)
extern void *___kmp_fast_allocate(kmp_info_t *this_thr,
extern void __kmp_free_fast_memory(kmp_info_t *this_thr);
extern void __kmp_initialize_fast_memory(kmp_info_t *this_thr);
#define __kmp_fast_allocate(this_thr, size) \
  ___kmp_fast_allocate((this_thr), (size)KMP_SRC_LOC_CURR)
#define __kmp_fast_free(this_thr, ptr) \
  ___kmp_fast_free((this_thr), (ptr)KMP_SRC_LOC_CURR)
#define __kmp_thread_malloc(th, size) \
  ___kmp_thread_malloc((th), (size)KMP_SRC_LOC_CURR)
#define __kmp_thread_calloc(th, nelem, elsize) \
  ___kmp_thread_calloc((th), (nelem), (elsize)KMP_SRC_LOC_CURR)
#define __kmp_thread_realloc(th, ptr, size) \
  ___kmp_thread_realloc((th), (ptr), (size)KMP_SRC_LOC_CURR)
#define __kmp_thread_free(th, ptr) \
  ___kmp_thread_free((th), (ptr)KMP_SRC_LOC_CURR)
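// Sketch: the wrappers above splice in the caller's source location (via
// KMP_SRC_LOC_CURR) for memory-debugging builds; call sites read like plain
// allocation calls.
//   void *buf = __kmp_thread_malloc(this_thr, 128);
//   __kmp_thread_free(this_thr, buf);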
  int *num_threads_list);
  int num_teams_ub, int num_threads);
#ifdef KMP_GOMP_COMPAT
#if KMP_HANDLE_SIGNALS
extern int __kmp_handle_signals;
extern void __kmp_install_signals(int parallel_init);
extern void __kmp_remove_signals(void);
#if KMP_ARCH_X86 || KMP_ARCH_X86_64 || (KMP_OS_WINDOWS && (KMP_ARCH_AARCH64 || KMP_ARCH_ARM))
#if KMP_AFFINITY_SUPPORTED
extern char *__kmp_affinity_print_mask(char *buf, int buf_len,
                                       kmp_affin_mask_t *mask);
  kmp_affin_mask_t *mask);
extern void __kmp_affinity_initialize(kmp_affinity_t &affinity);
extern void __kmp_affinity_uninitialize(void);
extern void __kmp_affinity_set_init_mask(int gtid, int isa_root);
void __kmp_affinity_bind_init_mask(int gtid);
extern void __kmp_affinity_bind_place(int gtid);
extern int __kmp_aux_set_affinity(void **mask);
extern int __kmp_aux_get_affinity(void **mask);
extern int __kmp_aux_get_affinity_max_proc();
extern int __kmp_aux_set_affinity_mask_proc(int proc, void **mask);
extern int __kmp_aux_unset_affinity_mask_proc(int proc, void **mask);
extern int __kmp_aux_get_affinity_mask_proc(int proc, void **mask);
extern void __kmp_balanced_affinity(kmp_info_t *th, int team_size);
#if KMP_WEIGHTED_ITERATIONS_SUPPORTED
extern int __kmp_get_first_osid_with_ecore(void);
#if KMP_OS_LINUX || KMP_OS_FREEBSD || KMP_OS_NETBSD || KMP_OS_DRAGONFLY || \
extern int kmp_set_thread_affinity_mask_initial(void);
  if (r->r.r_uber_thread == __kmp_threads[gtid] && !r->r.r_affinity_assigned) {
    __kmp_affinity_set_init_mask(gtid, TRUE);
    __kmp_affinity_bind_init_mask(gtid);
    r->r.r_affinity_assigned = TRUE;
  if (!KMP_AFFINITY_CAPABLE())
  if (r->r.r_uber_thread == th && r->r.r_affinity_assigned) {
    __kmp_set_system_affinity(__kmp_affin_origMask, FALSE);
    KMP_CPU_COPY(th->th.th_affin_mask, __kmp_affin_origMask);
    r->r.r_affinity_assigned = FALSE;
#define __kmp_assign_root_init_mask()
extern int __kmp_futex_determine_capable(void);
extern void __kmp_create_monitor(kmp_info_t *th);
  ompt_data_t ompt_parallel_data,
  size_t reduce_size, void *reduce_data,
  void (*reduce)(void *, void *));
  int exit_teams = 0);
  size_t sizeof_kmp_task_t,
  size_t sizeof_shareds,
#define KMP_DEBUG_ASSERT_TASKTEAM_INVARIANT(team, thr) \
  __kmp_tasking_mode != tskm_task_teams || team->t.t_nproc == 1 || \
  thr->th.th_task_team == team->t.t_task_team[thr->th.th_task_state])
#define KMP_DEBUG_ASSERT_TASKTEAM_INVARIANT(team, thr)
  void **exit_frame_ptr
  size_t vector_length);
  size_t cpy_size, void *cpy_data,
  void (*cpy_func)(void *, void *),
  size_t sizeof_kmp_task_t,
  size_t sizeof_shareds,
  bool serialize_immediate);
  int num, void *data);
  void **user_lock, uintptr_t hint);
static inline bool __kmp_tdg_is_recording(kmp_tdg_status_t status) {
  return status == KMP_TDG_RECORDING;
  void *reduce_data, void (*reduce_func)(void *lhs_data, void *rhs_data),
  void *reduce_data, void (*reduce_func)(void *lhs_data, void *rhs_data),
  void *reduce_data, void (*reduce_func)(void *lhs_data, void *rhs_data),
  const char *message);
  kmp_int32 *num_threads_list, int severity, const char *message);
  void *data_addr, size_t pc_size);
#define KMPC_CONVENTION __cdecl
#define KMPC_CONVENTION
  char const *format);
#define KMP_DEVICE_DEFAULT -1
#define KMP_DEVICE_ALL -11
#define KMP_HIDDEN_HELPER_THREAD(gtid) \
  ((gtid) >= 1 && (gtid) <= __kmp_hidden_helper_threads_num)
#define KMP_HIDDEN_HELPER_WORKER_THREAD(gtid) \
  ((gtid) > 1 && (gtid) <= __kmp_hidden_helper_threads_num)
#define KMP_HIDDEN_HELPER_MAIN_THREAD(gtid) \
  ((gtid) == 1 && (gtid) <= __kmp_hidden_helper_threads_num)
#define KMP_HIDDEN_HELPER_TEAM(team) \
  (team->t.t_threads[0] == __kmp_hidden_helper_main_thread)
#define KMP_GTID_TO_SHADOW_GTID(gtid) \
  ((gtid) % (__kmp_hidden_helper_threads_num - 1) + 2)
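// Worked example, assuming __kmp_hidden_helper_threads_num == 8: gtids 1..8
// belong to hidden helper threads (gtid 1 is their main thread), and a
// regular gtid of 16 maps to shadow gtid 16 % (8 - 1) + 2 == 4.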
  int adjusted_gtid = gtid;
  return adjusted_gtid;
#if ENABLE_LIBOMPTARGET
extern void (*kmp_target_sync_cb)(ident_t *loc_ref, int gtid,
                                  void *current_task, void *event);
template <bool C, bool S>
template <bool C, bool S>
template <bool C, bool S>
#if KMP_HAVE_MWAIT || KMP_HAVE_UMWAIT
template <bool C, bool S>
template <bool C, bool S>
template <bool C, bool S>
template <bool C, bool S>
template <bool C, bool S>
template <bool C, bool S>
template <bool C, bool S>
  int *thread_finished,
template <bool C, bool S>
  int *thread_finished,
template <bool C, bool S>
  int final_spin, int *thread_finished,
  int *thread_finished,
  if (f && f != stdout && f != stderr) {
  const char *env_var = nullptr)
  const char *env_var = nullptr) {
  f = fopen(filename, mode);
  f = fopen(filename, mode);
  operator FILE *() { return f; }
template <typename SourceType, typename TargetType,
          bool isSourceSmaller = (sizeof(SourceType) < sizeof(TargetType)),
          bool isSourceEqual = (sizeof(SourceType) == sizeof(TargetType)),
          bool isSourceSigned = std::is_signed<SourceType>::value,
          bool isTargetSigned = std::is_signed<TargetType>::value>
template <typename SourceType, typename TargetType>
  static TargetType to(SourceType src) { return (TargetType)src; }
template <typename SourceType, typename TargetType>
  static TargetType to(SourceType src) { return src; }
template <typename SourceType, typename TargetType>
  static TargetType to(SourceType src) {
    (std::numeric_limits<TargetType>::max)()));
    (std::numeric_limits<TargetType>::min)()));
    return (TargetType)src;
template <typename SourceType, typename TargetType>
  static TargetType to(SourceType src) {
    return (TargetType)src;
template <typename SourceType, typename TargetType>
  static TargetType to(SourceType src) {
    return (TargetType)src;
template <typename SourceType, typename TargetType>
  static TargetType to(SourceType src) {
    (std::numeric_limits<TargetType>::max)()));
    return (TargetType)src;
template <typename SourceType, typename TargetType>
  static TargetType to(SourceType src) { return (TargetType)src; }
template <typename SourceType, typename TargetType>
  static TargetType to(SourceType src) {
    (std::numeric_limits<TargetType>::max)()));
    return (TargetType)src;
template <typename SourceType, typename TargetType>
  static TargetType to(SourceType src) {
    (std::numeric_limits<TargetType>::max)()));
    return (TargetType)src;
template <typename SourceType, typename TargetType>
  static TargetType to(SourceType src) { return (TargetType)src; }
template <typename SourceType, typename TargetType>
  static TargetType to(SourceType src) { return src; }
template <typename SourceType, typename TargetType>
  static TargetType to(SourceType src) {
    (std::numeric_limits<TargetType>::max)()));
    return (TargetType)src;
template <typename T1, typename T2>
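// Sketch: the kmp_convert specializations above implement a checked narrowing
// cast selected by relative size and signedness of the two types; the default
// template arguments make the usual two-argument form work directly.
//   kmp_int64 big = 7;
//   kmp_int32 small = kmp_convert<kmp_int64, kmp_int32>::to(big);
//   // the elided assertion lines fire in debug builds if the value
//   // would not fit in the target type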
void const char const char int ITT_FORMAT __itt_group_sync x void const char ITT_FORMAT __itt_group_sync s void ITT_FORMAT __itt_group_sync p void ITT_FORMAT p void ITT_FORMAT p no args __itt_suppress_mode_t unsigned int void size_t ITT_FORMAT d void ITT_FORMAT p void ITT_FORMAT p __itt_model_site __itt_model_site_instance ITT_FORMAT p __itt_model_task __itt_model_task_instance ITT_FORMAT p void ITT_FORMAT p void ITT_FORMAT p void size_t ITT_FORMAT d void ITT_FORMAT p const wchar_t ITT_FORMAT s const char ITT_FORMAT s const char ITT_FORMAT s const char ITT_FORMAT s no args void ITT_FORMAT p size_t ITT_FORMAT d no args const wchar_t const wchar_t ITT_FORMAT s __itt_heap_function void size_t int ITT_FORMAT d __itt_heap_function void ITT_FORMAT p __itt_heap_function void void size_t int ITT_FORMAT d no args no args unsigned int ITT_FORMAT u const __itt_domain __itt_id ITT_FORMAT lu const __itt_domain __itt_id __itt_id __itt_string_handle ITT_FORMAT p const __itt_domain __itt_id ITT_FORMAT p const __itt_domain __itt_id __itt_timestamp __itt_timestamp ITT_FORMAT lu const __itt_domain __itt_id __itt_id __itt_string_handle ITT_FORMAT p const __itt_domain ITT_FORMAT p const __itt_domain __itt_string_handle unsigned long long ITT_FORMAT lu const __itt_domain __itt_string_handle unsigned long long ITT_FORMAT lu const __itt_domain __itt_id __itt_string_handle __itt_metadata_type size_t void ITT_FORMAT p const __itt_domain __itt_id __itt_string_handle const wchar_t size_t ITT_FORMAT lu const __itt_domain __itt_id __itt_relation __itt_id ITT_FORMAT p const wchar_t int ITT_FORMAT __itt_group_mark d __itt_event event
void const char const char int ITT_FORMAT __itt_group_sync x void const char ITT_FORMAT __itt_group_sync s void ITT_FORMAT __itt_group_sync p void ITT_FORMAT p void ITT_FORMAT p no args __itt_suppress_mode_t unsigned int void size_t ITT_FORMAT d void ITT_FORMAT p void ITT_FORMAT p __itt_model_site __itt_model_site_instance ITT_FORMAT p __itt_model_task __itt_model_task_instance ITT_FORMAT p void ITT_FORMAT p void ITT_FORMAT p void size_t ITT_FORMAT d void ITT_FORMAT p const wchar_t ITT_FORMAT s const char ITT_FORMAT s const char ITT_FORMAT s const char ITT_FORMAT s no args void ITT_FORMAT p size_t ITT_FORMAT d no args const wchar_t const wchar_t ITT_FORMAT s __itt_heap_function void size_t int ITT_FORMAT d __itt_heap_function void ITT_FORMAT p __itt_heap_function void void size_t int ITT_FORMAT d no args no args unsigned int ITT_FORMAT u const __itt_domain __itt_id ITT_FORMAT lu const __itt_domain __itt_id __itt_id parent
void const char const char int ITT_FORMAT __itt_group_sync x void const char ITT_FORMAT __itt_group_sync s void ITT_FORMAT __itt_group_sync p void ITT_FORMAT p void ITT_FORMAT p no args __itt_suppress_mode_t unsigned int void size_t ITT_FORMAT d void ITT_FORMAT p void ITT_FORMAT p __itt_model_site __itt_model_site_instance ITT_FORMAT p __itt_model_task __itt_model_task_instance ITT_FORMAT p void ITT_FORMAT p void ITT_FORMAT p void size_t ITT_FORMAT d void ITT_FORMAT p const wchar_t ITT_FORMAT s const char ITT_FORMAT s const char ITT_FORMAT s const char ITT_FORMAT s no args void ITT_FORMAT p size_t ITT_FORMAT d no args const wchar_t const wchar_t ITT_FORMAT s __itt_heap_function void size_t int ITT_FORMAT d __itt_heap_function void ITT_FORMAT p __itt_heap_function void void size_t int ITT_FORMAT d no args no args unsigned int ITT_FORMAT u const __itt_domain __itt_id ITT_FORMAT lu const __itt_domain __itt_id __itt_id __itt_string_handle ITT_FORMAT p const __itt_domain __itt_id ITT_FORMAT p const __itt_domain __itt_id __itt_timestamp __itt_timestamp ITT_FORMAT lu const __itt_domain __itt_id __itt_id __itt_string_handle ITT_FORMAT p const __itt_domain ITT_FORMAT p const __itt_domain __itt_string_handle unsigned long long value
void const char const char int ITT_FORMAT __itt_group_sync x void const char ITT_FORMAT __itt_group_sync s void ITT_FORMAT __itt_group_sync p void ITT_FORMAT p void ITT_FORMAT p no args __itt_suppress_mode_t mode
void const char const char int ITT_FORMAT __itt_group_sync x void const char ITT_FORMAT __itt_group_sync s void ITT_FORMAT __itt_group_sync p void ITT_FORMAT p void ITT_FORMAT p no args __itt_suppress_mode_t unsigned int void size_t size
void const char const char int ITT_FORMAT __itt_group_sync p
void const char const char int ITT_FORMAT __itt_group_sync x void const char ITT_FORMAT __itt_group_sync s void ITT_FORMAT __itt_group_sync p void ITT_FORMAT p void ITT_FORMAT p no args __itt_suppress_mode_t unsigned int void size_t ITT_FORMAT d void ITT_FORMAT p void ITT_FORMAT p __itt_model_site __itt_model_site_instance ITT_FORMAT p __itt_model_task __itt_model_task_instance ITT_FORMAT p void ITT_FORMAT p void ITT_FORMAT p void size_t ITT_FORMAT d void ITT_FORMAT p const wchar_t ITT_FORMAT s const char ITT_FORMAT s const char ITT_FORMAT s const char ITT_FORMAT s no args void ITT_FORMAT p size_t ITT_FORMAT d no args const wchar_t const wchar_t ITT_FORMAT s __itt_heap_function void size_t int ITT_FORMAT d __itt_heap_function void ITT_FORMAT p __itt_heap_function void void size_t int ITT_FORMAT d no args no args unsigned int ITT_FORMAT u const __itt_domain __itt_id ITT_FORMAT lu const __itt_domain __itt_id __itt_id __itt_string_handle ITT_FORMAT p const __itt_domain __itt_id ITT_FORMAT p const __itt_domain __itt_id __itt_timestamp __itt_timestamp ITT_FORMAT lu const __itt_domain __itt_id __itt_id __itt_string_handle ITT_FORMAT p const __itt_domain ITT_FORMAT p const __itt_domain __itt_string_handle unsigned long long ITT_FORMAT lu const __itt_domain __itt_string_handle unsigned long long ITT_FORMAT lu const __itt_domain __itt_id __itt_string_handle __itt_metadata_type size_t void ITT_FORMAT p const __itt_domain __itt_id __itt_string_handle const wchar_t size_t ITT_FORMAT lu const __itt_domain __itt_id __itt_relation __itt_id ITT_FORMAT p const wchar_t int ITT_FORMAT __itt_group_mark d int
void const char const char int ITT_FORMAT __itt_group_sync x void const char ITT_FORMAT __itt_group_sync s void ITT_FORMAT __itt_group_sync p void ITT_FORMAT p void ITT_FORMAT p no args __itt_suppress_mode_t unsigned int void size_t ITT_FORMAT d void ITT_FORMAT p void ITT_FORMAT p __itt_model_site __itt_model_site_instance ITT_FORMAT p __itt_model_task __itt_model_task_instance ITT_FORMAT p void ITT_FORMAT p void ITT_FORMAT p void size_t ITT_FORMAT d void ITT_FORMAT p const wchar_t ITT_FORMAT s const char ITT_FORMAT s const char ITT_FORMAT s const char ITT_FORMAT s no args void ITT_FORMAT p size_t ITT_FORMAT d no args const wchar_t const wchar_t ITT_FORMAT s __itt_heap_function void size_t int ITT_FORMAT d __itt_heap_function void ITT_FORMAT p __itt_heap_function void void size_t int ITT_FORMAT d no args no args unsigned int ITT_FORMAT u const __itt_domain __itt_id ITT_FORMAT lu const __itt_domain __itt_id __itt_id __itt_string_handle ITT_FORMAT p const __itt_domain __itt_id ITT_FORMAT p const __itt_domain __itt_id __itt_timestamp __itt_timestamp ITT_FORMAT lu const __itt_domain __itt_id __itt_id __itt_string_handle ITT_FORMAT p const __itt_domain ITT_FORMAT p const __itt_domain __itt_string_handle unsigned long long ITT_FORMAT lu const __itt_domain __itt_string_handle unsigned long long ITT_FORMAT lu const __itt_domain __itt_id __itt_string_handle __itt_metadata_type type
void __kmp_free_task_team(kmp_info_t *thread, kmp_task_team_t *task_team)
struct kmp_disp kmp_disp_t
int __kmp_memkind_available
omp_memspace_handle_t const omp_default_mem_space
KMP_EXPORT void __kmpc_destroy_nest_lock(ident_t *loc, kmp_int32 gtid, void **user_lock)
int __kmp_hot_teams_max_level
void __kmp_finish_implicit_task(kmp_info_t *this_thr)
void * omp_memspace_handle_t
volatile kmp_team_t * __kmp_team_pool
KMP_EXPORT kmp_int32 __kmpc_cancel_barrier(ident_t *loc_ref, kmp_int32 gtid)
int __kmp_pause_resource(kmp_pause_status_t level)
void * omp_allocator_handle_t
void __kmp_warn(char const *format,...)
void ___kmp_thread_free(kmp_info_t *th, void *ptr KMP_SRC_LOC_DECL)
void KMPC_SET_DYNAMIC(int flag)
kmp_bar_pat_e __kmp_barrier_release_pat_dflt
struct kmp_dephash kmp_dephash_t
kmp_info_t * __kmp_hidden_helper_main_thread
int __kmp_generate_warnings
volatile int __kmp_init_user_locks
int __kmp_debug_buf_lines
omp_allocator_handle_t __kmpc_init_allocator(int gtid, omp_memspace_handle_t, int ntraits, omp_alloctrait_t traits[])
kmp_proc_bind_t __kmp_teams_proc_bind
KMP_EXPORT void KMPC_CONVENTION kmpc_set_library(int)
kmp_uint32 __kmp_ge_4(kmp_uint32 value, kmp_uint32 checker)
void ompc_set_dynamic(int flag)
void __kmp_free_team(kmp_root_t *, kmp_team_t *, kmp_info_t *)
kmp_bootstrap_lock_t __kmp_initz_lock
void __kmp_aux_set_defaults(char const *str, size_t len)
int __kmp_display_env_verbose
omp_allocator_handle_t const omp_cgroup_mem_alloc
kmp_global_t __kmp_global
void __kmp_set_schedule(int gtid, kmp_sched_t new_sched, int chunk)
@ omp_atk_preferred_device
void __kmp_init_target_mem()
KMP_EXPORT void * kmpc_malloc(size_t size)
union kmp_task_team kmp_task_team_t
void __kmp_hidden_helper_worker_thread_signal()
KMP_EXPORT void __kmpc_push_num_threads_list_strict(ident_t *loc, kmp_int32 global_tid, kmp_uint32 list_length, kmp_int32 *num_threads_list, int severity, const char *message)
void __kmp_teams_master(int gtid)
void __kmp_elapsed_tick(double *)
void __kmp_common_destroy(void)
void __kmp_common_initialize(void)
#define KMP_HASH_TABLE_SIZE
void __kmp_release_64(kmp_flag_64<> *flag)
void __kmpc_free(int gtid, void *ptr, omp_allocator_handle_t al)
kmp_pause_status_t __kmp_pause_status
struct kmp_teams_size kmp_teams_size_t
enum kmp_target_offload_kind kmp_target_offload_kind_t
void __kmp_read_system_time(double *delta)
KMP_NORETURN void __kmp_abort_process(void)
void __kmp_free_thread(kmp_info_t *)
KMP_EXPORT kmp_int32 __kmpc_omp_task(ident_t *loc_ref, kmp_int32 gtid, kmp_task_t *new_task)
void __kmp_enable(int old_state)
kmp_bootstrap_lock_t __kmp_tp_cached_lock
void __kmp_check_stack_overlap(kmp_info_t *thr)
struct kmp_base_root kmp_base_root_t
void * __kmpc_alloc(int gtid, size_t sz, omp_allocator_handle_t al)
void __kmp_infinite_loop(void)
kmp_info_t * __kmp_allocate_thread(kmp_root_t *root, kmp_team_t *team, int tid)
KMP_EXPORT void __kmpc_init_nest_lock(ident_t *loc, kmp_int32 gtid, void **user_lock)
void __kmp_reap_task_teams(void)
omp_memspace_handle_t const llvm_omp_target_host_mem_space
kmp_int32 __kmp_use_yield
size_t KMP_EXPAND_NAME ompc_get_affinity_format(char *buffer, size_t size)
char const * __kmp_barrier_type_name[bs_last_barrier]
char const * __kmp_barrier_pattern_name[bp_last_bar]
int __kmp_dflt_team_nth_ub
void __kmp_hidden_helper_threads_initz_wait()
void __kmp_pop_task_team_node(kmp_info_t *thread, kmp_team_t *team)
int __kmp_aux_get_num_teams()
KMP_EXPORT int __kmpc_test_nest_lock(ident_t *loc, kmp_int32 gtid, void **user_lock)
struct dispatch_shared_info dispatch_shared_info_t
void __kmp_fini_target_mem()
Finalize target memory support.
struct kmp_taskgroup kmp_taskgroup_t
struct kmp_hws_item kmp_hws_item_t
void __kmp_run_before_invoked_task(int gtid, int tid, kmp_info_t *this_thr, kmp_team_t *team)
void __kmp_wait_to_unref_task_teams(void)
struct KMP_ALIGN_CACHE kmp_base_info kmp_base_info_t
void __kmp_wait_4_ptr(void *spinner, kmp_uint32 checker, kmp_uint32(*pred)(void *, kmp_uint32), void *obj)
void __kmp_task_team_sync(kmp_info_t *this_thr, kmp_team_t *team)
union kmp_team kmp_team_p
int __kmp_barrier(enum barrier_type bt, int gtid, int is_split, size_t reduce_size, void *reduce_data, void(*reduce)(void *, void *))
struct KMP_ALIGN_CACHE dispatch_private_info dispatch_private_info_t
int __kmp_get_max_active_levels(int gtid)
KMP_EXPORT void * kmpc_aligned_malloc(size_t size, size_t alignment)
void ___kmpc_free(int gtid, void *ptr, omp_allocator_handle_t al)
void __kmp_aux_set_library(enum library_type arg)
void __kmp_print_storage_map_gtid(int gtid, void *p1, void *p2, size_t size, char const *format,...)
void __kmp_push_num_teams_51(ident_t *loc, int gtid, int num_teams_lb, int num_teams_ub, int num_threads)
#define __kmp_assign_root_init_mask()
int __kmp_dflt_max_active_levels
struct kmp_hot_team_ptr kmp_hot_team_ptr_t
KMP_EXPORT void __kmpc_doacross_init(ident_t *loc, kmp_int32 gtid, kmp_int32 num_dims, const struct kmp_dim *dims)
void __kmp_unlock_suspend_mx(kmp_info_t *th)
kmp_bar_pat_e __kmp_barrier_gather_pat_dflt
unsigned short __kmp_get_random(kmp_info_t *thread)
void * __kmpc_calloc(int gtid, size_t nmemb, size_t sz, omp_allocator_handle_t al)
static kmp_team_t * __kmp_team_from_gtid(int gtid)
int __kmp_register_root(int initial_thread)
int __kmp_execute_tasks_oncore(kmp_info_t *thread, kmp_int32 gtid, kmp_flag_oncore *flag, int final_spin, int *thread_finished, kmp_int32 is_constrained)
omp_memspace_handle_t const omp_low_lat_mem_space
void __kmp_do_initialize_hidden_helper_threads()
int __kmp_storage_map_verbose_specified
struct kmp_local kmp_local_t
omp_allocator_handle_t __kmpc_get_default_allocator(int gtid)
kmp_bar_pat_e __kmp_barrier_gather_pattern[bs_last_barrier]
void __kmp_thread_sleep(int millis)
KMP_EXPORT kmp_int32 __kmpc_omp_taskyield(ident_t *loc_ref, kmp_int32 gtid, int end_part)
kmp_tasking_mode_t __kmp_tasking_mode
char * __kmp_affinity_format
void __kmp_abort_thread(void)
volatile kmp_info_t * __kmp_thread_pool
void __kmp_internal_end_atexit(void)
kmp_hws_item_t __kmp_hws_die
KMP_EXPORT void __kmpc_set_lock(ident_t *loc, kmp_int32 gtid, void **user_lock)
void __kmp_get_hierarchy(kmp_uint32 nproc, kmp_bstate_t *thr_bar)
volatile int __kmp_init_gtid
omp_allocator_handle_t __kmp_def_allocator
kmp_hws_item_t __kmp_hws_node
union KMP_ALIGN_CACHE kmp_sleep_team kmp_sleep_team_t
kmp_bootstrap_lock_t __kmp_task_team_lock
void * __kmp_alloc(int gtid, size_t align, size_t sz, omp_allocator_handle_t al)
int __kmp_omp_cancellation
void __kmp_suspend_64(int th_gtid, kmp_flag_64< C, S > *flag)
static void __kmp_resume_if_hard_paused()
kmp_hws_item_t __kmp_hws_tile
void * __kmp_calloc(int gtid, size_t align, size_t nmemb, size_t sz, omp_allocator_handle_t al)
kmp_nested_proc_bind_t __kmp_nested_proc_bind
void __kmp_free_implicit_task(kmp_info_t *this_thr)
KMP_EXPORT void __kmpc_destroy_lock(ident_t *loc, kmp_int32 gtid, void **user_lock)
void __kmp_hidden_helper_main_thread_release()
void kmp_threadprivate_insert_private_data(int gtid, void *pc_addr, void *data_addr, size_t pc_size)
fork_context_e
Tells the fork call which compiler generated it, and therefore how the call must be handled.
@ fork_context_gnu
Called from GNU-generated code, so the runtime must not invoke the microtask internally.
@ fork_context_intel
Called from Intel-generated code.
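A minimal sketch of the enum these entries imply; the real definition in kmp.h may carry additional members, and __kmp_fork_call (declared later in this listing) consumes it:
enum fork_context_e {
  fork_context_gnu,  /* caller is GNU-generated code: the runtime must not
                        invoke the microtask itself */
  fork_context_intel /* caller is Intel-generated code: the runtime invokes
                        the microtask internally */
};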
void __kmp_exit_single(int gtid)
struct KMP_ALIGN_CACHE dispatch_private_info32 dispatch_private_info32_t
void __kmp_suspend_initialize(void)
int __kmp_get_team_size(int gtid, int level)
kmp_nested_nthreads_t __kmp_nested_nth
KMP_EXPORT void __kmpc_push_proc_bind(ident_t *loc, kmp_int32 global_tid, int proc_bind)
omp_allocator_handle_t const omp_default_mem_alloc
kmp_team_t * __kmp_allocate_team(kmp_root_t *root, int new_nproc, int max_nproc, kmp_proc_bind_t proc_bind, kmp_internal_control_t *new_icvs, int argc, kmp_info_t *thr)
kmp_uint32 __kmp_eq_4(kmp_uint32 value, kmp_uint32 checker)
kmp_cached_addr_t * __kmp_threadpriv_cache_list
@ atomic_flag64
atomic 64-bit flags
@ flag_oncore
special 64-bit flag for on-core barrier (hierarchical)
@ flag32
atomic 32-bit flags
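These enumerators select among the flag classes used by the wait/release/suspend/resume entry points in this listing (__kmp_wait_64, __kmp_release_64, __kmp_suspend_32/64, __kmp_resume_32/64, and the kmp_flag_oncore variants). A sketch, assuming the real enum defines further members (e.g. a non-atomic 64-bit flag):
enum flag_type {
  flag32,        /* atomic 32-bit flags, matched by kmp_flag_32 */
  atomic_flag64, /* atomic 64-bit flags, matched by kmp_atomic_flag_64 */
  flag_oncore    /* 64-bit flag for the hierarchical on-core barrier,
                    matched by kmp_flag_oncore */
};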
void __kmp_internal_end_dtor(void)
union kmp_root kmp_root_p
kmp_uint64 __kmp_now_nsec()
union kmp_depnode kmp_depnode_t
KMP_EXPORT int __kmp_get_cancellation_status(int cancel_kind)
volatile int __kmp_all_nth
void __kmp_check_stksize(size_t *val)
kmp_target_offload_kind_t __kmp_target_offload
int __kmp_debug_buf_chars
int __kmpc_get_target_offload()
void __kmp_parallel_dxo(int *gtid_ref, int *cid_ref, ident_t *loc_ref)
KMP_EXPORT void __kmpc_critical_with_hint(ident_t *, kmp_int32 global_tid, kmp_critical_name *, uint32_t hint)
omp_memspace_handle_t __kmp_get_submemspace(omp_memspace_handle_t memspace, int num_resources, int *resources)
int __kmp_get_global_thread_id_reg(void)
void __kmp_push_num_threads(ident_t *loc, int gtid, int num_threads)
KMP_EXPORT void KMPC_CONVENTION kmpc_set_stacksize(int)
#define SCHEDULE_HAS_MONOTONIC(s)
int __kmpc_dispatch_next_4(ident_t *loc, kmp_int32 gtid, kmp_int32 *p_last, kmp_int32 *p_lb, kmp_int32 *p_ub, kmp_int32 *p_st)
int __kmp_is_address_mapped(void *addr)
kmp_lock_t __kmp_global_lock
int __kmp_barrier_gomp_cancel(int gtid)
double __kmp_read_cpu_time(void)
void __kmpc_destroy_allocator(int gtid, omp_allocator_handle_t al)
union KMP_ALIGN_CACHE kmp_root kmp_root_t
int __kmp_adjust_gtid_mode
#define __kmp_entry_gtid()
kmp_old_threads_list_t * __kmp_old_threads_list
void __kmp_internal_end_library(int gtid)
void __kmp_suspend_32(int th_gtid, kmp_flag_32< C, S > *flag)
struct kmp_internal_control kmp_internal_control_t
omp_allocator_handle_t __kmp_get_devices_allocator(int ndevs, const int *devs, omp_memspace_handle_t, int host)
void __kmp_hidden_helper_worker_thread_wait()
void __kmpc_dispatch_fini_4(ident_t *loc, kmp_int32 gtid)
volatile int __kmp_init_common
void __kmp_set_max_active_levels(int gtid, int new_max_active_levels)
void __kmpc_dispatch_deinit(ident_t *loc, kmp_int32 gtid)
See __kmpc_dispatch_deinit.
enum sched_type __kmp_auto
void __kmp_init_random(kmp_info_t *thread)
static int __kmp_tid_from_gtid(int gtid)
static bool KMP_UBER_GTID(int gtid)
kmp_int32 __kmp_use_yield_exp_set
kmp_event_t * __kmpc_task_allow_completion_event(ident_t *loc_ref, int gtid, kmp_task_t *task)
void __kmp_internal_end_thread(int gtid)
struct kmp_sys_info kmp_sys_info_t
KMP_EXPORT void __kmp_set_num_teams(int num_teams)
void __kmp_disable(int *old_state)
omp_allocator_handle_t const omp_large_cap_mem_alloc
volatile int __kmp_init_hidden_helper
void __kmp_push_num_threads_list(ident_t *loc, int gtid, kmp_uint32 list_length, int *num_threads_list)
struct kmp_depend_info kmp_depend_info_t
void __kmp_user_set_library(enum library_type arg)
const char * __kmp_hw_get_catalog_string(kmp_hw_t type, bool plural=false)
KMP_EXPORT void __kmpc_init_nest_lock_with_hint(ident_t *loc, kmp_int32 gtid, void **user_lock, uintptr_t hint)
void __kmpc_set_default_allocator(int gtid, omp_allocator_handle_t al)
omp_allocator_handle_t const omp_low_lat_mem_alloc
@ KMP_EVENT_UNINITIALIZED
@ KMP_EVENT_ALLOW_COMPLETION
void __kmp_elapsed(double *)
KMP_EXPORT void KMPC_CONVENTION kmpc_set_disp_num_buffers(int)
int __kmp_gtid_get_specific(void)
int __kmp_aux_get_team_num()
struct KMP_ALIGN_CACHE dispatch_private_info64 dispatch_private_info64_t
KMP_EXPORT void __kmpc_taskloop(ident_t *loc, kmp_int32 gtid, kmp_task_t *task, kmp_int32 if_val, kmp_uint64 *lb, kmp_uint64 *ub, kmp_int64 st, kmp_int32 nogroup, kmp_int32 sched, kmp_uint64 grainsize, void *task_dup)
volatile int __kmp_init_middle
void __kmp_hidden_helper_threads_deinitz_wait()
omp_allocator_handle_t const omp_high_bw_mem_alloc
void __kmp_set_num_threads(int new_nth, int gtid)
std::atomic< kmp_int32 > __kmp_task_counter
void __kmpc_error(ident_t *loc, int severity, const char *message)
static kmp_sched_t __kmp_sched_without_mods(kmp_sched_t kind)
KMP_EXPORT kmp_int32 __kmpc_omp_task_parts(ident_t *loc_ref, kmp_int32 gtid, kmp_task_t *new_task)
KMP_EXPORT kmp_task_t * __kmpc_omp_task_alloc(ident_t *loc_ref, kmp_int32 gtid, kmp_int32 flags, size_t sizeof_kmp_task_t, size_t sizeof_shareds, kmp_routine_entry_t task_entry)
KMP_EXPORT void __kmpc_doacross_fini(ident_t *loc, kmp_int32 gtid)
kmp_r_sched_t __kmp_get_schedule_global(void)
int __kmp_storage_map_verbose
int __kmp_allThreadsSpecified
enum sched_type __kmp_static
int __kmp_execute_tasks_64(kmp_info_t *thread, kmp_int32 gtid, kmp_flag_64< C, S > *flag, int final_spin, int *thread_finished, kmp_int32 is_constrained)
void kmpc_set_blocktime(int arg)
KMP_EXPORT void __kmpc_taskloop_5(ident_t *loc, kmp_int32 gtid, kmp_task_t *task, kmp_int32 if_val, kmp_uint64 *lb, kmp_uint64 *ub, kmp_int64 st, kmp_int32 nogroup, kmp_int32 sched, kmp_uint64 grainsize, kmp_int32 modifier, void *task_dup)
KMP_EXPORT void __kmpc_omp_task_complete_if0(ident_t *loc_ref, kmp_int32 gtid, kmp_task_t *task)
PACKED_REDUCTION_METHOD_T __kmp_determine_reduction_method(ident_t *loc, kmp_int32 global_tid, kmp_int32 num_vars, size_t reduce_size, void *reduce_data, void(*reduce_func)(void *lhs_data, void *rhs_data), kmp_critical_name *lck)
enum kmp_tasking_mode kmp_tasking_mode_t
void * __kmp_realloc(int gtid, void *ptr, size_t sz, omp_allocator_handle_t al, omp_allocator_handle_t free_al)
void __kmp_end_split_barrier(enum barrier_type bt, int gtid)
int PACKED_REDUCTION_METHOD_T
std::atomic< int > __kmp_thread_pool_active_nth
void __kmp_hidden_helper_threads_initz_routine()
KMP_EXPORT void __kmpc_pop_num_threads(ident_t *loc, kmp_int32 global_tid)
const char * __kmp_hw_get_keyword(kmp_hw_t type, bool plural=false)
union KMP_ALIGN_CACHE kmp_thread_data kmp_thread_data_t
kmp_uint32 __kmp_lt_4(kmp_uint32 value, kmp_uint32 checker)
KMP_EXPORT void __kmpc_doacross_post(ident_t *loc, kmp_int32 gtid, const kmp_int64 *vec)
int __kmp_affinity_num_places
int __kmp_enter_single(int gtid, ident_t *id_ref, int push_ws)
int __kmp_duplicate_library_ok
void * ___kmp_thread_malloc(kmp_info_t *th, size_t size KMP_SRC_LOC_DECL)
struct kmp_base_data kmp_base_data_t
struct kmp_base_thread_data kmp_base_thread_data_t
void KMP_EXPAND_NAME ompc_display_affinity(char const *format)
volatile int __kmp_need_register_serial
omp_memspace_handle_t const kmp_max_mem_space
#define KMP_PAD(type, sz)
void __kmp_internal_join(ident_t *id, int gtid, kmp_team_t *team)
kmp_bootstrap_lock_t __kmp_forkjoin_lock
KMP_EXPORT kmp_uint64 __kmpc_get_taskid()
omp_memspace_handle_t const omp_const_mem_space
struct kmp_cg_root kmp_cg_root_t
kmp_uint32 __kmp_barrier_release_branch_bits[bs_last_barrier]
KMP_EXPORT int KMPC_CONVENTION kmpc_get_affinity_mask_proc(int, kmp_affinity_mask_t *)
static kmp_info_t * __kmp_entry_thread()
KMP_EXPORT kmp_int32 __kmpc_omp_taskwait(ident_t *loc_ref, kmp_int32 gtid)
void __kmp_init_memkind()
struct kmp_task_affinity_info kmp_task_affinity_info_t
int __kmp_get_ancestor_thread_num(int gtid, int level)
void __kmp_hidden_helper_main_thread_wait()
void * __kmp_launch_thread(kmp_info_t *thr)
void __kmp_init_implicit_task(ident_t *loc_ref, kmp_info_t *this_thr, kmp_team_t *team, int tid, int set_curr_task)
kmp_int32 __kmp_default_device
void __kmp_omp_display_env(int verbose)
omp_memspace_handle_t const omp_null_mem_space
void __kmp_cleanup_threadprivate_caches()
void __kmp_middle_initialize(void)
static void copy_icvs(kmp_internal_control_t *dst, kmp_internal_control_t *src)
struct kmp_depnode_list kmp_depnode_list_t
KMP_EXPORT void __kmpc_end_taskgroup(ident_t *loc, int gtid)
kmp_bootstrap_lock_t __kmp_exit_lock
KMP_EXPORT void __kmpc_omp_taskwait_deps_51(ident_t *loc_ref, kmp_int32 gtid, kmp_int32 ndeps, kmp_depend_info_t *dep_list, kmp_int32 ndeps_noalias, kmp_depend_info_t *noalias_dep_list, kmp_int32 has_no_wait)
omp_memspace_handle_t const omp_large_cap_mem_space
int __kmp_force_monotonic
kmp_info_t ** __kmp_threads
void __kmp_abort(char const *format,...)
void __kmp_hidden_helper_initz_release()
enum sched_type __kmp_sched
void __kmp_suspend_uninitialize_thread(kmp_info_t *th)
void * ___kmp_page_allocate(size_t size KMP_SRC_LOC_DECL)
struct kmp_cached_addr kmp_cached_addr_t
int __kmp_enable_task_throttling
void __kmp_unregister_root(int gtid)
void __kmp_finalize_bget(kmp_info_t *th)
static void __kmp_reset_root_init_mask(int gtid)
kmp_uint32 __kmp_barrier_gather_bb_dflt
KMP_EXPORT void __kmpc_omp_task_begin_if0(ident_t *loc_ref, kmp_int32 gtid, kmp_task_t *task)
kmp_uint32 __kmp_barrier_release_bb_dflt
struct dispatch_shared_info32 dispatch_shared_info32_t
int __kmp_task_stealing_constraint
int __kmp_need_register_atfork
struct private_common * kmp_threadprivate_insert(int gtid, void *pc_addr, void *data_addr, size_t pc_size)
void __kmp_parallel_deo(int *gtid_ref, int *cid_ref, ident_t *loc_ref)
struct kmp_target_data kmp_target_data_t
int __kmp_dispatch_num_buffers
KMP_EXPORT void __kmpc_unset_nest_lock(ident_t *loc, kmp_int32 gtid, void **user_lock)
#define SCHEDULE_WITHOUT_MODIFIERS(s)
kmp_uint32 __kmp_yield_init
KMP_EXPORT void __kmp_set_teams_thread_limit(int limit)
void __kmp_internal_end_dest(void *)
union kmp_team kmp_team_t
void * __kmpc_realloc(int gtid, void *ptr, size_t sz, omp_allocator_handle_t al, omp_allocator_handle_t free_al)
void __kmp_suspend_oncore(int th_gtid, kmp_flag_oncore *flag)
bool __kmp_dflt_max_active_levels_set
int __kmp_get_memspace_num_resources(omp_memspace_handle_t memspace)
void __kmp_run_after_invoked_task(int gtid, int tid, kmp_info_t *this_thr, kmp_team_t *team)
KMP_EXPORT void KMPC_CONVENTION kmpc_set_defaults(char const *)
void __kmp_lock_suspend_mx(kmp_info_t *th)
struct dispatch_shared_info64 dispatch_shared_info64_t
omp_memspace_handle_t const llvm_omp_target_shared_mem_space
char * __kmp_debug_buffer
omp_memspace_handle_t const omp_high_bw_mem_space
void __kmp_parallel_initialize(void)
void __kmp_terminate_thread(int gtid)
omp_memspace_handle_t __kmp_get_devices_memspace(int ndevs, const int *devs, omp_memspace_handle_t, int host)
int __kmp_nesting_mode_nlevels
void __kmp_set_nesting_mode_threads()
void __kmp_unregister_library(void)
int __kmpc_dispatch_next_4u(ident_t *loc, kmp_int32 gtid, kmp_int32 *p_last, kmp_uint32 *p_lb, kmp_uint32 *p_ub, kmp_int32 *p_st)
See __kmpc_dispatch_next_4.
int(* launch_t)(int gtid)
int __kmp_ignore_mppbeg(void)
void __kmp_create_worker(int gtid, kmp_info_t *th, size_t stack_size)
int * __kmp_nesting_nth_level
KMP_EXPORT void __kmpc_doacross_wait(ident_t *loc, kmp_int32 gtid, const kmp_int64 *vec)
omp_allocator_handle_t const omp_const_mem_alloc
KMP_EXPORT void __kmpc_set_nest_lock(ident_t *loc, kmp_int32 gtid, void **user_lock)
volatile int __kmp_init_parallel
KMP_EXPORT void __kmpc_push_num_threads_strict(ident_t *loc, kmp_int32 global_tid, kmp_int32 num_threads, int severity, const char *message)
omp_allocator_handle_t const omp_pteam_mem_alloc
union kmp_barrier_union kmp_balign_t
KMP_EXPORT int KMPC_CONVENTION kmpc_set_affinity_mask_proc(int, kmp_affinity_mask_t *)
omp_allocator_handle_t const llvm_omp_target_host_mem_alloc
int __kmp_need_register_atfork_specified
omp_allocator_handle_t const kmp_max_mem_alloc
kmp_int32 __kmp_enable_hidden_helper
struct kmp_desc_base kmp_desc_base_t
enum kmp_sched kmp_sched_t
void __kmp_internal_fork(ident_t *id, int gtid, kmp_team_t *team)
void __kmp_aux_set_stacksize(size_t arg)
static const size_t KMP_AFFINITY_FORMAT_SIZE
enum library_type __kmp_library
void __kmp_tasking_barrier(kmp_team_t *team, kmp_info_t *thread, int gtid)
void __kmp_push_num_teams(ident_t *loc, int gtid, int num_teams, int num_threads)
struct kmp_tasking_flags kmp_tasking_flags_t
static bool __kmp_is_hybrid_cpu()
void __kmp_clear_system_time(void)
KMP_EXPORT kmp_uint64 __kmpc_get_parent_taskid()
struct KMP_ALIGN_CACHE kmp_base_team kmp_base_team_t
size_t __kmp_aux_capture_affinity(int gtid, const char *format, kmp_str_buf_t *buffer)
KMP_EXPORT int __kmp_get_max_teams(void)
void KMPC_SET_NESTED(int flag)
void(* kmpc_micro_bound)(kmp_int32 *bound_tid, kmp_int32 *bound_nth,...)
void __kmp_get_schedule(int gtid, kmp_sched_t *sched, int *chunk)
kmp_team_t * __kmp_reap_team(kmp_team_t *)
kmp_key_t __kmp_gtid_threadprivate_key
KMP_EXPORT void * __kmpc_threadprivate(ident_t *, kmp_int32 global_tid, void *data, size_t size)
struct kmp_task_pri kmp_task_pri_t
kmp_hws_item_t __kmp_hws_socket
KMP_EXPORT void KMPC_CONVENTION ompc_set_schedule(omp_sched_t, int)
int __kmp_fork_call(ident_t *loc, int gtid, enum fork_context_e fork_context, kmp_int32 argc, microtask_t microtask, launch_t invoker, kmp_va_list ap)
kmp_info_t * __kmp_thread_pool_insert_pt
KMP_EXPORT kmp_int32 __kmpc_cancel(ident_t *loc_ref, kmp_int32 gtid, kmp_int32 cncl_kind)
KMP_EXPORT void * kmpc_calloc(size_t nelem, size_t elsize)
int __kmpc_dispatch_next_8u(ident_t *loc, kmp_int32 gtid, kmp_int32 *p_last, kmp_uint64 *p_lb, kmp_uint64 *p_ub, kmp_int64 *p_st)
See __kmpc_dispatch_next_4.
int __kmp_get_global_thread_id(void)
int __kmp_env_consistency_check
#define bs_reduction_barrier
void __kmp_runtime_destroy(void)
kmp_uint64 __kmp_pause_init
kmp_uint64 __kmp_taskloop_min_tasks
KMP_EXPORT int KMPC_CONVENTION ompc_get_ancestor_thread_num(int)
union KMP_ALIGN_CACHE kmp_desc kmp_desc_t
char const * __kmp_barrier_branch_bit_env_name[bs_last_barrier]
kmp_hws_item_t __kmp_hws_proc
void __kmp_aux_display_affinity(int gtid, const char *format)
static void __kmp_sched_apply_mods_intkind(kmp_sched_t kind, enum sched_type *internal_kind)
void __kmp_fulfill_event(kmp_event_t *event)
KMP_EXPORT void __kmpc_taskgroup(ident_t *loc, int gtid)
int __kmp_read_system_info(struct kmp_sys_info *info)
void * ___kmp_thread_realloc(kmp_info_t *th, void *ptr, size_t size KMP_SRC_LOC_DECL)
kmp_uint32 __kmp_neq_4(kmp_uint32 value, kmp_uint32 checker)
volatile int __kmp_hidden_helper_team_done
KMP_EXPORT kmp_depnode_list_t * __kmpc_task_get_successors(kmp_task_t *task)
void __kmp_push_proc_bind(ident_t *loc, int gtid, kmp_proc_bind_t proc_bind)
static void __kmp_sched_apply_mods_stdkind(kmp_sched_t *kind, enum sched_type internal_kind)
struct kmp_base_depnode kmp_base_depnode_t
union kmp_barrier_team_union kmp_balign_team_t
void __kmp_init_nesting_mode()
std::atomic< kmp_int32 > __kmp_unexecuted_hidden_helper_tasks
KMP_EXPORT int KMPC_CONVENTION kmpc_unset_affinity_mask_proc(int, kmp_affinity_mask_t *)
KMP_EXPORT void __kmpc_end_scope(ident_t *loc, kmp_int32 gtid, void *reserved)
KMP_EXPORT int __kmpc_invoke_task_func(int gtid)
void * __kmpc_aligned_alloc(int gtid, size_t align, size_t sz, omp_allocator_handle_t al)
size_t __kmp_sys_min_stksize
char __kmp_blocktime_units
union kmp_info kmp_info_p
void * ___kmp_allocate(size_t size KMP_SRC_LOC_DECL)
KMP_EXPORT void KMPC_CONVENTION ompc_set_max_active_levels(int)
struct kmp_sched_flags kmp_sched_flags_t
kmp_hws_item_t __kmp_hws_core
union KMP_ALIGN_CACHE kmp_ordered_team kmp_ordered_team_t
int __kmp_invoke_task_func(int gtid)
size_t KMP_EXPAND_NAME ompc_capture_affinity(char *buffer, size_t buf_size, char const *format)
struct kmp_base_global kmp_base_global_t
void ompc_set_nested(int flag)
void __kmp_set_strict_num_threads(ident_t *loc, int gtid, int sev, const char *msg)
kmp_uint32 __kmp_barrier_gather_branch_bits[bs_last_barrier]
KMP_EXPORT void __kmpc_scope(ident_t *loc, kmp_int32 gtid, void *reserved)
KMP_EXPORT void KMPC_CONVENTION kmpc_set_stacksize_s(size_t)
size_t __kmp_malloc_pool_incr
static int __kmp_adjust_gtid_for_hidden_helpers(int gtid)
kmp_task_t * __kmp_task_alloc(ident_t *loc_ref, kmp_int32 gtid, kmp_tasking_flags_t *flags, size_t sizeof_kmp_task_t, size_t sizeof_shareds, kmp_routine_entry_t task_entry)
void __kmp_adjust_num_threads(int new_nproc)
void __kmpc_dispatch_fini_8(ident_t *loc, kmp_int32 gtid)
See __kmpc_dispatch_fini_4.
KMP_EXPORT void kmpc_free(void *ptr)
int __kmp_threads_capacity
KMP_EXPORT int __kmpc_test_lock(ident_t *loc, kmp_int32 gtid, void **user_lock)
kmp_info_t ** __kmp_hidden_helper_threads
kmp_uint32 __kmp_le_4(kmp_uint32 value, kmp_uint32 checker)
void __kmp_push_current_task_to_thread(kmp_info_t *this_thr, kmp_team_t *team, int tid)
int __kmp_debug_buf_warn_chars
static int __kmp_gtid_from_tid(int tid, const kmp_team_t *team)
KMP_EXPORT void KMPC_FOR_STATIC_INIT(ident_t *loc, kmp_int32 global_tid, kmp_int32 schedtype, kmp_int32 *plastiter, kmp_int *plower, kmp_int *pupper, kmp_int *pstride, kmp_int incr, kmp_int chunk)
void __kmp_resume_oncore(int target_gtid, kmp_flag_oncore *flag)
bool __kmp_wpolicy_passive
void __kmp_save_internal_controls(kmp_info_t *thread)
void __kmp_push_task_team_node(kmp_info_t *thread, kmp_team_t *team)
void __kmp_threadprivate_resize_cache(int newCapacity)
union kmp_r_sched kmp_r_sched_t
void __kmp_runtime_initialize(void)
int __kmp_invoke_teams_master(int gtid)
void __kmp_hidden_helper_initialize()
volatile int __kmp_init_hidden_helper_threads
void KMPC_SET_NUM_THREADS(int arg)
KMP_EXPORT kmp_int32 __kmpc_cancellationpoint(ident_t *loc_ref, kmp_int32 gtid, kmp_int32 cncl_kind)
void __kmp_common_destroy_gtid(int gtid)
int __kmp_try_suspend_mx(kmp_info_t *th)
static void __kmp_aux_convert_blocktime(int *bt)
int __kmpc_dispatch_next_8(ident_t *loc, kmp_int32 gtid, kmp_int32 *p_last, kmp_int64 *p_lb, kmp_int64 *p_ub, kmp_int64 *p_st)
See __kmpc_dispatch_next_4.
int __kmp_display_affinity
enum sched_type __kmp_guided
void __kmp_resume_64(int target_gtid, kmp_flag_64< C, S > *flag)
void __kmp_resume_32(int target_gtid, kmp_flag_32< C, S > *flag)
KMP_EXPORT int __kmp_get_teams_thread_limit(void)
#define KMP_INLINE_ARGV_ENTRIES
int __kmp_execute_tasks_32(kmp_info_t *thread, kmp_int32 gtid, kmp_flag_32< C, S > *flag, int final_spin, int *thread_finished, kmp_int32 is_constrained)
const char * __kmp_hw_get_core_type_string(kmp_hw_core_type_t type)
void __kmpc_dispatch_fini_8u(ident_t *loc, kmp_int32 gtid)
See __kmpc_dispatch_fini_4.
PACKED_REDUCTION_METHOD_T __kmp_force_reduction_method
void __kmp_atomic_suspend_64(int th_gtid, kmp_atomic_flag_64< C, S > *flag)
KMP_EXPORT int KMPC_CONVENTION ompc_get_team_size(int)
void * kmp_affinity_mask_t
void __kmp_serial_initialize(void)
omp_allocator_handle_t const omp_thread_mem_alloc
kmp_uint32 __kmp_wait_4(kmp_uint32 volatile *spinner, kmp_uint32 checker, kmp_uint32(*pred)(kmp_uint32, kmp_uint32), void *obj)
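The 4-byte comparison helpers in this listing (__kmp_eq_4, __kmp_neq_4, __kmp_lt_4, __kmp_le_4, __kmp_ge_4) are shaped to serve as the pred argument of __kmp_wait_4. A hypothetical caller, under the assumption that the function spins until pred(*spinner, checker) returns nonzero:
/* Spin until *counter equals expected; obj is opaque tracing context
   and is left NULL in this sketch. */
static void wait_until_equal(kmp_uint32 volatile *counter,
                             kmp_uint32 expected) {
  (void)__kmp_wait_4(counter, expected, __kmp_eq_4, /* obj = */ NULL);
}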
void __kmp_resume_if_soft_paused()
KMP_EXPORT void __kmpc_init_lock_with_hint(ident_t *loc, kmp_int32 gtid, void **user_lock, uintptr_t hint)
kmp_int32 __kmp_max_task_priority
void __kmp_initialize_bget(kmp_info_t *th)
static void __kmp_assert_valid_gtid(kmp_int32 gtid)
int __kmp_teams_thread_limit
KMP_EXPORT void * kmpc_realloc(void *ptr, size_t size)
void __kmp_cleanup_hierarchy()
KMP_EXPORT kmp_task_t * __kmpc_omp_target_task_alloc(ident_t *loc_ref, kmp_int32 gtid, kmp_int32 flags, size_t sizeof_kmp_task_t, size_t sizeof_shareds, kmp_routine_entry_t task_entry, kmp_int64 device_id)
KMP_EXPORT void __kmpc_init_lock(ident_t *loc, kmp_int32 gtid, void **user_lock)
void __kmp_serialized_parallel(ident_t *id, kmp_int32 gtid)
struct kmp_dephash_entry kmp_dephash_entry_t
void ompc_set_num_threads(int arg)
kmp_int32 __kmp_omp_task(kmp_int32 gtid, kmp_task_t *new_task, bool serialize_immediate)
struct kmp_base_task_team kmp_base_task_team_t
void __kmp_pop_current_task_from_thread(kmp_info_t *this_thr)
void __kmp_gtid_set_specific(int gtid)
char const * __kmp_barrier_pattern_env_name[bs_last_barrier]
void __kmp_internal_begin(void)
std::atomic< int > __kmp_debug_count
void __kmp_task_team_setup(kmp_info_t *this_thr, kmp_team_t *team)
int __kmp_atomic_execute_tasks_64(kmp_info_t *thread, kmp_int32 gtid, kmp_atomic_flag_64< C, S > *flag, int final_spin, int *thread_finished, kmp_int32 is_constrained)
static kmp_info_t * __kmp_thread_from_gtid(int gtid)
void KMP_EXPAND_NAME ompc_set_affinity_format(char const *format)
void __kmp_expand_file_name(char *result, size_t rlen, char *pattern)
void __kmp_atomic_resume_64(int target_gtid, kmp_atomic_flag_64< C, S > *flag)
void * ___kmp_thread_calloc(kmp_info_t *th, size_t nelem, size_t elsize KMP_SRC_LOC_DECL)
static int __kmp_gtid_from_thread(const kmp_info_t *thr)
omp_allocator_handle_t const llvm_omp_target_shared_mem_alloc
@ KMP_HW_MAX_NUM_CORE_TYPES
@ KMP_HW_CORE_TYPE_UNKNOWN
void __kmpc_dispatch_init_4u(ident_t *loc, kmp_int32 gtid, enum sched_type schedule, kmp_uint32 lb, kmp_uint32 ub, kmp_int32 st, kmp_int32 chunk)
See __kmpc_dispatch_init_4.
void __kmp_suspend_initialize_thread(kmp_info_t *th)
volatile int __kmp_init_serial
@ reduction_method_not_defined
void __kmpc_dispatch_init_8u(ident_t *loc, kmp_int32 gtid, enum sched_type schedule, kmp_uint64 lb, kmp_uint64 ub, kmp_int64 st, kmp_int64 chunk)
See __kmpc_dispatch_init_4.
void __kmp_exit_thread(int exit_status)
KMP_EXPORT kmp_base_depnode_t * __kmpc_task_get_depnode(kmp_task_t *task)
int __kmp_invoke_microtask(microtask_t pkfn, int gtid, int npr, int argc, void *argv[])
kmp_int32 __kmp_hidden_helper_threads_num
void __kmpc_dispatch_fini_4u(ident_t *loc, kmp_int32 gtid)
See __kmpc_dispatch_fini_4.
static void __kmp_type_convert(T1 src, T2 *dest)
void __kmp_join_call(ident_t *loc, int gtid, int exit_teams=0)
enum kmp_bar_pat kmp_bar_pat_e
void __kmp_fini_memkind()
KMP_EXPORT kmp_int32 __kmp_get_reduce_method(void)
omp_memspace_handle_t const llvm_omp_target_device_mem_space
int __kmp_ignore_mppend(void)
struct kmp_taskdata kmp_taskdata_t
void __kmp_wait_64(kmp_info_t *this_thr, kmp_flag_64<> *flag, int final_spin)
int __kmp_debug_buf_atomic
struct KMP_ALIGN_CACHE kmp_bstate kmp_bstate_t
KMP_EXPORT void __kmpc_unset_lock(ident_t *loc, kmp_int32 gtid, void **user_lock)
kmp_bar_pat_e __kmp_barrier_release_pattern[bs_last_barrier]
std::atomic< kmp_int32 > __kmp_team_counter
void __kmp_reap_worker(kmp_info_t *th)
void __kmpc_dispatch_init_8(ident_t *loc, kmp_int32 gtid, enum sched_type schedule, kmp_int64 lb, kmp_int64 ub, kmp_int64 st, kmp_int64 chunk)
See __kmpc_dispatch_init_4.
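Taken together, the __kmpc_dispatch_init_* / __kmpc_dispatch_next_* / __kmpc_dispatch_fini_* entries form one dispatcher per index width. A hypothetical compiler-style lowering of a dynamically scheduled loop over the 32-bit entry points; the _4 signatures are inferred by analogy with the 4u/8/8u declarations above, and body() stands in for the user's loop body:
extern void body(kmp_int32 i); /* hypothetical user loop body */

void lowered_dynamic_loop(ident_t *loc, kmp_int32 gtid, kmp_int32 n) {
  kmp_int32 last, lb, ub, st;
  /* schedule(dynamic, 4) over the closed range [0, n-1] */
  __kmpc_dispatch_init_4(loc, gtid, kmp_sch_dynamic_chunked,
                         0, n - 1, 1, /* chunk = */ 4);
  /* each successful next() hands this thread one chunk [lb, ub] */
  while (__kmpc_dispatch_next_4(loc, gtid, &last, &lb, &ub, &st)) {
    for (kmp_int32 i = lb; i <= ub; i += st)
      body(i);
  }
}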
void __kmp_hidden_helper_threads_deinitz_release()
bool __kmp_hwloc_available
void __kmp_expand_host_name(char *buffer, size_t size)
int __kmpc_pause_resource(kmp_pause_status_t level)
union KMP_ALIGN_CACHE kmp_info kmp_info_t
enum sched_type __kmp_sch_map[]
void __kmp_task_team_wait(kmp_info_t *this_thr, kmp_team_t *team, int wait=1)
kmp_uint64 __kmp_hardware_timestamp(void)
void ___kmp_free(void *ptr KMP_SRC_LOC_DECL)
union KMP_ALIGN_CACHE kmp_time_global kmp_time_global_t
omp_allocator_handle_t const llvm_omp_target_device_mem_alloc
union KMP_ALIGN_CACHE kmp_global kmp_global_t
omp_allocator_handle_t const omp_null_allocator
kmp_uint32 __kmp_yield_next
void __kmp_aux_set_blocktime(int arg, kmp_info_t *thread, int tid)
constexpr size_t alignment
[ATOMIC_CMPXCHG expansions for kmp_int8, kmp_int16, and kmp_uint32 under KMP_ARCH_X86 — macro-expansion residue]
#define KMP_BUILD_ASSERT(expr)
#define KMP_DEBUG_ASSERT(cond)
unsigned long long kmp_uint64
static volatile kmp_i18n_cat_status_t status
void __kmp_fatal(kmp_msg_t message,...)
union kmp_user_lock * kmp_user_lock_p
kmp_ticket_lock_t kmp_bootstrap_lock_t
kmp_ticket_lock_t kmp_lock_t
union kmp_tas_lock kmp_tas_lock_t
void(* microtask_t)(int *gtid, int *npr,...)
#define INTERNODE_CACHE_LINE
#define KMP_ATTRIBUTE_TARGET_WAITPKG
#define KMP_EXPAND_NAME(api_name)
struct kmp_str_buf kmp_str_buf_t
struct kmp_depnode_list kmp_depnode_list_t
void microtask(int *global_tid, int *bound_tid)
int __kmpc_start_record_task(ident_t *, int, int, int)
void __kmpc_end_record_task(ident_t *, int, int, int)
struct ompt_lw_taskteam_s ompt_lw_taskteam_t
__attribute__((noinline))
struct private_common * data[KMP_HASH_TABLE_SIZE]
struct dispatch_private_info * next
std::atomic< kmp_uint32 > steal_flag
kmp_int32 ordered_dummy[KMP_MAX_ORDERED - 1]
volatile kmp_uint32 ordered_iteration
volatile kmp_uint32 iteration
volatile kmp_int32 num_done
volatile kmp_int64 num_done
volatile kmp_uint64 iteration
kmp_int64 ordered_dummy[KMP_MAX_ORDERED - 3]
volatile kmp_uint64 ordered_iteration
volatile kmp_uint32 buffer_index
kmp_int32 doacross_num_done
union dispatch_shared_info::shared_info u
volatile kmp_int32 doacross_buf_idx
volatile kmp_uint32 * doacross_flags
The ident structure that describes a source location.
kmp_int32 get_openmp_version()
char const * psource
String describing the source location.
kmp_int32 reserved_1
might be used in Fortran; see above
kmp_int32 reserved_2
not really used in Fortran any more; see above
kmp_int32 reserved_3
source[4] in Fortran, do not use for C++
kmp_int32 flags
also f.flags; KMP_IDENT_xxx flags; KMP_IDENT_KMPC identifies this union member
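The five fields above describe the layout of ident_t; reassembled as a sketch (field order follows kmp.h, but treat the details as indicative rather than authoritative):
typedef struct ident {
  kmp_int32 reserved_1; /* might be used in Fortran */
  kmp_int32 flags;      /* KMP_IDENT_xxx flags; KMP_IDENT_KMPC identifies
                           this union member */
  kmp_int32 reserved_2; /* not really used in Fortran any more */
  kmp_int32 reserved_3; /* source[4] in Fortran, do not use for C++ */
  char const *psource;  /* semicolon-separated source-location string */
} ident_t;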
Memory allocator information is shared with offload runtime.
omp_alloctrait_value_t target_access
omp_alloctrait_value_t atomic_scope
kmp_allocator_t * fb_data
omp_alloctrait_value_t fb
omp_alloctrait_value_t partition
omp_memspace_handle_t memspace
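These trait fields belong to the runtime's allocator record. A partial sketch assembling just the members named above; the real kmp_allocator_t carries more state (e.g. alignment and pool bookkeeping), and the member order here is illustrative:
typedef struct kmp_allocator_t {
  omp_memspace_handle_t memspace;       /* backing memory space */
  omp_alloctrait_value_t fb;            /* fallback trait */
  struct kmp_allocator_t *fb_data;      /* allocator used on fallback */
  omp_alloctrait_value_t partition;     /* partition trait */
  omp_alloctrait_value_t target_access; /* shared with offload runtime */
  omp_alloctrait_value_t atomic_scope;  /* shared with offload runtime */
} kmp_allocator_t;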
volatile kmp_uint32 t_value
std::atomic< kmp_int32 > npredecessors
std::atomic< kmp_int32 > nrefs
kmp_lock_t * mtx_locks[MAX_MTX_DEPS]
kmp_depnode_list_t * successors
enum dynamic_mode g_dynamic_mode
KMP_ALIGN_CACHE int th_set_nproc
kmp_hot_team_ptr_t * th_hot_teams
KMP_ALIGN_CACHE kmp_balign_t th_bar[bs_last_barrier]
kmp_cg_root_t * th_cg_roots
kmp_taskdata_t * th_current_task
KMP_ALIGN_CACHE kmp_team_p * th_serial_team
kmp_task_team_t * th_task_team
kmp_info_p * th_next_pool
kmp_uint64 th_team_bt_intervals
microtask_t th_teams_microtask
KMP_ALIGN_CACHE volatile kmp_int32 th_next_waiting
struct cons_header * th_cons
struct private_common * th_pri_head
omp_allocator_handle_t th_def_allocator
kmp_uint8 th_active_in_pool
std::atomic< kmp_uint32 > th_used_in_team
struct common_table * th_pri_common
kmp_teams_size_t th_teams_size
volatile void * th_sleep_loc
volatile kmp_uint32 th_spin_here
flag_type th_sleep_loc_type
kmp_proc_bind_t th_set_proc_bind
kmp_info_p * th_team_master
kmp_info_t * r_uber_thread
std::atomic< int > r_in_parallel
kmp_int32 tt_found_proxy_tasks
KMP_ALIGN_CACHE std::atomic< kmp_int32 > tt_unfinished_threads
kmp_bootstrap_lock_t tt_task_pri_lock
std::atomic< kmp_int32 > tt_num_task_pri
kmp_bootstrap_lock_t tt_threads_lock
kmp_int32 tt_untied_task_encountered
kmp_task_pri_t * tt_task_pri_list
kmp_int32 tt_hidden_helper_task_encountered
kmp_thread_data_t * tt_threads_data
KMP_ALIGN_CACHE volatile kmp_uint32 tt_active
kmp_task_team_t * tt_next
omp_allocator_handle_t t_def_allocator
kmp_nested_nthreads_t * t_nested_nth
std::atomic< void * > t_tg_reduce_data[2]
kmp_proc_bind_t t_proc_bind
KMP_ALIGN_CACHE void ** t_argv
void * t_inline_argv[KMP_INLINE_ARGV_ENTRIES]
kmp_task_team_t * t_task_team[2]
kmp_taskdata_t * t_implicit_task_taskdata
std::atomic< kmp_int32 > t_cancel_request
KMP_ALIGN_CACHE kmp_info_t ** t_threads
dispatch_shared_info_t * t_disp_buffer
KMP_ALIGN_CACHE kmp_internal_control_t * t_control_stack_top
KMP_ALIGN_CACHE int t_max_argc
std::atomic< int > t_construct
kmp_balign_team_t t_bar[bs_last_barrier]
KMP_ALIGN_CACHE kmp_ordered_team_t t_ordered
KMP_ALIGN_CACHE int t_master_tid
std::atomic< int > t_tg_fini_counter[2]
char pad[sizeof(kmp_lock_t)]
kmp_int32 td_deque_ntasks
kmp_taskdata_t ** td_deque
kmp_int32 td_deque_last_stolen
kmp_bootstrap_lock_t td_deque_lock
kmp_uint32 * skip_per_level
KMP_ALIGN_CACHE volatile kmp_uint64 b_arrived
kmp_uint8 use_oncore_barrier
struct kmp_bstate * parent_bar
kmp_internal_control_t th_fixed_icvs
struct kmp_cached_addr * next
kmp_int32 cg_thread_limit
static TargetType to(SourceType src) [repeated for each signed/unsigned source-to-target conversion specialization used by __kmp_type_convert]
struct kmp_depend_info::&lt;anonymous&gt;::&lt;anonymous&gt; flags
kmp_dephash_entry_t * next_in_bucket
kmp_depnode_list_t * last_set
kmp_depnode_list_t * prev_set
kmp_dephash_entry_t ** buckets
kmp_depnode_list_t * next
void(* th_dxo_fcn)(int *gtid, int *cid, ident_t *)
kmp_int32 th_doacross_buf_idx
volatile kmp_uint32 * th_doacross_flags
dispatch_private_info_t * th_dispatch_pr_current
kmp_int64 * th_doacross_info
dispatch_private_info_t * th_disp_buffer
void(* th_deo_fcn)(int *gtid, int *cid, ident_t *)
dispatch_shared_info_t * th_dispatch_sh_current
kmp_proc_bind_t proc_bind
struct kmp_internal_control * next
PACKED_REDUCTION_METHOD_T packed_reduction_method
volatile int this_construct
Memory space information is shared with offload runtime.
omp_memspace_handle_t memspace
kmp_proc_bind_t * bind_types
struct kmp_old_threads_list_t * next
int length[KMP_MAX_FIELDS]
int offset[KMP_MAX_FIELDS]
struct kmp_task_affinity_info::&lt;anonymous&gt; flags
kmp_task_team_list_t * next
kmp_task_team_t * task_team
void * shareds
pointer to block of pointers to shared vars
kmp_int32 part_id
part id for the task
kmp_routine_entry_t routine
pointer to routine to call for executing task
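The three fields above form the public task descriptor handed out by __kmpc_omp_task_alloc. A sketch, with field order per kmp.h's kmp_task_t; compilers request sizeof_kmp_task_t bytes, so task-private data may follow this header:
typedef struct kmp_task {
  void *shareds;               /* block of pointers to shared vars */
  kmp_routine_entry_t routine; /* routine to call to execute the task */
  kmp_int32 part_id;           /* part id for the task */
} kmp_task_t;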
kmp_uint32 td_taskwait_counter
ident_t * td_taskwait_ident
kmp_task_team_t * td_task_team
kmp_dephash_t * td_dephash
kmp_taskdata_t * td_parent
std::atomic< kmp_int32 > td_incomplete_child_tasks
std::atomic< kmp_int32 > td_untied_count
kmp_taskgroup_t * td_taskgroup
kmp_info_p * td_alloc_thread
kmp_depnode_t * td_depnode
kmp_int32 td_taskwait_thread
kmp_tasking_flags_t td_flags
kmp_taskdata_t * td_last_tied
KMP_ALIGN_CACHE kmp_internal_control_t td_icvs
kmp_event_t td_allow_completion_event
kmp_target_data_t td_target_data
KMP_ALIGN_CACHE std::atomic< kmp_int32 > td_allocated_child_tasks
std::atomic< kmp_int32 > cancel_request
std::atomic< kmp_int32 > count
struct kmp_taskgroup * parent
kmp_int32 reduce_num_data
unsigned priority_specified
unsigned free_agent_eligible
unsigned destructors_thunk
struct private_common * next
struct private_common * link
struct private_data * next
union shared_common::&lt;anonymous&gt; cct
union shared_common::&lt;anonymous&gt; dt
struct private_data * pod_init
struct shared_common * next
union shared_common::&lt;anonymous&gt; ct
struct shared_common * data[KMP_HASH_TABLE_SIZE]
dispatch_private_info64_t p64
dispatch_private_info32_t p32
dispatch_shared_info64_t s64
dispatch_shared_info32_t s32
char b_pad[KMP_PAD(kmp_bstate_t, CACHE_LINE)]
kmp_routine_entry_t destructors
kmp_int32 priority
priority specified by user for the task
char dn_pad[KMP_PAD(kmp_base_depnode_t, CACHE_LINE)]
char ds_pad[KMP_PAD(kmp_desc_base_t, CACHE_LINE)]
char g_pad[KMP_PAD(kmp_base_global_t, CACHE_LINE)]
char th_pad[KMP_PAD(kmp_base_info_t, CACHE_LINE)]
char dt_pad[KMP_PAD(kmp_base_data_t, CACHE_LINE)]
enum sched_type r_sched_type
char r_pad[KMP_PAD(kmp_base_root_t, CACHE_LINE)]
char dt_pad[KMP_PAD(kmp_base_data_t, CACHE_LINE)]
char tt_pad[KMP_PAD(kmp_base_task_team_t, CACHE_LINE)]
char t_pad[KMP_PAD(kmp_base_team_t, CACHE_LINE)]
char td_pad[KMP_PAD(kmp_base_thread_data_t, CACHE_LINE)]
kmp_base_thread_data_t td
char dt_pad[KMP_PAD(kmp_base_data_t, CACHE_LINE)]
kmp_uint64 __kmp_ticks_per_usec
void __kmp_reap_monitor(kmp_info_t *th)
kmp_uint64 __kmp_ticks_per_msec
int __kmp_read_from_file(char const *path, char const *format,...)
void __kmp_register_atfork(void)
void __kmp_free_handle(kmp_thread_t tHandle)
void __kmp_affinity_determine_capable(const char *env_var)
void __kmp_affinity_bind_thread(int proc)
int __kmp_get_load_balance(int max)
int __kmp_still_running(kmp_info_t *th)
void __kmp_initialize_system_tick(void)
int __kmp_is_thread_alive(kmp_info_t *th, DWORD *exit_val)