#if !KMP_OS_DRAGONFLY && !KMP_OS_FREEBSD && !KMP_OS_NETBSD && !KMP_OS_OPENBSD
#include <sys/resource.h>
#include <libperfstat.h>
#include <sys/syscall.h>
#include <sys/sysinfo.h>
#include <sys/sysctl.h>
#elif KMP_OS_DRAGONFLY || KMP_OS_FREEBSD
#include <sys/sysctl.h>
#include <pthread_np.h>
#elif KMP_OS_NETBSD || KMP_OS_OPENBSD
#include <sys/sysctl.h>
#include <pthread_np.h>
#include <sys/loadavg.h>
#ifndef TIMEVAL_TO_TIMESPEC
// Convert a struct timeval to a struct timespec.
#define TIMEVAL_TO_TIMESPEC(tv, ts)                                            \
  do {                                                                         \
    (ts)->tv_sec = (tv)->tv_sec;                                               \
    (ts)->tv_nsec = (tv)->tv_usec * 1000;                                      \
  } while (0)
#endif

// Flatten a struct timespec into a single nanosecond count.
#define TS2NS(timespec)                                                        \
  (((timespec).tv_sec * (long int)1e9) + (timespec).tv_nsec)
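// Illustrative sketch (not part of the runtime): how the two conversion macros
// above are typically combined to turn a gettimeofday() result into a single
// nanosecond value. The helper name example_now_ns() is hypothetical.
#if 0
#include <sys/time.h>
static long long example_now_ns(void) {
  struct timeval tv;
  struct timespec ts;
  gettimeofday(&tv, NULL);       // seconds + microseconds
  TIMEVAL_TO_TIMESPEC(&tv, &ts); // convert to seconds + nanoseconds
  return TS2NS(ts);              // flatten to one nanosecond count
}
#endif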
#if KMP_HANDLE_SIGNALS
typedef void (*sig_func_t)(int);
STATIC_EFI2_WORKAROUND struct sigaction __kmp_sighldrs[NSIG];
static sigset_t __kmp_sigset;
#endif

static void __kmp_print_cond(char *buffer, kmp_cond_align_t *cond) {
  KMP_SNPRINTF(buffer, 128, "(cond (lock (%ld, %d)), (descr (%p)))",
               cond->c_cond.__c_lock.__status, cond->c_cond.__c_lock.__spinlock,
               cond->c_cond.__c_waiting);
}
#if ((KMP_OS_LINUX || KMP_OS_FREEBSD || KMP_OS_NETBSD || KMP_OS_DRAGONFLY ||   \
      KMP_OS_AIX) &&                                                           \
     KMP_AFFINITY_SUPPORTED)

void __kmp_affinity_bind_thread(int which) {
  KMP_ASSERT2(KMP_AFFINITY_CAPABLE(),
              "Illegal set affinity operation when not capable");

  kmp_affin_mask_t *mask;
  KMP_CPU_ALLOC_ON_STACK(mask);
  KMP_CPU_SET(which, mask);
  __kmp_set_system_affinity(mask, TRUE);
  KMP_CPU_FREE_FROM_STACK(mask);
}

  // Round the mask size up to a multiple of __kmp_affin_mask_size.
  if (mask_size % sizeof(__kmp_affin_mask_size))
    mask_size += sizeof(__kmp_affin_mask_size) -
                 mask_size % sizeof(__kmp_affin_mask_size);
  KMP_AFFINITY_ENABLE(mask_size);
  KA_TRACE(10,
           ("__kmp_affinity_determine_capable: "
            "AIX OS affinity interface bindprocessor functional (mask size = "
            "%" KMP_SIZE_T_SPEC ").\n",
            __kmp_affin_mask_size));
#define KMP_CPU_SET_SIZE_LIMIT (1024 * 1024)
#define KMP_CPU_SET_TRY_SIZE CACHE_LINE
#elif KMP_OS_FREEBSD || KMP_OS_DRAGONFLY
#define KMP_CPU_SET_SIZE_LIMIT (sizeof(cpuset_t))
#elif KMP_OS_NETBSD
#define KMP_CPU_SET_SIZE_LIMIT (256)
#endif

void __kmp_affinity_determine_capable(const char *env_var) {
  // Check and see if the OS supports thread affinity.
  int verbose = __kmp_affinity.flags.verbose;
  int warnings = __kmp_affinity.flags.warnings;
  enum affinity_type type = __kmp_affinity.type;
  long gCode;
  unsigned long size;
  kmp_affin_mask_t *buf;
  buf = (kmp_affin_mask_t *)KMP_INTERNAL_MALLOC(KMP_CPU_SET_SIZE_LIMIT);

  gCode = syscall(__NR_sched_getaffinity, 0, KMP_CPU_SET_TRY_SIZE, buf);
  KA_TRACE(30, ("__kmp_affinity_determine_capable: "
                "initial getaffinity call returned %ld errno = %d\n",
                gCode, errno));

  if (gCode < 0 && errno != EINVAL) {
    // System call not supported: warn the user if affinity was requested.
    if (verbose ||
        (warnings && (type != affinity_none) && (type != affinity_default) &&
         (type != affinity_disabled))) {

    KMP_AFFINITY_DISABLE();

  } else if (gCode > 0) {
    // The optimal situation: the OS returns the size of the buffer it expects.
    KMP_AFFINITY_ENABLE(gCode);
    KA_TRACE(10, ("__kmp_affinity_determine_capable: "
                  "affinity supported (mask size %d)\n",
                  (int)__kmp_affin_mask_size));

  // Call the getaffinity system call repeatedly with increasing set sizes
  // until we succeed, or reach an upper bound on the search.
  KA_TRACE(30, ("__kmp_affinity_determine_capable: "
                "searching for proper set size\n"));
  for (size = 1; size <= KMP_CPU_SET_SIZE_LIMIT; size *= 2) {
    gCode = syscall(__NR_sched_getaffinity, 0, size, buf);
    KA_TRACE(30, ("__kmp_affinity_determine_capable: "
                  "getaffinity for mask size %ld returned %ld errno = %d\n",
                  size, gCode, errno));

    if (errno == ENOSYS) {
      KA_TRACE(30, ("__kmp_affinity_determine_capable: "
                    "inconsistent OS call behavior: errno == ENOSYS for mask "
                    "size %d\n",
                    size));
      if (verbose ||
          (warnings && (type != affinity_none) &&
           (type != affinity_default) && (type != affinity_disabled))) {

      KMP_AFFINITY_DISABLE();

    KMP_AFFINITY_ENABLE(gCode);
    KA_TRACE(10, ("__kmp_affinity_determine_capable: "
                  "affinity supported (mask size %d)\n",
                  (int)__kmp_affin_mask_size));
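// Illustrative sketch (not part of the runtime): probing the kernel's CPU-mask
// size by calling sched_getaffinity() with growing buffers, the same idea as
// the loop above but written against the glibc wrapper rather than the raw
// syscall. probe_mask_bytes() is a hypothetical helper name.
#if 0
#define _GNU_SOURCE
#include <errno.h>
#include <sched.h>
#include <stdlib.h>
static long probe_mask_bytes(void) {
  for (size_t size = sizeof(cpu_set_t); size <= 1024 * 1024; size *= 2) {
    void *buf = calloc(1, size);
    if (!buf)
      return -1;
    int rc = sched_getaffinity(0, size, (cpu_set_t *)buf);
    free(buf);
    if (rc == 0)
      return (long)size; // the kernel accepted this mask size
    if (errno != EINVAL)
      return -1; // unexpected failure; give up
  }
  return -1;
}
#endif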
#elif KMP_OS_FREEBSD || KMP_OS_NETBSD || KMP_OS_DRAGONFLY

  gCode = pthread_getaffinity_np(pthread_self(), KMP_CPU_SET_SIZE_LIMIT,
                                 reinterpret_cast<cpuset_t *>(buf));
  KA_TRACE(30, ("__kmp_affinity_determine_capable: "
                "initial getaffinity call returned %d errno = %d\n",
                gCode, errno));

  KMP_AFFINITY_ENABLE(KMP_CPU_SET_SIZE_LIMIT);
  KA_TRACE(10, ("__kmp_affinity_determine_capable: "
                "affinity supported (mask size %d)\n",
                (int)__kmp_affin_mask_size));

  // Affinity is not supported: disable it and warn if the user requested it.
  KMP_AFFINITY_DISABLE();
  KA_TRACE(10, ("__kmp_affinity_determine_capable: "
                "cannot determine mask size - affinity not supported\n"));
  if (verbose || (warnings && (type != affinity_none) &&
                  (type != affinity_default) && (type != affinity_disabled))) {
int __kmp_futex_determine_capable() {
  int loc = 0;
  long rc = syscall(__NR_futex, &loc, FUTEX_WAKE, 1, NULL, NULL, 0);
  int retval = (rc == 0) || (errno != ENOSYS);

  KA_TRACE(10,
           ("__kmp_futex_determine_capable: rc = %d errno = %d\n", rc, errno));
  KA_TRACE(10, ("__kmp_futex_determine_capable: futex syscall%s supported\n",
                retval ? "" : " not"));

  return retval;
}
#if (KMP_ARCH_X86 || KMP_ARCH_X86_64 || KMP_ARCH_WASM) && (!KMP_ASM_INTRINS)
  new_value = old_value | d;
  new_value = old_value | d;
  new_value = old_value & d;
  new_value = old_value & d;
  new_value = old_value | d;
  new_value = old_value | d;
  new_value = old_value & d;
  new_value = old_value & d;
#if KMP_ARCH_X86 || KMP_ARCH_WASM
  new_value = old_value + d;
  new_value = old_value + d;
  new_value = old_value + d;
  new_value = old_value + d;
  new_value = old_value | d;
  new_value = old_value | d;
  new_value = old_value & d;
  new_value = old_value & d;
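// Illustrative sketch (not part of the runtime): the compare-and-swap retry
// loop that each "new_value = old_value OP d" line above sits inside when the
// assembly intrinsics are unavailable. Shown here with GCC/Clang __atomic
// builtins instead of the KMP_* macros; example_fetch_or32() is a hypothetical
// name.
#if 0
#include <stdint.h>
static uint32_t example_fetch_or32(volatile uint32_t *p, uint32_t d) {
  uint32_t old_value = __atomic_load_n(p, __ATOMIC_RELAXED);
  uint32_t new_value = old_value | d;
  // Retry until no other thread modified *p between the load and the CAS.
  while (!__atomic_compare_exchange_n(p, &old_value, new_value,
                                      /*weak=*/0, __ATOMIC_ACQ_REL,
                                      __ATOMIC_RELAXED)) {
    new_value = old_value | d; // old_value was refreshed by the failed CAS
  }
  return old_value; // the value *p held before the OR was applied
}
#endif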
#ifdef KMP_CANCEL_THREADS
  KA_TRACE(10, ("__kmp_terminate_thread: kill (%d)\n", gtid));
  status = pthread_cancel(th->th.th_info.ds.ds_thread);
#endif

#if KMP_OS_LINUX || KMP_OS_DRAGONFLY || KMP_OS_FREEBSD || KMP_OS_NETBSD ||     \
    KMP_OS_HAIKU || KMP_OS_HURD || KMP_OS_SOLARIS || KMP_OS_AIX

  if ((status = thr_stksegment(&s)) < 0) {

  KA_TRACE(60, ("__kmp_set_stack_info: T#%d thr_stksegment returned size:"
                " %lu, low addr: %p\n",
                gtid, size, addr));

  status = pthread_attr_init(&attr);
#if KMP_OS_DRAGONFLY || KMP_OS_FREEBSD || KMP_OS_NETBSD
  status = pthread_attr_get_np(pthread_self(), &attr);
#else
  status = pthread_getattr_np(pthread_self(), &attr);
#endif

  KA_TRACE(60,
           ("__kmp_set_stack_info: T#%d pthread_attr_getstack returned size:"
            " %lu, low addr: %p\n",
            gtid, size, addr));
  status = pthread_attr_destroy(&attr);

  TCW_PTR(th->th.th_info.ds.ds_stacksize, 0);
  TCW_PTR(th->th.th_info.ds.ds_stackbase, &stack_data);
  TCW_4(th->th.th_info.ds.ds_stackgrow, TRUE);
static void *__kmp_launch_worker(void *thr) {
  int gtid;
  int status, old_type, old_state;
#ifdef KMP_BLOCK_SIGNALS
  sigset_t new_set, old_set;
#endif
#if KMP_OS_LINUX || KMP_OS_DRAGONFLY || KMP_OS_FREEBSD || KMP_OS_NETBSD ||     \
    KMP_OS_OPENBSD || KMP_OS_HAIKU || KMP_OS_HURD || KMP_OS_SOLARIS ||         \
    KMP_OS_AIX
  void *volatile padding = 0;
#endif

  gtid = ((kmp_info_t *)thr)->th.th_info.ds.ds_gtid;

  __kmp_stats_thread_ptr = ((kmp_info_t *)thr)->th.th_stats;
  __kmp_stats_thread_ptr->startLife();

  __kmp_itt_thread_name(gtid);

#if KMP_AFFINITY_SUPPORTED
  __kmp_affinity_bind_init_mask(gtid);
#endif

#ifdef KMP_CANCEL_THREADS
  status = pthread_setcanceltype(PTHREAD_CANCEL_ASYNCHRONOUS, &old_type);
  status = pthread_setcancelstate(PTHREAD_CANCEL_ENABLE, &old_state);
#endif

#if KMP_ARCH_X86 || KMP_ARCH_X86_64
  // Set FP control regs to be a copy of the parallel initialization thread's.
  __kmp_clear_x87_fpu_status_word();
  __kmp_load_x87_fpu_control_word(&__kmp_init_x87_fpu_control_word);
  __kmp_load_mxcsr(&__kmp_init_mxcsr);
#endif

#ifdef KMP_BLOCK_SIGNALS
  status = sigfillset(&new_set);
  status = pthread_sigmask(SIG_BLOCK, &new_set, &old_set);
#endif

#if KMP_OS_LINUX || KMP_OS_DRAGONFLY || KMP_OS_FREEBSD || KMP_OS_NETBSD ||     \
    KMP_OS_OPENBSD || KMP_OS_HAIKU || KMP_OS_HURD || KMP_OS_SOLARIS ||         \
    KMP_OS_AIX

#ifdef KMP_BLOCK_SIGNALS
  status = pthread_sigmask(SIG_SETMASK, &old_set, NULL);
#endif
static void *__kmp_launch_monitor(void *thr) {
  int status, old_type, old_state;
#ifdef KMP_BLOCK_SIGNALS
  sigset_t new_set;
#endif
  struct timespec interval;

  KA_TRACE(10, ("__kmp_launch_monitor: #1 launched\n"));

  __kmp_itt_thread_ignore();

#ifdef KMP_CANCEL_THREADS
  status = pthread_setcanceltype(PTHREAD_CANCEL_ASYNCHRONOUS, &old_type);
  status = pthread_setcancelstate(PTHREAD_CANCEL_ENABLE, &old_state);
#endif

  int sched = sched_getscheduler(0);
  if (sched == SCHED_FIFO || sched == SCHED_RR) {
    // Boost the monitor's priority so it is not starved by the worker threads.
    struct sched_param param;
    int max_priority = sched_get_priority_max(sched);
    sched_getparam(0, &param);
    if (param.sched_priority < max_priority) {
      param.sched_priority += 1;
      rc = sched_setscheduler(0, sched, &param);

  if (__kmp_monitor_wakeups == 1) {
    interval.tv_sec = 1;
    interval.tv_nsec = 0;

  KA_TRACE(10, ("__kmp_launch_monitor: #2 monitor\n"));

    KA_TRACE(15, ("__kmp_launch_monitor: update\n"));

    status = gettimeofday(&tval, NULL);

    now.tv_sec += interval.tv_sec;
    now.tv_nsec += interval.tv_nsec;

  KA_TRACE(10, ("__kmp_launch_monitor: #3 cleanup\n"));

#ifdef KMP_BLOCK_SIGNALS
  status = sigfillset(&new_set);
  status = pthread_sigmask(SIG_UNBLOCK, &new_set, NULL);
#endif

  KA_TRACE(10, ("__kmp_launch_monitor: #4 finished\n"));

    KA_TRACE(10, ("__kmp_launch_monitor: #5 terminate sig=%d\n",
                  __kmp_global.g.g_abort));

    KA_TRACE(10, ("__kmp_launch_monitor: #6 raise sig=%d\n",
                  __kmp_global.g.g_abort));

  KA_TRACE(10, ("__kmp_launch_monitor: #7 exit\n"));
void __kmp_create_worker(int gtid, kmp_info_t *th, size_t stack_size) {
  pthread_t handle;
  pthread_attr_t thread_attr;
  int status;

  th->th.th_info.ds.ds_gtid = gtid;

  // New worker threads get a fresh stats node; root (uber) threads reuse the
  // pointer set up in __kmp_register_root().
  if (!KMP_UBER_GTID(gtid)) {
    th->th.th_stats = __kmp_stats_list->push_back(gtid);
  } else {
    th->th.th_stats = __kmp_stats_thread_ptr;
  }

  if (KMP_UBER_GTID(gtid)) {
    KA_TRACE(10, ("__kmp_create_worker: uber thread (%d)\n", gtid));
    th->th.th_info.ds.ds_thread = pthread_self();
    return;
  }

  KA_TRACE(10, ("__kmp_create_worker: try to create thread (%d)\n", gtid));

#ifdef KMP_THREAD_ATTR
  status = pthread_attr_init(&thread_attr);
  status = pthread_attr_setdetachstate(&thread_attr, PTHREAD_CREATE_JOINABLE);

  KA_TRACE(10, ("__kmp_create_worker: T#%d, default stacksize = %lu bytes, "
                "__kmp_stksize = %lu bytes, final stacksize = %lu bytes\n",
                gtid, KMP_DEFAULT_STKSIZE, __kmp_stksize, stack_size));

#ifdef _POSIX_THREAD_ATTR_STACKSIZE
  status = pthread_attr_setstacksize(&thread_attr, stack_size);
#ifdef KMP_BACKUP_STKSIZE
  // If the requested stack size was rejected, fall back to the backup size.
  KA_TRACE(10, ("__kmp_create_worker: T#%d, default stacksize = %lu bytes, "
                "__kmp_stksize = %lu bytes, (backup) final stacksize = %lu "
                "bytes\n",
                gtid, KMP_DEFAULT_STKSIZE, __kmp_stksize, stack_size));
  status = pthread_attr_setstacksize(&thread_attr, stack_size);
#endif // KMP_BACKUP_STKSIZE
#endif // _POSIX_THREAD_ATTR_STACKSIZE
#endif // KMP_THREAD_ATTR

  status =
      pthread_create(&handle, &thread_attr, __kmp_launch_worker, (void *)th);
  if (status != 0 || !handle) {
#ifdef _POSIX_THREAD_ATTR_STACKSIZE

  // Give the worker a recognizable name where the platform supports it.
#if defined(LIBOMP_HAVE_PTHREAD_SET_NAME_NP)
  pthread_set_name_np(handle, "openmp_worker");
#elif defined(LIBOMP_HAVE_PTHREAD_SETNAME_NP) && !KMP_OS_DARWIN
  // Some implementations take a printf-style format string.
  pthread_setname_np(handle, "%s", const_cast<char *>("openmp_worker"));

  pthread_setname_np(handle, "openmp_worker");
#endif

  th->th.th_info.ds.ds_thread = handle;

#ifdef KMP_THREAD_ATTR
  status = pthread_attr_destroy(&thread_attr);
#endif

  KA_TRACE(10, ("__kmp_create_worker: done creating thread (%d)\n", gtid));
}
void __kmp_create_monitor(kmp_info_t *th) {
  pthread_t handle;
  pthread_attr_t thread_attr;
  size_t size;
  int status;
  int auto_adj_size = FALSE;

  if (__kmp_blocktime == KMP_MAX_BLOCKTIME) {
    // We don't need a monitor thread in case of an infinite blocktime.
    KA_TRACE(10, ("__kmp_create_monitor: skipping monitor thread because of "
                  "MAX blocktime\n"));
    th->th.th_info.ds.ds_tid = 0;
    th->th.th_info.ds.ds_gtid = 0;
    return;
  }
  KA_TRACE(10, ("__kmp_create_monitor: try to create monitor\n"));

#ifdef KMP_THREAD_ATTR
  if (__kmp_monitor_stksize == 0) {
    __kmp_monitor_stksize = KMP_DEFAULT_MONITOR_STKSIZE;
    auto_adj_size = TRUE;
  }
  status = pthread_attr_init(&thread_attr);
  status = pthread_attr_setdetachstate(&thread_attr, PTHREAD_CREATE_JOINABLE);

#ifdef _POSIX_THREAD_ATTR_STACKSIZE
  status = pthread_attr_getstacksize(&thread_attr, &size);
#endif

  if (__kmp_monitor_stksize == 0) {
    __kmp_monitor_stksize = KMP_DEFAULT_MONITOR_STKSIZE;
  }

  KA_TRACE(10, ("__kmp_create_monitor: default stacksize = %lu bytes,"
                "requested stacksize = %lu bytes\n",
                size, __kmp_monitor_stksize));

#ifdef _POSIX_THREAD_ATTR_STACKSIZE
  KA_TRACE(10, ("__kmp_create_monitor: setting stacksize = %lu bytes,",
                __kmp_monitor_stksize));
  status = pthread_attr_setstacksize(&thread_attr, __kmp_monitor_stksize);

    __kmp_monitor_stksize *= 2;

      KMP_MSG(CantSetMonitorStackSize, (long int)__kmp_monitor_stksize),

  status =
      pthread_create(&handle, &thread_attr, __kmp_launch_monitor, (void *)th);

#ifdef _POSIX_THREAD_ATTR_STACKSIZE
  if (auto_adj_size && (__kmp_monitor_stksize < (size_t)0x40000000)) {
    __kmp_monitor_stksize *= 2;

  th->th.th_info.ds.ds_thread = handle;

#if KMP_REAL_TIME_FIX

#ifdef KMP_THREAD_ATTR
  status = pthread_attr_destroy(&thread_attr);
#endif

  KA_TRACE(10, ("__kmp_create_monitor: monitor created %#.8lx\n",
                th->th.th_info.ds.ds_thread));
}
void __kmp_exit_thread(int exit_status) {
  pthread_exit((void *)(intptr_t)exit_status);
}

void __kmp_resume_monitor();

void __kmp_reap_monitor(kmp_info_t *th) {
  int status;
  void *exit_val;

  KA_TRACE(10, ("__kmp_reap_monitor: try to reap monitor thread with handle"
                " %#.8lx\n",
                th->th.th_info.ds.ds_thread));

  // If the monitor never started, there is nothing to reap.
  KA_TRACE(10, ("__kmp_reap_monitor: monitor did not start, returning\n"));

  // Probe the monitor thread, wake it, then join it.
  status = pthread_kill(th->th.th_info.ds.ds_thread, 0);
  __kmp_resume_monitor();

  KA_TRACE(10, ("__kmp_reap_monitor: try to join with monitor\n"));
  status = pthread_join(th->th.th_info.ds.ds_thread, &exit_val);
  if (exit_val != th) {

  KA_TRACE(10, ("__kmp_reap_monitor: done reaping monitor thread with handle"
                " %#.8lx\n",
                th->th.th_info.ds.ds_thread));
}

void __kmp_reap_worker(kmp_info_t *th) {
  int status;
  void *exit_val;

  KA_TRACE(
      10, ("__kmp_reap_worker: try to reap T#%d\n", th->th.th_info.ds.ds_gtid));

  status = pthread_join(th->th.th_info.ds.ds_thread, &exit_val);

  if (exit_val != th) {
    KA_TRACE(10, ("__kmp_reap_worker: worker T#%d did not reap properly, "
                  "exit_val = %p\n",
                  th->th.th_info.ds.ds_gtid, exit_val));
  }

  KA_TRACE(10, ("__kmp_reap_worker: done reaping T#%d\n",
                th->th.th_info.ds.ds_gtid));
}
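// Illustrative sketch (not part of the runtime): the "is the thread still
// there?" probe used above. pthread_kill() with signal 0 performs only error
// checking, so ESRCH means the target no longer exists and the wake/join can
// be skipped. example_reap() is a hypothetical name.
#if 0
#include <errno.h>
#include <pthread.h>
#include <signal.h>
static void example_reap(pthread_t t) {
  if (pthread_kill(t, 0) == ESRCH)
    return; // thread already gone, nothing to wake or join
  void *exit_val = NULL;
  pthread_join(t, &exit_val); // otherwise wait for it to finish
}
#endif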
#if KMP_HANDLE_SIGNALS

static void __kmp_null_handler(int signo) {
  // Do nothing, for doing SIG_IGN-type actions.
}

static void __kmp_team_handler(int signo) {
  // First-stage handler: note the signal and shut the library down.

static void __kmp_sigaction(int signum, const struct sigaction *act,
                            struct sigaction *oldact) {
  int rc = sigaction(signum, act, oldact);
  KMP_CHECK_SYSFAIL_ERRNO("sigaction", rc);
}

static void __kmp_install_one_handler(int sig, sig_func_t handler_func,
                                      int parallel_init) {
  KB_TRACE(60,
           ("__kmp_install_one_handler( %d, ..., %d )\n", sig, parallel_init));
  if (parallel_init) {
    struct sigaction new_action;
    struct sigaction old_action;
    new_action.sa_handler = handler_func;
    new_action.sa_flags = 0;
    sigfillset(&new_action.sa_mask);
    __kmp_sigaction(sig, &new_action, &old_action);
    if (old_action.sa_handler == __kmp_sighldrs[sig].sa_handler) {
      sigaddset(&__kmp_sigset, sig);
    } else {
      // The application already installed its own handler: put it back.
      __kmp_sigaction(sig, &old_action, NULL);
    }
  } else {
    // Save the original handlers so parallel initialization can compare.
    __kmp_sigaction(sig, NULL, &__kmp_sighldrs[sig]);
  }
}

static void __kmp_remove_one_handler(int sig) {
  KB_TRACE(60, ("__kmp_remove_one_handler( %d )\n", sig));
  if (sigismember(&__kmp_sigset, sig)) {
    struct sigaction old;
    __kmp_sigaction(sig, &__kmp_sighldrs[sig], &old);
    if ((old.sa_handler != __kmp_team_handler) &&
        (old.sa_handler != __kmp_null_handler)) {
      // Restore the user's signal handler.
      KB_TRACE(10, ("__kmp_remove_one_handler: oops, not our handler, "
                    "restoring: sig=%d\n",
                    sig));
      __kmp_sigaction(sig, &old, NULL);
    }
    sigdelset(&__kmp_sigset, sig);
  }
}

void __kmp_install_signals(int parallel_init) {
  KB_TRACE(10, ("__kmp_install_signals( %d )\n", parallel_init));
  if (__kmp_handle_signals || !parallel_init) {
    sigemptyset(&__kmp_sigset);
    __kmp_install_one_handler(SIGHUP, __kmp_team_handler, parallel_init);
    __kmp_install_one_handler(SIGINT, __kmp_team_handler, parallel_init);
    __kmp_install_one_handler(SIGQUIT, __kmp_team_handler, parallel_init);
    __kmp_install_one_handler(SIGILL, __kmp_team_handler, parallel_init);
    __kmp_install_one_handler(SIGABRT, __kmp_team_handler, parallel_init);
    __kmp_install_one_handler(SIGFPE, __kmp_team_handler, parallel_init);
    __kmp_install_one_handler(SIGBUS, __kmp_team_handler, parallel_init);
    __kmp_install_one_handler(SIGSEGV, __kmp_team_handler, parallel_init);
    __kmp_install_one_handler(SIGSYS, __kmp_team_handler, parallel_init);
    __kmp_install_one_handler(SIGTERM, __kmp_team_handler, parallel_init);
    __kmp_install_one_handler(SIGPIPE, __kmp_team_handler, parallel_init);
  }
}

void __kmp_remove_signals(void) {
  int sig;
  KB_TRACE(10, ("__kmp_remove_signals()\n"));
  for (sig = 1; sig < NSIG; ++sig) {
    __kmp_remove_one_handler(sig);
  }
}
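// Illustrative sketch (not part of the runtime): installing a handler only if
// the application has not already installed its own, and restoring the old
// handler otherwise, the same idea as __kmp_install_one_handler() above.
// example_install() and example_handler() are hypothetical names.
#if 0
#include <signal.h>
static void example_handler(int signo) { (void)signo; }
static int example_install(int sig) {
  struct sigaction new_action, old_action;
  new_action.sa_handler = example_handler;
  new_action.sa_flags = 0;
  sigfillset(&new_action.sa_mask); // block everything while the handler runs
  if (sigaction(sig, &new_action, &old_action) != 0)
    return -1;
  if (old_action.sa_handler != SIG_DFL) {
    // Someone else had a handler installed: put it back and step aside.
    sigaction(sig, &old_action, NULL);
    return 0;
  }
  return 1; // our handler is now active
}
#endif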
void __kmp_enable(int new_state) {
#ifdef KMP_CANCEL_THREADS
  int status, old_state;
  status = pthread_setcancelstate(new_state, &old_state);
#endif
}

void __kmp_disable(int *old_state) {
#ifdef KMP_CANCEL_THREADS
  int status;
  status = pthread_setcancelstate(PTHREAD_CANCEL_DISABLE, old_state);
#endif
}

  // In the child process after a fork, reset the affinity bookkeeping.
#if KMP_AFFINITY_SUPPORTED
#if KMP_OS_LINUX || KMP_OS_FREEBSD || KMP_OS_NETBSD || KMP_OS_DRAGONFLY ||     \
    KMP_OS_AIX
  kmp_set_thread_affinity_mask_initial();
#endif
  for (kmp_affinity_t *affinity : __kmp_affinities)
    *affinity = KMP_AFFINITY_INIT(affinity->env_var);
  __kmp_affin_fullMask = nullptr;
  __kmp_affin_origMask = nullptr;
  __kmp_init_monitor = 0;

#if !KMP_USE_DYNAMIC_LOCK

  KA_TRACE(10, ("__kmp_atfork_child: checking cache address list %p\n",
                __kmp_threadpriv_cache_list));

    KC_TRACE(50, ("__kmp_atfork_child: zeroing cache at address %p\n",

  // __kmp_suspend_initialize_thread: return if already initialized, otherwise
  // claim the slot and set up the per-thread suspend condvar and mutex.
  if (old_value == new_value)
    return;
  if (old_value == -1 || !__kmp_atomic_compare_store(
                             &th->th.th_suspend_init_count, old_value, -1)) {

    status = pthread_cond_init(&th->th.th_suspend_cv.c_cond,
                               &__kmp_suspend_cond_attr);
    status = pthread_mutex_init(&th->th.th_suspend_mx.m_mutex,
                                &__kmp_suspend_mutex_attr);

  status = pthread_cond_destroy(&th->th.th_suspend_cv.c_cond);
  status = pthread_mutex_destroy(&th->th.th_suspend_mx.m_mutex);
  --th->th.th_suspend_init_count;

  return (pthread_mutex_trylock(&th->th.th_suspend_mx.m_mutex) == 0);

  int status = pthread_mutex_lock(&th->th.th_suspend_mx.m_mutex);

  int status = pthread_mutex_unlock(&th->th.th_suspend_mx.m_mutex);
template <class C>
static void __kmp_suspend_template(int th_gtid, C *flag) {
  kmp_info_t *th = __kmp_threads[th_gtid];
  int status;
  typename C::flag_t old_spin;

  KF_TRACE(30, ("__kmp_suspend_template: T#%d enter for flag = %p\n", th_gtid,
                flag->get()));

  __kmp_suspend_initialize_thread(th);
  __kmp_lock_suspend_mx(th);

  KF_TRACE(10, ("__kmp_suspend_template: T#%d setting sleep bit for spin(%p)\n",
                th_gtid, flag->get()));

  // Publish the sleeping bit so that a releasing thread knows to wake us.
  old_spin = flag->set_sleeping();
  TCW_PTR(th->th.th_sleep_loc, (void *)flag);
  th->th.th_sleep_loc_type = flag->get_type();

  // (With an infinite blocktime there is no reason to sleep; back out.)
    flag->unset_sleeping();
    TCW_PTR(th->th.th_sleep_loc, NULL);

  KF_TRACE(5, ("__kmp_suspend_template: T#%d set sleep bit for spin(%p)==%x,"
               " was %x\n",
               th_gtid, flag->get(), flag->load(), old_spin));

  if (flag->done_check_val(old_spin) || flag->done_check()) {
    // The wait condition is already satisfied: false alarm, undo and return.
    flag->unset_sleeping();
    TCW_PTR(th->th.th_sleep_loc, NULL);
    KF_TRACE(5, ("__kmp_suspend_template: T#%d false alarm, reset sleep bit "
                 "for spin(%p)\n",
                 th_gtid, flag->get()));
  } else {
    // Encased in a while loop in case the condvar is signalled spuriously.
    int deactivated = FALSE;

    while (flag->is_sleeping()) {
#ifdef DEBUG_SUSPEND
      char buffer[128];
      __kmp_suspend_count++;
      __kmp_print_cond(buffer, &th->th.th_suspend_cv);
      __kmp_printf("__kmp_suspend_template: suspending T#%d: %s\n", th_gtid,
                   buffer);
#endif
      // Mark the thread as no longer active (first loop iteration only).
      if (!deactivated) {
        th->th.th_active = FALSE;
        if (th->th.th_active_in_pool) {
          th->th.th_active_in_pool = FALSE;
        }
        deactivated = TRUE;
      }

#if USE_SUSPEND_TIMEOUT
      struct timespec now;
      struct timeval tval;
      int msecs;

      status = gettimeofday(&tval, NULL);
      TIMEVAL_TO_TIMESPEC(&tval, &now);

      msecs = (4 * __kmp_dflt_blocktime) + 200;
      now.tv_sec += msecs / 1000;
      now.tv_nsec += (msecs % 1000) * 1000;

      KF_TRACE(15, ("__kmp_suspend_template: T#%d about to perform "
                    "pthread_cond_timedwait\n",
                    th_gtid));
      status = pthread_cond_timedwait(&th->th.th_suspend_cv.c_cond,
                                      &th->th.th_suspend_mx.m_mutex, &now);
#else
      KF_TRACE(15, ("__kmp_suspend_template: T#%d about to perform"
                    " pthread_cond_wait\n",
                    th_gtid));
      status = pthread_cond_wait(&th->th.th_suspend_cv.c_cond,
                                 &th->th.th_suspend_mx.m_mutex);
#endif // USE_SUSPEND_TIMEOUT

      // If interrupted or timed out but no longer marked sleeping, make sure
      // the sleep location gets reset anyway.
      if (!flag->is_sleeping() &&
          ((status == EINTR) || (status == ETIMEDOUT))) {
        flag->unset_sleeping();
        TCW_PTR(th->th.th_sleep_loc, NULL);
      }

      if (status == ETIMEDOUT) {
        if (flag->is_sleeping()) {
          KF_TRACE(100,
                   ("__kmp_suspend_template: T#%d timeout wakeup\n", th_gtid));
        } else {
          KF_TRACE(2, ("__kmp_suspend_template: T#%d timeout wakeup, sleep bit "
                       "not set!\n",
                       th_gtid));
          TCW_PTR(th->th.th_sleep_loc, NULL);
        }
      } else if (flag->is_sleeping()) {
        KF_TRACE(100,
                 ("__kmp_suspend_template: T#%d spurious wakeup\n", th_gtid));
      }
    } // while

    // Mark the thread as active again.
    if (deactivated) {
      th->th.th_active = TRUE;
      if (TCR_4(th->th.th_in_pool)) {
        th->th.th_active_in_pool = TRUE;
      }
    }
  }

  TCW_PTR(th->th.th_sleep_loc, NULL);

#ifdef DEBUG_SUSPEND
  {
    char buffer[128];
    __kmp_print_cond(buffer, &th->th.th_suspend_cv);
    __kmp_printf("__kmp_suspend_template: T#%d has awakened: %s\n", th_gtid,
                 buffer);
  }
#endif

  __kmp_unlock_suspend_mx(th);
  KF_TRACE(30, ("__kmp_suspend_template: T#%d exit\n", th_gtid));
}
template <bool C, bool S>
void __kmp_suspend_32(int th_gtid, kmp_flag_32<C, S> *flag) {
  __kmp_suspend_template(th_gtid, flag);
}
template <bool C, bool S>
void __kmp_suspend_64(int th_gtid, kmp_flag_64<C, S> *flag) {
  __kmp_suspend_template(th_gtid, flag);
}
template <bool C, bool S>
void __kmp_atomic_suspend_64(int th_gtid, kmp_atomic_flag_64<C, S> *flag) {
  __kmp_suspend_template(th_gtid, flag);
}
template <class C>
static void __kmp_resume_template(int target_gtid, C *flag) {
  kmp_info_t *th = __kmp_threads[target_gtid];
  int status;

#ifdef KMP_DEBUG
  int gtid = TCR_4(__kmp_init_gtid) ? __kmp_get_gtid() : -1;
#endif

  KF_TRACE(30, ("__kmp_resume_template: T#%d wants to wakeup T#%d enter\n",
                gtid, target_gtid));

  __kmp_suspend_initialize_thread(th);
  __kmp_lock_suspend_mx(th);

  if (!flag || flag != th->th.th_sleep_loc) {
    // The given flag is not the one the thread is sleeping on: use the one
    // recorded in th_sleep_loc instead.
    flag = (C *)CCAST(void *, th->th.th_sleep_loc);
  }

  // If the flag is null or its type changed, someone else already woke it up.
  if (!flag) {
    KF_TRACE(5, ("__kmp_resume_template: T#%d exiting, thread T#%d already "
                 "awake: flag(%p)\n",
                 gtid, target_gtid, (void *)NULL));
    __kmp_unlock_suspend_mx(th);
    return;
  } else if (flag->get_type() != th->th.th_sleep_loc_type) {
    // The flag type does not match this instantiation of the template.
    KF_TRACE(5,
             ("__kmp_resume_template: T#%d retrying, thread T#%d Mismatch flag(%p), "
              "spin(%p) type=%d ptr_type=%d\n",
              gtid, target_gtid, flag, flag->get(), flag->get_type(),
              th->th.th_sleep_loc_type));
    __kmp_unlock_suspend_mx(th);
    return;
  } else {
    if (!flag->is_sleeping()) {
      KF_TRACE(5, ("__kmp_resume_template: T#%d exiting, thread T#%d already "
                   "awake: flag(%p): %u\n",
                   gtid, target_gtid, flag->get(), (unsigned int)flag->load()));
      __kmp_unlock_suspend_mx(th);
      return;
    }
  }

  // The thread is sleeping on this flag: clear the sleep bit and wake it.
  flag->unset_sleeping();
  TCW_PTR(th->th.th_sleep_loc, NULL);

  KF_TRACE(5, ("__kmp_resume_template: T#%d about to wakeup T#%d, reset "
               "sleep bit for flag's loc(%p): %u\n",
               gtid, target_gtid, flag->get(), (unsigned int)flag->load()));

#ifdef DEBUG_SUSPEND
  {
    char buffer[128];
    __kmp_print_cond(buffer, &th->th.th_suspend_cv);
    __kmp_printf("__kmp_resume_template: T#%d resuming T#%d: %s\n", gtid,
                 target_gtid, buffer);
  }
#endif

  status = pthread_cond_signal(&th->th.th_suspend_cv.c_cond);
  __kmp_unlock_suspend_mx(th);
  KF_TRACE(30, ("__kmp_resume_template: T#%d exiting after signaling wake up"
                " for T#%d\n",
                gtid, target_gtid));
}
template <bool C, bool S>
void __kmp_resume_32(int target_gtid, kmp_flag_32<C, S> *flag) {
  __kmp_resume_template(target_gtid, flag);
}
template <bool C, bool S>
void __kmp_resume_64(int target_gtid, kmp_flag_64<C, S> *flag) {
  __kmp_resume_template(target_gtid, flag);
}
template <bool C, bool S>
void __kmp_atomic_resume_64(int target_gtid, kmp_atomic_flag_64<C, S> *flag) {
  __kmp_resume_template(target_gtid, flag);
}
void __kmp_resume_monitor() {
  int status;

  KF_TRACE(30, ("__kmp_resume_monitor: T#%d wants to wakeup T#%d enter\n", gtid,
                KMP_GTID_MONITOR));

  __kmp_printf("__kmp_resume_monitor: T#%d resuming T#%d: %s\n", gtid,
               KMP_GTID_MONITOR, buffer);

  KF_TRACE(30, ("__kmp_resume_monitor: T#%d exiting after signaling wake up"
                " for T#%d\n",
                gtid, KMP_GTID_MONITOR));
}

  // __kmp_gtid_set_specific stores gtid + 1 under the thread-specific key.
  status = pthread_setspecific(__kmp_gtid_threadprivate_key,
                               (void *)(intptr_t)(gtid + 1));

  KA_TRACE(50, ("__kmp_gtid_set_specific: runtime shutdown, returning\n"));

  KA_TRACE(50, ("__kmp_gtid_get_specific: runtime shutdown, returning "
                "KMP_GTID_SHUTDOWN\n"));
  KA_TRACE(50, ("__kmp_gtid_get_specific: key:%d gtid:%d\n",
                __kmp_gtid_threadprivate_key, gtid));

double __kmp_read_cpu_time(void) {
  struct tms buffer;

  /*t =*/times(&buffer);

  return (double)(buffer.tms_utime + buffer.tms_cutime) /
         (double)CLOCKS_PER_SEC;
}

int __kmp_read_system_info(struct kmp_sys_info *info) {
  int status;
  struct rusage r_usage;

  memset(info, 0, sizeof(*info));

  status = getrusage(RUSAGE_SELF, &r_usage);

  info->maxrss = r_usage.ru_maxrss; // maximum resident set size
  info->minflt = r_usage.ru_minflt; // page faults serviced without I/O
  info->majflt = r_usage.ru_majflt; // page faults serviced with I/O
  info->nswap = r_usage.ru_nswap; // times swapped out
  info->inblock = r_usage.ru_inblock; // block input operations
  info->oublock = r_usage.ru_oublock; // block output operations
  info->nvcsw = r_usage.ru_nvcsw; // voluntary context switches
  info->nivcsw = r_usage.ru_nivcsw; // involuntary context switches

  return (status != 0);
}
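// Illustrative sketch (not part of the runtime): reading the same resource
// counters as above straight from getrusage(). example_print_usage() is a
// hypothetical name.
#if 0
#include <stdio.h>
#include <sys/resource.h>
static void example_print_usage(void) {
  struct rusage r_usage;
  if (getrusage(RUSAGE_SELF, &r_usage) != 0)
    return;
  printf("maxrss=%ld KiB minflt=%ld majflt=%ld nvcsw=%ld nivcsw=%ld\n",
         r_usage.ru_maxrss, r_usage.ru_minflt, r_usage.ru_majflt,
         r_usage.ru_nvcsw, r_usage.ru_nivcsw);
}
#endif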
void __kmp_read_system_time(double *delta) {
  double t_ns;
  struct timeval tval;
  struct timespec stop;
  int status;

  status = gettimeofday(&tval, NULL);
  TIMEVAL_TO_TIMESPEC(&tval, &stop);
  t_ns = (double)(TS2NS(stop) - TS2NS(__kmp_sys_timer_data.start));
  *delta = (t_ns * 1e-9);
}

void __kmp_clear_system_time(void) {
  struct timeval tval;
  int status;
  status = gettimeofday(&tval, NULL);
  TIMEVAL_TO_TIMESPEC(&tval, &__kmp_sys_timer_data.start);
}

  // __kmp_get_xproc: query the number of logical processors.
#elif KMP_OS_DRAGONFLY || KMP_OS_FREEBSD || KMP_OS_NETBSD || KMP_OS_OPENBSD || \
    KMP_OS_HAIKU || KMP_OS_HURD || KMP_OS_SOLARIS || KMP_OS_WASI || KMP_OS_AIX
  size_t len = sizeof(r);
  sysctlbyname("hw.logicalcpu", &r, &len, NULL, 0);
#else
#error "Unknown or unsupported OS."
#endif

  return r > 0 ? r : 2;
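// Illustrative sketch (not part of the runtime): two common ways to ask the OS
// for the logical CPU count, matching the sysconf/sysctlbyname split above.
// example_cpu_count() is a hypothetical name; "hw.logicalcpu" is the macOS
// spelling of the key, other BSDs usually use "hw.ncpu".
#if 0
#include <unistd.h>
#if defined(__APPLE__)
#include <sys/sysctl.h>
#endif
static int example_cpu_count(void) {
#if defined(__APPLE__)
  int r = 0;
  size_t len = sizeof(r);
  if (sysctlbyname("hw.logicalcpu", &r, &len, NULL, 0) == 0 && r > 0)
    return r;
#endif
  long n = sysconf(_SC_NPROCESSORS_ONLN); // widely available fallback
  return n > 0 ? (int)n : 1;
}
#endif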
int __kmp_read_from_file(char const *path, char const *format, ...) {
  va_list args;

  va_start(args, format);
  FILE *f = fopen(path, "rb");
  pthread_mutexattr_t mutex_attr;
  pthread_condattr_t cond_attr;

#if (KMP_ARCH_X86 || KMP_ARCH_X86_64)
  if (!__kmp_cpuinfo.initialized) {
    __kmp_query_cpuid(&__kmp_cpuinfo);
  }
#endif

  status = getrlimit(RLIMIT_STACK, &rlim);

  if (sysconf(_SC_THREADS)) {

  status = pthread_mutexattr_init(&mutex_attr);
  status = pthread_mutexattr_destroy(&mutex_attr);
  status = pthread_condattr_init(&cond_attr);
  status = pthread_condattr_destroy(&cond_attr);

  __kmp_itt_initialize();

  __kmp_itt_destroy();

#if KMP_AFFINITY_SUPPORTED
  __kmp_affinity_uninitialize();
#endif

  status = clock_gettime(CLOCK_PROCESS_CPUTIME_ID, &ts);

  status = gettimeofday(&tv, NULL);

  gettimeofday(&t, NULL);

#if KMP_ARCH_X86 || KMP_ARCH_X86_64

  diff = nsec2 - nsec;
  double tpus = 1000.0 * (double)(delay + (now - goal)) / (double)diff;
#if KMP_OS_LINUX || KMP_OS_HURD

  // Parse /proc/<pid>/maps and check each mapping for the address.
  file = fopen(name, "r");

  void *beginning = NULL;
  void *ending = NULL;
  char perms[5];

  rc = fscanf(file, "%p-%p %4s %*[^\n]\n", &beginning, &ending, perms);

  if ((addr >= beginning) && (addr < ending)) {
    // The address is inside this mapping; require read and write permission.
    if (strcmp(perms, "rw") == 0) {
  int mib[] = {CTL_KERN, KERN_PROC, KERN_PROC_VMMAP, getpid()};
  rc = sysctl(mib, 4, NULL, &lstsz, NULL, 0);

  lstsz = lstsz * 4 / 3;

  rc = sysctl(mib, 4, buf, &lstsz, NULL, 0);

  char *up = buf + lstsz;

    struct kinfo_vmentry *cur = reinterpret_cast<struct kinfo_vmentry *>(lw);
    size_t cursz = cur->kve_structsize;

    void *start = reinterpret_cast<void *>(cur->kve_start);
    void *end = reinterpret_cast<void *>(cur->kve_end);

    if ((cur->kve_protection & KVME_PROT_READ) != 0 &&
        (cur->kve_protection & KVME_PROT_WRITE) != 0) {
#elif KMP_OS_DRAGONFLY
  char err[_POSIX2_LINE_MAX];

  vm_map_entry entry, *c;

  fd = kvm_openfiles(nullptr, nullptr, nullptr, O_RDONLY, err);

  proc = kvm_getprocs(fd, KERN_PROC_PID, getpid(), &num);

  if (kvm_read(fd, static_cast<uintptr_t>(proc->kp_paddr), &p, sizeof(p)) !=
          sizeof(p) ||
      kvm_read(fd, reinterpret_cast<uintptr_t>(p.p_vmspace), &sp, sizeof(sp)) !=
          sizeof(sp)) {

  uaddr = reinterpret_cast<uintptr_t>(addr);
  for (c = kvm_vm_map_entry_first(fd, cur, &entry); c;
       c = kvm_vm_map_entry_next(fd, c, &entry)) {
    if ((uaddr >= entry.ba.start) && (uaddr <= entry.ba.end)) {
      if ((entry.protection & VM_PROT_READ) != 0 &&
          (entry.protection & VM_PROT_WRITE) != 0) {
  prxmap_t *cur, *map;

  pid_t pid = getpid();

  fd = open(name, O_RDONLY);

  size_t sz = (1 << 20);

  while (sz > 0 && (rd = pread(fd, buf, sz, 0)) == sz) {

  map = reinterpret_cast<prxmap_t *>(buf);
  uaddr = reinterpret_cast<uintptr_t>(addr);

  for (cur = map; rd > 0; cur++, rd -= sizeof(*map)) {
    if (uaddr >= cur->pr_vaddr && uaddr < cur->pr_vaddr + cur->pr_size) {
      if ((cur->pr_mflags & MA_READ) != 0 && (cur->pr_mflags & MA_WRITE) != 0) {
  rc = vm_read_overwrite(
      mach_task_self(), // Task to read memory of.
      (vm_address_t)(addr), // Address to read from.
      1, // Number of bytes to be read.
      (vm_address_t)(&buffer), // Address of buffer to save read bytes in.
      &count // Address of var to save number of read bytes in.
  );
  mib[2] = VM_PROC_MAP;

  mib[4] = sizeof(struct kinfo_vmentry);

  rc = sysctl(mib, __arraycount(mib), NULL, &size, NULL, 0);

  rc = sysctl(mib, __arraycount(mib), kiv, &size, NULL, 0);

  for (size_t i = 0; i < size; i++) {
    if (kiv[i].kve_start >= (uint64_t)addr &&
        kiv[i].kve_end <= (uint64_t)addr) {

  mib[1] = KERN_PROC_VMMAP;

  rc = sysctl(mib, 3, NULL, &size, NULL, 0);

  struct kinfo_vmentry kiv = {.kve_start = 0};

  while ((rc = sysctl(mib, 3, &kiv, &size, NULL, 0)) == 0) {

    if (kiv.kve_end == end)
      break;

    if (kiv.kve_start >= (uint64_t)addr && kiv.kve_end <= (uint64_t)addr) {

#elif KMP_OS_WASI
  found = (int)addr < (__builtin_wasm_memory_size(0) * PAGESIZE);

#elif KMP_OS_AIX
  uint32_t loadQueryBufSize = 4096u; // Default loadquery buffer size.

  if (loadQueryBuf == NULL) {

  rc = loadquery(L_GETXINFO | L_IGNOREUNLOAD, loadQueryBuf, loadQueryBufSize);

    if (errno != ENOMEM) {

    // Double the buffer size and retry.
    loadQueryBufSize <<= 1;

  struct ld_xinfo *curLdInfo = (struct ld_xinfo *)loadQueryBuf;

    // Check whether the data segment of one of the loaded modules contains
    // the address.
    uintptr_t curDataStart = (uintptr_t)curLdInfo->ldinfo_dataorg;
    uintptr_t curDataEnd = curDataStart + curLdInfo->ldinfo_datasize;

    if (curDataStart <= (uintptr_t)addr && (uintptr_t)addr < curDataEnd) {

    if (curLdInfo->ldinfo_next == 0u) {

    curLdInfo = (struct ld_xinfo *)((char *)curLdInfo + curLdInfo->ldinfo_next);

#else

#error "Unknown or unsupported OS"
#ifdef USE_LOAD_BALANCE

#if KMP_OS_DARWIN || KMP_OS_DRAGONFLY || KMP_OS_FREEBSD || KMP_OS_NETBSD ||    \
    KMP_OS_OPENBSD || KMP_OS_SOLARIS

// Return the rounded system load average over an interval selected by
// __kmp_load_balance_interval, or -1 on error.
int __kmp_get_load_balance(int max) {
  double averages[3];
  int ret_avg = 0;

  int res = getloadavg(averages, 3);

  // getloadavg() may return fewer samples than requested.
  if (__kmp_load_balance_interval < 180 && (res >= 1)) {
    ret_avg = (int)averages[0]; // 1-minute average
  } else if ((__kmp_load_balance_interval >= 180 &&
              __kmp_load_balance_interval < 600) &&
             (res >= 2)) {
    ret_avg = (int)averages[1]; // 5-minute average
  } else if ((__kmp_load_balance_interval >= 600) && (res == 3)) {
    ret_avg = (int)averages[2]; // 15-minute average
  } else { // Error occurred.
    return -1;
  }

  return ret_avg;
}
#elif KMP_OS_AIX

int __kmp_get_load_balance(int max) {

  static int glb_running_threads = 0; // Saved count of the running threads.
  static double glb_call_time = 0; // Thread balance algorithm call time.
  int running_threads = 0; // Number of running threads in the system.

  double call_time = 0.0;

  if (glb_call_time &&
      (call_time - glb_call_time < __kmp_load_balance_interval))
    return glb_running_threads;

  glb_call_time = call_time;

  // Query the number of logical CPUs and get one perfstat record for each.
  int logical_cpus = perfstat_cpu(NULL, NULL, sizeof(perfstat_cpu_t), 0);
  if (logical_cpus <= 0) {
    return -1;
  }

  perfstat_cpu_t *cpu_stat = (perfstat_cpu_t *)KMP_INTERNAL_MALLOC(
      logical_cpus * sizeof(perfstat_cpu_t));
  if (cpu_stat == NULL) {
    return -1;
  }

  // Set the name of the first logical CPU for which the info is desired.
  perfstat_id_t first_cpu_name;
  strcpy(first_cpu_name.name, FIRST_CPU);

  // Get the stat info of the logical CPUs.
  int rc = perfstat_cpu(&first_cpu_name, cpu_stat, sizeof(perfstat_cpu_t),
                        logical_cpus);

  for (int i = 0; i < logical_cpus; ++i) {
    running_threads += cpu_stat[i].runque;
    if (running_threads >= max)
      break;
  }

  // There should be at least one thread running.
  if (running_threads <= 0)
    running_threads = 1;

  glb_running_threads = running_threads;

  return running_threads;
}
#else // Linux* OS

// Return the number of running (not sleeping) threads, or -1 on error. Walking
// /proc is expensive, so the result is cached and refreshed at most once per
// __kmp_load_balance_interval seconds.
int __kmp_get_load_balance(int max) {
  static int permanent_error = 0;
  static int glb_running_threads = 0; // Saved count of the running threads.
  static double glb_call_time = 0; // Thread balance algorithm call time.

  int running_threads = 0; // Number of running threads in the system.

  DIR *proc_dir = NULL; // Handle of "/proc/" directory.
  struct dirent *proc_entry = NULL;

  DIR *task_dir = NULL; // Handle of "/proc/<pid>/task/" directory.
  struct dirent *task_entry = NULL;
  int task_path_fixed_len;

  int stat_file = -1;
  int stat_path_fixed_len;

  int total_processes = 0; // Total number of processes in system.

  double call_time = 0.0;

  if (glb_call_time &&
      (call_time - glb_call_time < __kmp_load_balance_interval)) {
    running_threads = glb_running_threads;

  glb_call_time = call_time;

  // Do not spend time on scanning "/proc/" if we have a permanent error.
  if (permanent_error) {
    running_threads = -1;

  proc_dir = opendir("/proc");
  if (proc_dir == NULL) {
    // Cannot open "/proc/": report an error now and in subsequent calls.
    running_threads = -1;
    permanent_error = 1;

  task_path_fixed_len = task_path.used; // Remember number of used characters.

  proc_entry = readdir(proc_dir);
  while (proc_entry != NULL) {
    // The entry is a directory whose name starts with a digit: assume it is a
    // process' directory.
    if (proc_entry->d_type == DT_DIR && isdigit(proc_entry->d_name[0])) {

      KMP_DEBUG_ASSERT(total_processes != 1 ||
                       strcmp(proc_entry->d_name, "1") == 0);

      task_path.used = task_path_fixed_len; // Reset task_path to "/proc/".

      task_dir = opendir(task_path.str);
      if (task_dir == NULL) {
        // "init" (pid 1) must always exist, so if "/proc/1/task/" cannot be
        // opened, the kernel does not support "task/": give up permanently.
        if (strcmp(proc_entry->d_name, "1") == 0) {
          running_threads = -1;
          permanent_error = 1;

        stat_path_fixed_len = stat_path.used;

        if (proc_entry->d_type == DT_DIR && isdigit(task_entry->d_name[0])) {

            stat_path.used =
                stat_path_fixed_len; // Reset stat path to its fixed part.

            stat_file = open(stat_path.str, O_RDONLY);
            if (stat_file == -1) {

            len = read(stat_file, buffer, sizeof(buffer) - 1);

            // The thread state is the character right after the ") " that
            // closes the command name in /proc/.../stat.
            char *close_parent = strstr(buffer, ") ");
            if (close_parent != NULL) {
              char state = *(close_parent + 2);

            if (running_threads >= max) {

    proc_entry = readdir(proc_dir);

  // There should be at least one thread running.
  if (running_threads <= 0) {
    running_threads = 1;
  }

  if (proc_dir != NULL) {
    closedir(proc_dir);
  }
  if (task_dir != NULL) {
    closedir(task_dir);
  }
  if (stat_file != -1) {
    close(stat_file);
  }

  glb_running_threads = running_threads;

  return running_threads;
}
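// Illustrative sketch (not part of the runtime): estimating how many threads
// of the current process are runnable by reading each
// /proc/self/task/<tid>/stat and looking at the state field that follows the
// ") " after the command name, as the system-wide loop above does.
// example_running_threads_of_self() is a hypothetical name.
#if 0
#include <ctype.h>
#include <dirent.h>
#include <stdio.h>
#include <string.h>
static int example_running_threads_of_self(void) {
  DIR *task_dir = opendir("/proc/self/task");
  if (!task_dir)
    return -1;
  int running = 0;
  struct dirent *task_entry;
  while ((task_entry = readdir(task_dir)) != NULL) {
    if (!isdigit((unsigned char)task_entry->d_name[0]))
      continue;
    char stat_path[256];
    snprintf(stat_path, sizeof(stat_path), "/proc/self/task/%s/stat",
             task_entry->d_name);
    FILE *f = fopen(stat_path, "r");
    if (!f)
      continue;
    char buffer[512];
    size_t len = fread(buffer, 1, sizeof(buffer) - 1, f);
    buffer[len] = '\0';
    fclose(f);
    char *close_paren = strstr(buffer, ") ");
    if (close_paren && *(close_paren + 2) == 'R') // 'R' means running
      ++running;
  }
  closedir(task_dir);
  return running;
}
#endif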
#if !(KMP_ARCH_X86 || KMP_ARCH_X86_64 || KMP_MIC ||                            \
      ((KMP_OS_LINUX || KMP_OS_DARWIN) && KMP_ARCH_AARCH64) ||                 \
      KMP_ARCH_PPC64 || KMP_ARCH_RISCV64 || KMP_ARCH_LOONGARCH64 ||            \
      KMP_ARCH_ARM || KMP_ARCH_VE || KMP_ARCH_S390X || KMP_ARCH_PPC_XCOFF ||   \
      KMP_ARCH_AARCH64_32)
// Generic microtask entry points used when no architecture-specific assembly
// trampoline is available: one function-pointer type per argument count.
typedef void (*microtask_t0)(int *, int *);
typedef void (*microtask_t1)(int *, int *, void *);
typedef void (*microtask_t2)(int *, int *, void *, void *);
typedef void (*microtask_t3)(int *, int *, void *, void *, void *);
typedef void (*microtask_t4)(int *, int *, void *, void *, void *, void *);
typedef void (*microtask_t5)(int *, int *, void *, void *, void *, void *,
                             void *);
typedef void (*microtask_t6)(int *, int *, void *, void *, void *, void *,
                             void *, void *);
typedef void (*microtask_t7)(int *, int *, void *, void *, void *, void *,
                             void *, void *, void *);
typedef void (*microtask_t8)(int *, int *, void *, void *, void *, void *,
                             void *, void *, void *, void *);
typedef void (*microtask_t9)(int *, int *, void *, void *, void *, void *,
                             void *, void *, void *, void *, void *);
typedef void (*microtask_t10)(int *, int *, void *, void *, void *, void *,
                              void *, void *, void *, void *, void *, void *);
typedef void (*microtask_t11)(int *, int *, void *, void *, void *, void *,
                              void *, void *, void *, void *, void *, void *,
                              void *);
typedef void (*microtask_t12)(int *, int *, void *, void *, void *, void *,
                              void *, void *, void *, void *, void *, void *,
                              void *, void *);
typedef void (*microtask_t13)(int *, int *, void *, void *, void *, void *,
                              void *, void *, void *, void *, void *, void *,
                              void *, void *, void *);
typedef void (*microtask_t14)(int *, int *, void *, void *, void *, void *,
                              void *, void *, void *, void *, void *, void *,
                              void *, void *, void *, void *);
typedef void (*microtask_t15)(int *, int *, void *, void *, void *, void *,
                              void *, void *, void *, void *, void *, void *,
                              void *, void *, void *, void *, void *);
// Dispatch on the argument count and cast pkfn to the matching type.
int __kmp_invoke_microtask(microtask_t pkfn, int gtid, int tid, int argc,
                           void *p_argv[]
#if OMPT_SUPPORT
                           ,
                           void **exit_frame_ptr
#endif
) {
  switch (argc) {
  default:
    fprintf(stderr, "Too many args to microtask: %d!\n", argc);
    fflush(stderr);
    exit(-1);
  case 2:
    (*(microtask_t2)pkfn)(&gtid, &tid, p_argv[0], p_argv[1]);
    break;
  case 3:
    (*(microtask_t3)pkfn)(&gtid, &tid, p_argv[0], p_argv[1], p_argv[2]);
    break;
  case 4:
    (*(microtask_t4)pkfn)(&gtid, &tid, p_argv[0], p_argv[1], p_argv[2],
                          p_argv[3]);
    break;
  case 5:
    (*(microtask_t5)pkfn)(&gtid, &tid, p_argv[0], p_argv[1], p_argv[2],
                          p_argv[3], p_argv[4]);
    break;
  case 6:
    (*(microtask_t6)pkfn)(&gtid, &tid, p_argv[0], p_argv[1], p_argv[2],
                          p_argv[3], p_argv[4], p_argv[5]);
    break;
  case 7:
    (*(microtask_t7)pkfn)(&gtid, &tid, p_argv[0], p_argv[1], p_argv[2],
                          p_argv[3], p_argv[4], p_argv[5], p_argv[6]);
    break;
  case 8:
    (*(microtask_t8)pkfn)(&gtid, &tid, p_argv[0], p_argv[1], p_argv[2],
                          p_argv[3], p_argv[4], p_argv[5], p_argv[6],
                          p_argv[7]);
    break;
  case 9:
    (*(microtask_t9)pkfn)(&gtid, &tid, p_argv[0], p_argv[1], p_argv[2],
                          p_argv[3], p_argv[4], p_argv[5], p_argv[6], p_argv[7],
                          p_argv[8]);
    break;
  case 10:
    (*(microtask_t10)pkfn)(&gtid, &tid, p_argv[0], p_argv[1], p_argv[2],
                           p_argv[3], p_argv[4], p_argv[5], p_argv[6],
                           p_argv[7], p_argv[8], p_argv[9]);
    break;
  case 11:
    (*(microtask_t11)pkfn)(&gtid, &tid, p_argv[0], p_argv[1], p_argv[2],
                           p_argv[3], p_argv[4], p_argv[5], p_argv[6],
                           p_argv[7], p_argv[8], p_argv[9], p_argv[10]);
    break;
  case 12:
    (*(microtask_t12)pkfn)(&gtid, &tid, p_argv[0], p_argv[1], p_argv[2],
                           p_argv[3], p_argv[4], p_argv[5], p_argv[6],
                           p_argv[7], p_argv[8], p_argv[9], p_argv[10],
                           p_argv[11]);
    break;
  case 13:
    (*(microtask_t13)pkfn)(&gtid, &tid, p_argv[0], p_argv[1], p_argv[2],
                           p_argv[3], p_argv[4], p_argv[5], p_argv[6],
                           p_argv[7], p_argv[8], p_argv[9], p_argv[10],
                           p_argv[11], p_argv[12]);
    break;
  case 14:
    (*(microtask_t14)pkfn)(&gtid, &tid, p_argv[0], p_argv[1], p_argv[2],
                           p_argv[3], p_argv[4], p_argv[5], p_argv[6],
                           p_argv[7], p_argv[8], p_argv[9], p_argv[10],
                           p_argv[11], p_argv[12], p_argv[13]);
    break;
  case 15:
    (*(microtask_t15)pkfn)(&gtid, &tid, p_argv[0], p_argv[1], p_argv[2],
                           p_argv[3], p_argv[4], p_argv[5], p_argv[6],
                           p_argv[7], p_argv[8], p_argv[9], p_argv[10],
                           p_argv[11], p_argv[12], p_argv[13], p_argv[14]);
    break;
  }

  return 1;
}
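// Illustrative sketch (not part of the runtime): the "switch on argc and cast
// to the matching function-pointer type" dispatch used above, reduced to three
// arities. example_invoke() and the mt*_t typedefs are hypothetical names; the
// cast-and-call pattern mirrors the runtime's own technique.
#if 0
typedef void (*mt0_t)(int *, int *);
typedef void (*mt1_t)(int *, int *, void *);
typedef void (*mt2_t)(int *, int *, void *, void *);
static int example_invoke(void (*pkfn)(int *, int *, ...), int gtid, int tid,
                          int argc, void *p_argv[]) {
  switch (argc) {
  case 0:
    (*(mt0_t)pkfn)(&gtid, &tid);
    break;
  case 1:
    (*(mt1_t)pkfn)(&gtid, &tid, p_argv[0]);
    break;
  case 2:
    (*(mt2_t)pkfn)(&gtid, &tid, p_argv[0], p_argv[1]);
    break;
  default:
    return 0; // too many arguments for this sketch
  }
  return 1;
}
#endif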
// Condition variable and lock used to signal that the hidden helper team has
// been initialized.
pthread_cond_t hidden_helper_threads_initz_cond_var;
pthread_mutex_t hidden_helper_threads_initz_lock;
volatile int hidden_helper_initz_signaled = FALSE;

// Condition variable and lock used to signal that the hidden helper team has
// been deinitialized.
pthread_cond_t hidden_helper_threads_deinitz_cond_var;
pthread_mutex_t hidden_helper_threads_deinitz_lock;
volatile int hidden_helper_deinitz_signaled = FALSE;

// Condition variable and lock used to wake up the hidden helper main thread.
pthread_cond_t hidden_helper_main_thread_cond_var;
pthread_mutex_t hidden_helper_main_thread_lock;
volatile int hidden_helper_main_thread_signaled = FALSE;
// Semaphore used by hidden helper threads to wait for work.
sem_t hidden_helper_task_sem;

void __kmp_hidden_helper_worker_thread_wait() {
  int status = sem_wait(&hidden_helper_task_sem);
}

void __kmp_do_initialize_hidden_helper_threads() {
  // Initialize the condition variables, locks and semaphore used for the
  // hidden helper handshakes.
  int status =
      pthread_cond_init(&hidden_helper_threads_initz_cond_var, nullptr);

  status = pthread_cond_init(&hidden_helper_threads_deinitz_cond_var, nullptr);

  status = pthread_cond_init(&hidden_helper_main_thread_cond_var, nullptr);

  status = pthread_mutex_init(&hidden_helper_threads_initz_lock, nullptr);

  status = pthread_mutex_init(&hidden_helper_threads_deinitz_lock, nullptr);

  status = pthread_mutex_init(&hidden_helper_main_thread_lock, nullptr);

  status = sem_init(&hidden_helper_task_sem, 0, 0);

  // Create the hidden helper main thread, which runs the initz routine.
  pthread_t handle;
  status = pthread_create(
      &handle, nullptr,
      [](void *) -> void * {
        __kmp_hidden_helper_threads_initz_routine();
        return nullptr;
      },
      nullptr);
}
  // __kmp_hidden_helper_threads_initz_wait: block until initz is signaled.
  int status = pthread_mutex_lock(&hidden_helper_threads_initz_lock);

  if (!TCR_4(hidden_helper_initz_signaled)) {
    status = pthread_cond_wait(&hidden_helper_threads_initz_cond_var,
                               &hidden_helper_threads_initz_lock);
  }

  status = pthread_mutex_unlock(&hidden_helper_threads_initz_lock);

  // __kmp_hidden_helper_initz_release: signal that initialization is done.
  int status = pthread_mutex_lock(&hidden_helper_threads_initz_lock);

  status = pthread_cond_signal(&hidden_helper_threads_initz_cond_var);

  status = pthread_mutex_unlock(&hidden_helper_threads_initz_lock);

  // __kmp_hidden_helper_main_thread_wait
  int status = pthread_mutex_lock(&hidden_helper_main_thread_lock);

  if (!TCR_4(hidden_helper_main_thread_signaled)) {
    status = pthread_cond_wait(&hidden_helper_main_thread_cond_var,
                               &hidden_helper_main_thread_lock);
  }

  status = pthread_mutex_unlock(&hidden_helper_main_thread_lock);

  // __kmp_hidden_helper_main_thread_release
  int status = pthread_mutex_lock(&hidden_helper_main_thread_lock);

  status = pthread_cond_signal(&hidden_helper_main_thread_cond_var);

  status = pthread_mutex_unlock(&hidden_helper_main_thread_lock);

  // __kmp_hidden_helper_worker_thread_signal
  int status = sem_post(&hidden_helper_task_sem);

  // __kmp_hidden_helper_threads_deinitz_wait
  int status = pthread_mutex_lock(&hidden_helper_threads_deinitz_lock);

  if (!TCR_4(hidden_helper_deinitz_signaled)) {
    status = pthread_cond_wait(&hidden_helper_threads_deinitz_cond_var,
                               &hidden_helper_threads_deinitz_lock);
  }

  status = pthread_mutex_unlock(&hidden_helper_threads_deinitz_lock);

  // __kmp_hidden_helper_threads_deinitz_release
  int status = pthread_mutex_lock(&hidden_helper_threads_deinitz_lock);

  status = pthread_cond_signal(&hidden_helper_threads_deinitz_cond_var);

  status = pthread_mutex_unlock(&hidden_helper_threads_deinitz_lock);
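// Illustrative sketch (not part of the runtime): the signal/wait handshake the
// helpers above implement -- a waiter blocks on a condition variable until a
// flag is set under the same mutex, and the releaser sets the flag and
// signals. All names are hypothetical.
#if 0
#include <pthread.h>
static pthread_mutex_t g_lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t g_cond = PTHREAD_COND_INITIALIZER;
static volatile int g_signaled = 0;

static void example_wait(void) {
  pthread_mutex_lock(&g_lock);
  while (!g_signaled)               // loop guards against spurious wakeups
    pthread_cond_wait(&g_cond, &g_lock);
  pthread_mutex_unlock(&g_lock);
}

static void example_release(void) {
  pthread_mutex_lock(&g_lock);
  g_signaled = 1;
  pthread_cond_signal(&g_cond);
  pthread_mutex_unlock(&g_lock);
}
#endif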
// On operating systems without hidden helper support, each of the entry points
// above is stubbed out with an assertion:
  KMP_ASSERT(0 && "Hidden helper task is not supported on this OS");
  KMP_ASSERT(0 && "Hidden helper task is not supported on this OS");
  KMP_ASSERT(0 && "Hidden helper task is not supported on this OS");
  KMP_ASSERT(0 && "Hidden helper task is not supported on this OS");
  KMP_ASSERT(0 && "Hidden helper task is not supported on this OS");
  KMP_ASSERT(0 && "Hidden helper task is not supported on this OS");
  KMP_ASSERT(0 && "Hidden helper task is not supported on this OS");
  KMP_ASSERT(0 && "Hidden helper task is not supported on this OS");
  KMP_ASSERT(0 && "Hidden helper task is not supported on this OS");
  DIR *dir = opendir("/dev/shm");

  } else if (ENOENT == errno) {

  DIR *dir = opendir("/tmp");

  } else if (ENOENT == errno) {