#if !KMP_OS_DRAGONFLY && !KMP_OS_FREEBSD && !KMP_OS_NETBSD && !KMP_OS_OPENBSD
#include <sys/resource.h>
#include <libperfstat.h>
#include <sys/syscall.h>
#include <sys/sysinfo.h>
#include <sys/sysctl.h>
#elif KMP_OS_DRAGONFLY || KMP_OS_FREEBSD
#include <sys/sysctl.h>
#include <pthread_np.h>
#elif KMP_OS_NETBSD || KMP_OS_OPENBSD
#include <sys/sysctl.h>
#include <pthread_np.h>
#include <sys/loadavg.h>
#ifndef TIMEVAL_TO_TIMESPEC
#define TIMEVAL_TO_TIMESPEC(tv, ts)                                            \
  do {                                                                         \
    (ts)->tv_sec = (tv)->tv_sec;                                               \
    (ts)->tv_nsec = (tv)->tv_usec * 1000;                                      \
  } while (0)
#endif

#define TS2NS(timespec)                                                        \
  (((timespec).tv_sec * (long int)1e9) + (timespec).tv_nsec)
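
// A minimal usage sketch (illustrative only, not part of the original file):
// sample the wall clock with gettimeofday(), widen the result to a timespec
// with TIMEVAL_TO_TIMESPEC, then collapse it to a single nanosecond count
// with TS2NS.
#include <sys/time.h>
#include <time.h>
static long long example_now_ns(void) {
  struct timeval tv;
  struct timespec ts;
  gettimeofday(&tv, NULL); // microsecond-resolution wall clock
  TIMEVAL_TO_TIMESPEC(&tv, &ts); // tv_usec * 1000 -> tv_nsec
  return TS2NS(ts); // seconds * 1e9 + nanoseconds
}
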
#if KMP_HANDLE_SIGNALS
typedef void (*sig_func_t)(int);
static sigset_t __kmp_sigset;

static void __kmp_print_cond(char *buffer, kmp_cond_align_t *cond) {
  KMP_SNPRINTF(buffer, 128, "(cond (lock (%ld, %d)), (descr (%p)))",
               cond->c_cond.__c_lock.__status, cond->c_cond.__c_lock.__spinlock,
               cond->c_cond.__c_waiting);
}
#if ((KMP_OS_LINUX || KMP_OS_FREEBSD || KMP_OS_NETBSD || KMP_OS_DRAGONFLY ||   \
     KMP_AFFINITY_SUPPORTED)

              "Illegal set affinity operation when not capable");

  kmp_affin_mask_t *mask;
  KMP_CPU_ALLOC_ON_STACK(mask);
  KMP_CPU_SET(which, mask);
  __kmp_set_system_affinity(mask, TRUE);
  KMP_CPU_FREE_FROM_STACK(mask);
  if (mask_size % sizeof(__kmp_affin_mask_size))
    mask_size += sizeof(__kmp_affin_mask_size) -
                 mask_size % sizeof(__kmp_affin_mask_size);
  KMP_AFFINITY_ENABLE(mask_size);
  ("__kmp_affinity_determine_capable: "
   "AIX OS affinity interface bindprocessor functional (mask size = "
   "%" KMP_SIZE_T_SPEC ").\n",
   __kmp_affin_mask_size));
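
// Illustrative arithmetic only (not from the original source): the statement
// above pads a byte count up to the next multiple of a granule, e.g. with a
// granule of 8, a size of 13 becomes 13 + (8 - 13 % 8) = 16. A generic sketch
// with hypothetical names:
#include <stddef.h>
static size_t example_round_up(size_t size, size_t granule) {
  if (size % granule)
    size += granule - size % granule; // pad to the next multiple
  return size;
}
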
#define KMP_CPU_SET_SIZE_LIMIT (1024 * 1024)
#define KMP_CPU_SET_TRY_SIZE CACHE_LINE
#elif KMP_OS_FREEBSD || KMP_OS_DRAGONFLY
#define KMP_CPU_SET_SIZE_LIMIT (sizeof(cpuset_t))
#else
#define KMP_CPU_SET_SIZE_LIMIT (256)
#endif

  int verbose = __kmp_affinity.flags.verbose;
  int warnings = __kmp_affinity.flags.warnings;
  enum affinity_type type = __kmp_affinity.type;
  gCode = syscall(__NR_sched_getaffinity, 0, KMP_CPU_SET_TRY_SIZE, buf);
  KA_TRACE(30, ("__kmp_affinity_determine_capable: "
                "initial getaffinity call returned %ld errno = %d\n",

  if (gCode < 0 && errno != EINVAL) {
      (warnings && (type != affinity_none) && (type != affinity_default) &&
       (type != affinity_disabled))) {
    KMP_AFFINITY_DISABLE();
  } else if (gCode > 0) {
    KMP_AFFINITY_ENABLE(gCode);
    KA_TRACE(10, ("__kmp_affinity_determine_capable: "
                  "affinity supported (mask size %d)\n",
                  (int)__kmp_affin_mask_size));

  KA_TRACE(30, ("__kmp_affinity_determine_capable: "
                "searching for proper set size\n"));
  for (size = 1; size <= KMP_CPU_SET_SIZE_LIMIT; size *= 2) {
    gCode = syscall(__NR_sched_getaffinity, 0, size, buf);
    KA_TRACE(30, ("__kmp_affinity_determine_capable: "
                  "getaffinity for mask size %ld returned %ld errno = %d\n",
                  size, gCode, errno));
    if (errno == ENOSYS) {
      KA_TRACE(30, ("__kmp_affinity_determine_capable: "
                    "inconsistent OS call behavior: errno == ENOSYS for mask "
          (warnings && (type != affinity_none) &&
           (type != affinity_default) && (type != affinity_disabled))) {
        KMP_AFFINITY_DISABLE();

    KMP_AFFINITY_ENABLE(gCode);
    KA_TRACE(10, ("__kmp_affinity_determine_capable: "
                  "affinity supported (mask size %d)\n",
                  (int)__kmp_affin_mask_size));
#elif KMP_OS_FREEBSD || KMP_OS_NETBSD || KMP_OS_DRAGONFLY
  gCode = pthread_getaffinity_np(pthread_self(), KMP_CPU_SET_SIZE_LIMIT,
                                 reinterpret_cast<cpuset_t *>(buf));
  KA_TRACE(30, ("__kmp_affinity_determine_capable: "
                "initial getaffinity call returned %d errno = %d\n",

  KMP_AFFINITY_ENABLE(KMP_CPU_SET_SIZE_LIMIT);
  KA_TRACE(10, ("__kmp_affinity_determine_capable: "
                "affinity supported (mask size %d)\n",
                (int)__kmp_affin_mask_size));

  KMP_AFFINITY_DISABLE();
  KA_TRACE(10, ("__kmp_affinity_determine_capable: "
                "cannot determine mask size - affinity not supported\n"));
  if (verbose || (warnings && (type != affinity_none) &&
                  (type != affinity_default) && (type != affinity_disabled))) {
int __kmp_futex_determine_capable() {
  long rc = syscall(__NR_futex, &loc, FUTEX_WAKE, 1, NULL, NULL, 0);
  int retval = (rc == 0) || (errno != ENOSYS);

  ("__kmp_futex_determine_capable: rc = %d errno = %d\n", rc, errno));
  KA_TRACE(10, ("__kmp_futex_determine_capable: futex syscall%s supported\n",
                retval ? "" : " not"));
#if (KMP_ARCH_X86 || KMP_ARCH_X86_64 || KMP_ARCH_WASM) && (!KMP_ASM_INTRINS)

  new_value = old_value | d;
  new_value = old_value | d;

  new_value = old_value & d;
  new_value = old_value & d;

  new_value = old_value | d;
  new_value = old_value | d;

  new_value = old_value & d;
  new_value = old_value & d;

#if KMP_ARCH_X86 || KMP_ARCH_WASM
  new_value = old_value + d;
  new_value = old_value + d;

  new_value = old_value + d;
  new_value = old_value + d;

  new_value = old_value | d;
  new_value = old_value | d;

  new_value = old_value & d;
  new_value = old_value & d;
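
// The assignments above sit inside compare-and-swap retry loops. A minimal
// sketch of that pattern using a GCC/Clang builtin rather than the runtime's
// own KMP_COMPARE_AND_STORE macros (illustrative, hypothetical names):
#include <stdint.h>
static uint32_t example_test_then_or32(volatile uint32_t *p, uint32_t d) {
  uint32_t old_value = *p;
  uint32_t new_value = old_value | d;
  // Retry until no other thread changed *p between the read and the CAS.
  while (!__sync_bool_compare_and_swap(p, old_value, new_value)) {
    old_value = *p;
    new_value = old_value | d;
  }
  return old_value; // fetch-or semantics: return the pre-update value
}
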
#ifdef KMP_CANCEL_THREADS
  KA_TRACE(10, ("__kmp_terminate_thread: kill (%d)\n", gtid));
  status = pthread_cancel(th->th.th_info.ds.ds_thread);
#if KMP_OS_LINUX || KMP_OS_DRAGONFLY || KMP_OS_FREEBSD || KMP_OS_NETBSD ||     \
    KMP_OS_HURD || KMP_OS_SOLARIS || KMP_OS_AIX

  if ((status = thr_stksegment(&s)) < 0) {
  KA_TRACE(60, ("__kmp_set_stack_info: T#%d thr_stksegment returned size:"
                " %lu, low addr: %p\n",

  status = pthread_attr_init(&attr);
#if KMP_OS_DRAGONFLY || KMP_OS_FREEBSD || KMP_OS_NETBSD
  status = pthread_attr_get_np(pthread_self(), &attr);
#else
  status = pthread_getattr_np(pthread_self(), &attr);
#endif
  ("__kmp_set_stack_info: T#%d pthread_attr_getstack returned size:"
   " %lu, low addr: %p\n",
  status = pthread_attr_destroy(&attr);

  TCW_PTR(th->th.th_info.ds.ds_stacksize, 0);
  TCW_PTR(th->th.th_info.ds.ds_stackbase, &stack_data);
  TCW_4(th->th.th_info.ds.ds_stackgrow, TRUE);
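
// A self-contained sketch of the stack-discovery idea above (assumes the
// glibc pthread_getattr_np() extension; names are illustrative): query the
// current thread's attributes, then read the stack base and size back with
// pthread_attr_getstack().
#define _GNU_SOURCE
#include <pthread.h>
#include <stdio.h>
static void example_print_stack_extent(void) {
  pthread_attr_t attr;
  void *addr = NULL;
  size_t size = 0;
  if (pthread_getattr_np(pthread_self(), &attr) == 0) {
    pthread_attr_getstack(&attr, &addr, &size); // low address and total size
    pthread_attr_destroy(&attr);
    printf("stack low addr %p, size %zu\n", addr, size);
  }
}
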
  int status, old_type, old_state;
#ifdef KMP_BLOCK_SIGNALS
  sigset_t new_set, old_set;

#if KMP_OS_LINUX || KMP_OS_DRAGONFLY || KMP_OS_FREEBSD || KMP_OS_NETBSD ||     \
    KMP_OS_OPENBSD || KMP_OS_HURD || KMP_OS_SOLARIS || KMP_OS_AIX
  void *volatile padding = 0;

  gtid = ((kmp_info_t *)thr)->th.th_info.ds.ds_gtid;

  __kmp_stats_thread_ptr = ((kmp_info_t *)thr)->th.th_stats;
  __kmp_stats_thread_ptr->startLife();

  __kmp_itt_thread_name(gtid);

#if KMP_AFFINITY_SUPPORTED
  __kmp_affinity_bind_init_mask(gtid);

#ifdef KMP_CANCEL_THREADS
  status = pthread_setcanceltype(PTHREAD_CANCEL_ASYNCHRONOUS, &old_type);
  status = pthread_setcancelstate(PTHREAD_CANCEL_ENABLE, &old_state);

#if KMP_ARCH_X86 || KMP_ARCH_X86_64
  __kmp_clear_x87_fpu_status_word();
  __kmp_load_x87_fpu_control_word(&__kmp_init_x87_fpu_control_word);
  __kmp_load_mxcsr(&__kmp_init_mxcsr);

#ifdef KMP_BLOCK_SIGNALS
  status = sigfillset(&new_set);
  status = pthread_sigmask(SIG_BLOCK, &new_set, &old_set);

#if KMP_OS_LINUX || KMP_OS_DRAGONFLY || KMP_OS_FREEBSD || KMP_OS_NETBSD ||     \
    KMP_OS_OPENBSD || KMP_OS_HURD || KMP_OS_SOLARIS || KMP_OS_AIX

#ifdef KMP_BLOCK_SIGNALS
  status = pthread_sigmask(SIG_SETMASK, &old_set, NULL);
static void *__kmp_launch_monitor(void *thr) {
  int status, old_type, old_state;
#ifdef KMP_BLOCK_SIGNALS
  struct timespec interval;

  KA_TRACE(10, ("__kmp_launch_monitor: #1 launched\n"));

  __kmp_itt_thread_ignore();

#ifdef KMP_CANCEL_THREADS
  status = pthread_setcanceltype(PTHREAD_CANCEL_ASYNCHRONOUS, &old_type);
  status = pthread_setcancelstate(PTHREAD_CANCEL_ENABLE, &old_state);

  int sched = sched_getscheduler(0);
  if (sched == SCHED_FIFO || sched == SCHED_RR) {
    struct sched_param param;
    int max_priority = sched_get_priority_max(sched);
    sched_getparam(0, &param);
    if (param.sched_priority < max_priority) {
      param.sched_priority += 1;
      rc = sched_setscheduler(0, sched, &param);

  if (__kmp_monitor_wakeups == 1) {
    interval.tv_nsec = 0;

  KA_TRACE(10, ("__kmp_launch_monitor: #2 monitor\n"));

    KA_TRACE(15, ("__kmp_launch_monitor: update\n"));
    status = gettimeofday(&tval, NULL);
    now.tv_sec += interval.tv_sec;
    now.tv_nsec += interval.tv_nsec;

  KA_TRACE(10, ("__kmp_launch_monitor: #3 cleanup\n"));

#ifdef KMP_BLOCK_SIGNALS
  status = sigfillset(&new_set);
  status = pthread_sigmask(SIG_UNBLOCK, &new_set, NULL);

  KA_TRACE(10, ("__kmp_launch_monitor: #4 finished\n"));

  KA_TRACE(10, ("__kmp_launch_monitor: #5 terminate sig=%d\n",
  KA_TRACE(10, ("__kmp_launch_monitor: #6 raise sig=%d\n",
  KA_TRACE(10, ("__kmp_launch_monitor: #7 exit\n"));
  pthread_attr_t thread_attr;

  th->th.th_info.ds.ds_gtid = gtid;

  th->th.th_stats = __kmp_stats_list->push_back(gtid);
  th->th.th_stats = __kmp_stats_thread_ptr;

  KA_TRACE(10, ("__kmp_create_worker: uber thread (%d)\n", gtid));
  th->th.th_info.ds.ds_thread = pthread_self();

  KA_TRACE(10, ("__kmp_create_worker: try to create thread (%d)\n", gtid));

#ifdef KMP_THREAD_ATTR
  status = pthread_attr_init(&thread_attr);
  status = pthread_attr_setdetachstate(&thread_attr, PTHREAD_CREATE_JOINABLE);

  KA_TRACE(10, ("__kmp_create_worker: T#%d, default stacksize = %lu bytes, "
                "__kmp_stksize = %lu bytes, final stacksize = %lu bytes\n",

#ifdef _POSIX_THREAD_ATTR_STACKSIZE
  status = pthread_attr_setstacksize(&thread_attr, stack_size);
#ifdef KMP_BACKUP_STKSIZE
  KA_TRACE(10, ("__kmp_create_worker: T#%d, default stacksize = %lu bytes, "
                "__kmp_stksize = %lu bytes, (backup) final stacksize = %lu "
  status = pthread_attr_setstacksize(&thread_attr, stack_size);

  if (status != 0 || !handle) {
#ifdef _POSIX_THREAD_ATTR_STACKSIZE

#if defined(LIBOMP_HAVE_PTHREAD_SET_NAME_NP)
  pthread_set_name_np(handle, "openmp_worker");
#elif defined(LIBOMP_HAVE_PTHREAD_SETNAME_NP) && !KMP_OS_DARWIN
  pthread_setname_np(handle, "%s", const_cast<char *>("openmp_worker"));
#else
  pthread_setname_np(handle, "openmp_worker");
#endif

  th->th.th_info.ds.ds_thread = handle;

#ifdef KMP_THREAD_ATTR
  status = pthread_attr_destroy(&thread_attr);

  KA_TRACE(10, ("__kmp_create_worker: done creating thread (%d)\n", gtid));
  pthread_attr_t thread_attr;
  int auto_adj_size = FALSE;

  KA_TRACE(10, ("__kmp_create_monitor: skipping monitor thread because of "
  th->th.th_info.ds.ds_tid = 0;
  th->th.th_info.ds.ds_gtid = 0;

  KA_TRACE(10, ("__kmp_create_monitor: try to create monitor\n"));

#ifdef KMP_THREAD_ATTR
  if (__kmp_monitor_stksize == 0) {
    __kmp_monitor_stksize = KMP_DEFAULT_MONITOR_STKSIZE;
    auto_adj_size = TRUE;
  status = pthread_attr_init(&thread_attr);
  status = pthread_attr_setdetachstate(&thread_attr, PTHREAD_CREATE_JOINABLE);

#ifdef _POSIX_THREAD_ATTR_STACKSIZE
  status = pthread_attr_getstacksize(&thread_attr, &size);

  if (__kmp_monitor_stksize == 0) {
    __kmp_monitor_stksize = KMP_DEFAULT_MONITOR_STKSIZE;

  KA_TRACE(10, ("__kmp_create_monitor: default stacksize = %lu bytes,"
                "requested stacksize = %lu bytes\n",
                size, __kmp_monitor_stksize));

#ifdef _POSIX_THREAD_ATTR_STACKSIZE
  KA_TRACE(10, ("__kmp_create_monitor: setting stacksize = %lu bytes,",
                __kmp_monitor_stksize));
  status = pthread_attr_setstacksize(&thread_attr, __kmp_monitor_stksize);
    __kmp_monitor_stksize *= 2;
            KMP_MSG(CantSetMonitorStackSize, (long int)__kmp_monitor_stksize),

      pthread_create(&handle, &thread_attr, __kmp_launch_monitor, (void *)th);

#ifdef _POSIX_THREAD_ATTR_STACKSIZE
    if (auto_adj_size && (__kmp_monitor_stksize < (size_t)0x40000000)) {
      __kmp_monitor_stksize *= 2;

  th->th.th_info.ds.ds_thread = handle;

#if KMP_REAL_TIME_FIX

#ifdef KMP_THREAD_ATTR
  status = pthread_attr_destroy(&thread_attr);

  KA_TRACE(10, ("__kmp_create_monitor: monitor created %#.8lx\n",
                th->th.th_info.ds.ds_thread));
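
// A minimal sketch of the worker/monitor creation pattern above (illustrative,
// hypothetical names): configure a joinable thread with an explicit stack
// size, create it, and keep the handle for a later pthread_join().
#include <pthread.h>
#include <stddef.h>
static int example_create_thread(void *(*entry)(void *), void *arg,
                                 size_t stack_size, pthread_t *out) {
  pthread_attr_t attr;
  pthread_attr_init(&attr);
  pthread_attr_setdetachstate(&attr, PTHREAD_CREATE_JOINABLE);
  pthread_attr_setstacksize(&attr, stack_size); // fails if below PTHREAD_STACK_MIN
  int status = pthread_create(out, &attr, entry, arg);
  pthread_attr_destroy(&attr);
  return status; // 0 on success, otherwise an errno-style code
}
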
  pthread_exit((void *)(intptr_t)exit_status);

void __kmp_resume_monitor();

  KA_TRACE(10, ("__kmp_reap_monitor: try to reap monitor thread with handle"
                th->th.th_info.ds.ds_thread));

    KA_TRACE(10, ("__kmp_reap_monitor: monitor did not start, returning\n"));

  status = pthread_kill(th->th.th_info.ds.ds_thread, 0);
    __kmp_resume_monitor();

  KA_TRACE(10, ("__kmp_reap_monitor: try to join with monitor\n"));
  status = pthread_join(th->th.th_info.ds.ds_thread, &exit_val);
  if (exit_val != th) {

  KA_TRACE(10, ("__kmp_reap_monitor: done reaping monitor thread with handle"
                th->th.th_info.ds.ds_thread));

      10, ("__kmp_reap_worker: try to reap T#%d\n", th->th.th_info.ds.ds_gtid));

  status = pthread_join(th->th.th_info.ds.ds_thread, &exit_val);
  if (exit_val != th) {
    KA_TRACE(10, ("__kmp_reap_worker: worker T#%d did not reap properly, "
                  th->th.th_info.ds.ds_gtid, exit_val));

  KA_TRACE(10, ("__kmp_reap_worker: done reaping T#%d\n",
                th->th.th_info.ds.ds_gtid));
#if KMP_HANDLE_SIGNALS

static void __kmp_null_handler(int signo) {
static void __kmp_team_handler(int signo) {

static void __kmp_sigaction(int signum, const struct sigaction *act,
                            struct sigaction *oldact) {
  int rc = sigaction(signum, act, oldact);

static void __kmp_install_one_handler(int sig, sig_func_t handler_func,
                                      int parallel_init) {
  ("__kmp_install_one_handler( %d, ..., %d )\n", sig, parallel_init));
  if (parallel_init) {
    struct sigaction new_action;
    struct sigaction old_action;
    new_action.sa_handler = handler_func;
    new_action.sa_flags = 0;
    sigfillset(&new_action.sa_mask);
    __kmp_sigaction(sig, &new_action, &old_action);
    if (old_action.sa_handler == __kmp_sighldrs[sig].sa_handler) {
      sigaddset(&__kmp_sigset, sig);
      __kmp_sigaction(sig, &old_action, NULL);
    __kmp_sigaction(sig, NULL, &__kmp_sighldrs[sig]);

static void __kmp_remove_one_handler(int sig) {
  KB_TRACE(60, ("__kmp_remove_one_handler( %d )\n", sig));
  if (sigismember(&__kmp_sigset, sig)) {
    struct sigaction old;
    __kmp_sigaction(sig, &__kmp_sighldrs[sig], &old);
    if ((old.sa_handler != __kmp_team_handler) &&
        (old.sa_handler != __kmp_null_handler)) {
      KB_TRACE(10, ("__kmp_remove_one_handler: oops, not our handler, "
                    "restoring: sig=%d\n",
      __kmp_sigaction(sig, &old, NULL);
    sigdelset(&__kmp_sigset, sig);

void __kmp_install_signals(int parallel_init) {
  KB_TRACE(10, ("__kmp_install_signals( %d )\n", parallel_init));
  if (__kmp_handle_signals || !parallel_init) {
    sigemptyset(&__kmp_sigset);
    __kmp_install_one_handler(SIGHUP, __kmp_team_handler, parallel_init);
    __kmp_install_one_handler(SIGINT, __kmp_team_handler, parallel_init);
    __kmp_install_one_handler(SIGQUIT, __kmp_team_handler, parallel_init);
    __kmp_install_one_handler(SIGILL, __kmp_team_handler, parallel_init);
    __kmp_install_one_handler(SIGABRT, __kmp_team_handler, parallel_init);
    __kmp_install_one_handler(SIGFPE, __kmp_team_handler, parallel_init);
    __kmp_install_one_handler(SIGBUS, __kmp_team_handler, parallel_init);
    __kmp_install_one_handler(SIGSEGV, __kmp_team_handler, parallel_init);
    __kmp_install_one_handler(SIGSYS, __kmp_team_handler, parallel_init);
    __kmp_install_one_handler(SIGTERM, __kmp_team_handler, parallel_init);
    __kmp_install_one_handler(SIGPIPE, __kmp_team_handler, parallel_init);

void __kmp_remove_signals(void) {
  KB_TRACE(10, ("__kmp_remove_signals()\n"));
  for (sig = 1; sig < NSIG; ++sig) {
    __kmp_remove_one_handler(sig);
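
// A small sketch of the install-and-remember pattern above (illustrative,
// hypothetical names): install a handler with sigaction(), keep the previous
// disposition, and on removal only restore it if the handler is still ours.
#include <signal.h>
static struct sigaction example_saved;
static void example_handler(int signo) { (void)signo; }
static void example_install(int sig) {
  struct sigaction sa;
  sa.sa_handler = example_handler;
  sa.sa_flags = 0;
  sigfillset(&sa.sa_mask); // block everything while the handler runs
  sigaction(sig, &sa, &example_saved); // remember the old disposition
}
static void example_remove(int sig) {
  struct sigaction cur;
  sigaction(sig, NULL, &cur);
  if (cur.sa_handler == example_handler) // someone may have replaced us
    sigaction(sig, &example_saved, NULL);
}
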
#ifdef KMP_CANCEL_THREADS
  status = pthread_setcancelstate(new_state, &old_state);

#ifdef KMP_CANCEL_THREADS
  status = pthread_setcancelstate(PTHREAD_CANCEL_DISABLE, old_state);

#if KMP_AFFINITY_SUPPORTED
#if KMP_OS_LINUX || KMP_OS_FREEBSD || KMP_OS_NETBSD || KMP_OS_DRAGONFLY ||     \
  kmp_set_thread_affinity_mask_initial();

  for (kmp_affinity_t *affinity : __kmp_affinities)
    *affinity = KMP_AFFINITY_INIT(affinity->env_var);
  __kmp_affin_fullMask = nullptr;
  __kmp_affin_origMask = nullptr;

  __kmp_init_monitor = 0;

#if !KMP_USE_DYNAMIC_LOCK
  KA_TRACE(10, ("__kmp_atfork_child: checking cache address list %p\n",
    KC_TRACE(50, ("__kmp_atfork_child: zeroing cache at address %p\n",

  if (old_value == new_value)
          &th->th.th_suspend_init_count, old_value, -1)) {
    status = pthread_cond_init(&th->th.th_suspend_cv.c_cond,
    status = pthread_mutex_init(&th->th.th_suspend_mx.m_mutex,

  status = pthread_cond_destroy(&th->th.th_suspend_cv.c_cond);
  status = pthread_mutex_destroy(&th->th.th_suspend_mx.m_mutex);
  --th->th.th_suspend_init_count;

  return (pthread_mutex_trylock(&th->th.th_suspend_mx.m_mutex) == 0);
  int status = pthread_mutex_lock(&th->th.th_suspend_mx.m_mutex);
  int status = pthread_mutex_unlock(&th->th.th_suspend_mx.m_mutex);
  typename C::flag_t old_spin;

  KF_TRACE(30, ("__kmp_suspend_template: T#%d enter for flag = %p\n", th_gtid,

  KF_TRACE(10, ("__kmp_suspend_template: T#%d setting sleep bit for spin(%p)\n",
                th_gtid, flag->get()));

  old_spin = flag->set_sleeping();
  th->th.th_sleep_loc_type = flag->get_type();
    flag->unset_sleeping();
    TCW_PTR(th->th.th_sleep_loc, NULL);

  KF_TRACE(5, ("__kmp_suspend_template: T#%d set sleep bit for spin(%p)==%x,"
               th_gtid, flag->get(), flag->load(), old_spin));

  if (flag->done_check_val(old_spin) || flag->done_check()) {
    flag->unset_sleeping();
    TCW_PTR(th->th.th_sleep_loc, NULL);
    KF_TRACE(5, ("__kmp_suspend_template: T#%d false alarm, reset sleep bit "
                 th_gtid, flag->get()));

    int deactivated = FALSE;

    while (flag->is_sleeping()) {
      __kmp_suspend_count++;
      __kmp_print_cond(buffer, &th->th.th_suspend_cv);
      __kmp_printf("__kmp_suspend_template: suspending T#%d: %s\n", th_gtid,

        th->th.th_active = FALSE;
        if (th->th.th_active_in_pool) {
          th->th.th_active_in_pool = FALSE;

#if USE_SUSPEND_TIMEOUT
      struct timespec now;
      struct timeval tval;

      status = gettimeofday(&tval, NULL);
      now.tv_sec += msecs / 1000;
      now.tv_nsec += (msecs % 1000) * 1000;

      KF_TRACE(15, ("__kmp_suspend_template: T#%d about to perform "
                    "pthread_cond_timedwait\n",
      status = pthread_cond_timedwait(&th->th.th_suspend_cv.c_cond,
                                      &th->th.th_suspend_mx.m_mutex, &now);
      KF_TRACE(15, ("__kmp_suspend_template: T#%d about to perform"
                    " pthread_cond_wait\n",
      status = pthread_cond_wait(&th->th.th_suspend_cv.c_cond,
                                 &th->th.th_suspend_mx.m_mutex);

      if (!flag->is_sleeping() &&
        flag->unset_sleeping();
        TCW_PTR(th->th.th_sleep_loc, NULL);

      if (status == ETIMEDOUT) {
        if (flag->is_sleeping()) {
          ("__kmp_suspend_template: T#%d timeout wakeup\n", th_gtid));
          KF_TRACE(2, ("__kmp_suspend_template: T#%d timeout wakeup, sleep bit "
          TCW_PTR(th->th.th_sleep_loc, NULL);
      } else if (flag->is_sleeping()) {
        ("__kmp_suspend_template: T#%d spurious wakeup\n", th_gtid));

      th->th.th_active = TRUE;
      if (TCR_4(th->th.th_in_pool)) {
        th->th.th_active_in_pool = TRUE;

  TCW_PTR(th->th.th_sleep_loc, NULL);

    __kmp_print_cond(buffer, &th->th.th_suspend_cv);
    __kmp_printf("__kmp_suspend_template: T#%d has awakened: %s\n", th_gtid,

  KF_TRACE(30, ("__kmp_suspend_template: T#%d exit\n", th_gtid));
template <bool C, bool S>
template <bool C, bool S>
template <bool C, bool S>

  KF_TRACE(30, ("__kmp_resume_template: T#%d wants to wakeup T#%d enter\n",
                gtid, target_gtid));

  if (!flag || flag != th->th.th_sleep_loc) {
    flag = (C *)CCAST(void *, th->th.th_sleep_loc);

      KF_TRACE(5, ("__kmp_resume_template: T#%d exiting, thread T#%d already "
                   "awake: flag(%p)\n",
                   gtid, target_gtid, (void *)NULL));
  } else if (flag->get_type() != th->th.th_sleep_loc_type) {
    ("__kmp_resume_template: T#%d retrying, thread T#%d Mismatch flag(%p), "
     "spin(%p) type=%d ptr_type=%d\n",
     gtid, target_gtid, flag, flag->get(), flag->get_type(),
     th->th.th_sleep_loc_type));

    if (!flag->is_sleeping()) {
      KF_TRACE(5, ("__kmp_resume_template: T#%d exiting, thread T#%d already "
                   "awake: flag(%p): %u\n",
                   gtid, target_gtid, flag->get(), (unsigned int)flag->load()));

    flag->unset_sleeping();
    TCW_PTR(th->th.th_sleep_loc, NULL);

    KF_TRACE(5, ("__kmp_resume_template: T#%d about to wakeup T#%d, reset "
                 "sleep bit for flag's loc(%p): %u\n",
                 gtid, target_gtid, flag->get(), (unsigned int)flag->load()));

    __kmp_print_cond(buffer, &th->th.th_suspend_cv);
    __kmp_printf("__kmp_resume_template: T#%d resuming T#%d: %s\n", gtid,
                 target_gtid, buffer);

  status = pthread_cond_signal(&th->th.th_suspend_cv.c_cond);

  KF_TRACE(30, ("__kmp_resume_template: T#%d exiting after signaling wake up"
                gtid, target_gtid));

template <bool C, bool S>
template <bool C, bool S>
template <bool C, bool S>
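
// A compact sketch of the suspend/resume protocol above (illustrative,
// hypothetical names): a thread parks on a condition variable until a
// predicate flips, re-checking it under the mutex to tolerate spurious
// wakeups; the waker flips the predicate and signals.
#include <pthread.h>
typedef struct {
  pthread_mutex_t mx;
  pthread_cond_t cv;
  int should_run; // analogue of the flag's "sleep bit"
} example_parker;
static void example_suspend(example_parker *p) {
  pthread_mutex_lock(&p->mx);
  while (!p->should_run) // pthread_cond_wait may wake spuriously
    pthread_cond_wait(&p->cv, &p->mx);
  pthread_mutex_unlock(&p->mx);
}
static void example_resume(example_parker *p) {
  pthread_mutex_lock(&p->mx);
  p->should_run = 1;
  pthread_cond_signal(&p->cv); // wake one sleeper
  pthread_mutex_unlock(&p->mx);
}
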
void __kmp_resume_monitor() {
  KF_TRACE(30, ("__kmp_resume_monitor: T#%d wants to wakeup T#%d enter\n", gtid,

  __kmp_printf("__kmp_resume_monitor: T#%d resuming T#%d: %s\n", gtid,

  KF_TRACE(30, ("__kmp_resume_monitor: T#%d exiting after signaling wake up"

                           (void *)(intptr_t)(gtid + 1));
    KA_TRACE(50, ("__kmp_gtid_set_specific: runtime shutdown, returning\n"));

    KA_TRACE(50, ("__kmp_gtid_get_specific: runtime shutdown, returning "
                  "KMP_GTID_SHUTDOWN\n"));
  KA_TRACE(50, ("__kmp_gtid_get_specific: key:%d gtid:%d\n",

  return (double)(buffer.tms_utime + buffer.tms_cutime) /
         (double)CLOCKS_PER_SEC;
  struct rusage r_usage;

  memset(info, 0, sizeof(*info));

  status = getrusage(RUSAGE_SELF, &r_usage);

  info->maxrss = r_usage.ru_maxrss;
  info->minflt = r_usage.ru_minflt;
  info->majflt = r_usage.ru_majflt;
  info->nswap = r_usage.ru_nswap;
  info->inblock = r_usage.ru_inblock;
  info->oublock = r_usage.ru_oublock;
  info->nvcsw = r_usage.ru_nvcsw;
  info->nivcsw = r_usage.ru_nivcsw;
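
// A self-contained sketch of the same getrusage() readout (illustrative,
// hypothetical names):
#include <stdio.h>
#include <sys/resource.h>
static void example_print_rusage(void) {
  struct rusage ru;
  if (getrusage(RUSAGE_SELF, &ru) == 0)
    printf("maxrss=%ld majflt=%ld nvcsw=%ld\n", ru.ru_maxrss, ru.ru_majflt,
           ru.ru_nvcsw);
}
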
  struct timeval tval;
  struct timespec stop;

  status = gettimeofday(&tval, NULL);
  *delta = (t_ns * 1e-9);

  struct timeval tval;
  status = gettimeofday(&tval, NULL);

#elif KMP_OS_DRAGONFLY || KMP_OS_FREEBSD || KMP_OS_NETBSD || KMP_OS_OPENBSD || \
    KMP_OS_HURD || KMP_OS_SOLARIS || KMP_OS_WASI || KMP_OS_AIX
  size_t len = sizeof(r);
  sysctlbyname("hw.logicalcpu", &r, &len, NULL, 0);
#error "Unknown or unsupported OS."

  return r > 0 ? r : 2;
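
// A minimal sketch of the sysctl-based CPU count used above (illustrative;
// "hw.logicalcpu" is the Darwin spelling, other BSDs expose e.g. "hw.ncpu"):
#include <stddef.h>
#include <sys/types.h>
#include <sys/sysctl.h>
static int example_logical_cpus(void) {
  int r = 0;
  size_t len = sizeof(r);
  if (sysctlbyname("hw.ncpu", &r, &len, NULL, 0) != 0)
    return 1; // fall back to one CPU if the query fails
  return r > 0 ? r : 1;
}
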
  va_start(args, format);
  FILE *f = fopen(path, "rb");

  pthread_mutexattr_t mutex_attr;
  pthread_condattr_t cond_attr;

#if (KMP_ARCH_X86 || KMP_ARCH_X86_64)
  if (!__kmp_cpuinfo.initialized) {
    __kmp_query_cpuid(&__kmp_cpuinfo);

  status = getrlimit(RLIMIT_STACK, &rlim);

  if (sysconf(_SC_THREADS)) {

  status = pthread_mutexattr_init(&mutex_attr);
  status = pthread_mutexattr_destroy(&mutex_attr);
  status = pthread_condattr_init(&cond_attr);
  status = pthread_condattr_destroy(&cond_attr);
  __kmp_itt_initialize();

  __kmp_itt_destroy();

#if KMP_AFFINITY_SUPPORTED
  __kmp_affinity_uninitialize();

  status = clock_gettime(CLOCK_PROCESS_CPUTIME_ID, &ts);
  status = gettimeofday(&tv, NULL);
  gettimeofday(&t, NULL);

#if KMP_ARCH_X86 || KMP_ARCH_X86_64
  diff = nsec2 - nsec;
  double tpus = 1000.0 * (double)(delay + (now - goal)) / (double)diff;
#if KMP_OS_LINUX || KMP_OS_HURD
  file = fopen(name, "r");

  void *beginning = NULL;
  void *ending = NULL;

  rc = fscanf(file, "%p-%p %4s %*[^\n]\n", &beginning, &ending, perms);

  if ((addr >= beginning) && (addr < ending)) {
    if (strcmp(perms, "rw") == 0) {
  int mib[] = {CTL_KERN, KERN_PROC, KERN_PROC_VMMAP, getpid()};
  rc = sysctl(mib, 4, NULL, &lstsz, NULL, 0);
  lstsz = lstsz * 4 / 3;
  rc = sysctl(mib, 4, buf, &lstsz, NULL, 0);

  char *up = buf + lstsz;
    struct kinfo_vmentry *cur = reinterpret_cast<struct kinfo_vmentry *>(lw);
    size_t cursz = cur->kve_structsize;
    void *start = reinterpret_cast<void *>(cur->kve_start);
    void *end = reinterpret_cast<void *>(cur->kve_end);
    if ((cur->kve_protection & KVME_PROT_READ) != 0 &&
        (cur->kve_protection & KVME_PROT_WRITE) != 0) {
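
// A self-contained sketch of the Linux /proc/self/maps scan from the branch
// a little earlier (illustrative, hypothetical names): every line begins with
// "start-end perms", so an address is considered mapped and writable when it
// falls inside a range whose permission string starts with "rw".
#include <stdio.h>
#include <string.h>
static int example_is_address_mapped_rw(void *addr) {
  FILE *f = fopen("/proc/self/maps", "r");
  void *lo, *hi;
  char perms[5];
  int found = 0;
  if (f == NULL)
    return 0;
  while (fscanf(f, "%p-%p %4s %*[^\n]\n", &lo, &hi, perms) == 3) {
    if (addr >= lo && addr < hi && strncmp(perms, "rw", 2) == 0) {
      found = 1;
      break;
    }
  }
  fclose(f);
  return found;
}
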
#elif KMP_OS_DRAGONFLY
  char err[_POSIX2_LINE_MAX];
  vm_map_entry entry, *c;

  fd = kvm_openfiles(nullptr, nullptr, nullptr, O_RDONLY, err);
  proc = kvm_getprocs(fd, KERN_PROC_PID, getpid(), &num);

  if (kvm_read(fd, static_cast<uintptr_t>(proc->kp_paddr), &p, sizeof(p)) !=
      kvm_read(fd, reinterpret_cast<uintptr_t>(p.p_vmspace), &sp, sizeof(sp)) !=

  uaddr = reinterpret_cast<uintptr_t>(addr);
  for (c = kvm_vm_map_entry_first(fd, cur, &entry); c;
       c = kvm_vm_map_entry_next(fd, c, &entry)) {
    if ((uaddr >= entry.ba.start) && (uaddr <= entry.ba.end)) {
      if ((entry.protection & VM_PROT_READ) != 0 &&
          (entry.protection & VM_PROT_WRITE) != 0) {
  pid_t pid = getpid();
  struct ps_prochandle *fd = Pgrab(pid, PGRAB_RDONLY, &err);

  size_t sz = (1 << 20);
  file = open(name, O_RDONLY);

  while (sz > 0 && (rd = pread(file, buf, sz, 0)) == sz) {

  map = reinterpret_cast<prmap_t *>(buf);
  uaddr = reinterpret_cast<uintptr_t>(addr);

  for (cur = map; rd > 0; cur++, rd = -sizeof(*map)) {
    if ((uaddr >= cur->pr_vaddr) && (uaddr < cur->pr_vaddr)) {
      if ((cur->pr_mflags & MA_READ) != 0 && (cur->pr_mflags & MA_WRITE) != 0) {

  rc = vm_read_overwrite(
      (vm_address_t)(addr),
      (vm_address_t)(&buffer),

  mib[2] = VM_PROC_MAP;
  mib[4] = sizeof(struct kinfo_vmentry);
  rc = sysctl(mib, __arraycount(mib), NULL, &size, NULL, 0);
  rc = sysctl(mib, __arraycount(mib), kiv, &size, NULL, 0);

  for (size_t i = 0; i < size; i++) {
    if (kiv[i].kve_start >= (uint64_t)addr &&
        kiv[i].kve_end <= (uint64_t)addr) {

  mib[1] = KERN_PROC_VMMAP;
  rc = sysctl(mib, 3, NULL, &size, NULL, 0);

  struct kinfo_vmentry kiv = {.kve_start = 0};
  while ((rc = sysctl(mib, 3, &kiv, &size, NULL, 0)) == 0) {
    if (kiv.kve_end == end)
    if (kiv.kve_start >= (uint64_t)addr && kiv.kve_end <= (uint64_t)addr) {

  found = (int)addr < (__builtin_wasm_memory_size(0) * PAGESIZE);
  uint32_t loadQueryBufSize = 4096u;
  if (loadQueryBuf == NULL) {
    rc = loadquery(L_GETXINFO | L_IGNOREUNLOAD, loadQueryBuf, loadQueryBufSize);
    if (errno != ENOMEM) {
    loadQueryBufSize <<= 1;

  struct ld_xinfo *curLdInfo = (struct ld_xinfo *)loadQueryBuf;
    uintptr_t curDataStart = (uintptr_t)curLdInfo->ldinfo_dataorg;
    uintptr_t curDataEnd = curDataStart + curLdInfo->ldinfo_datasize;
    if (curDataStart <= (uintptr_t)addr && (uintptr_t)addr < curDataEnd) {
    if (curLdInfo->ldinfo_next == 0u) {
    curLdInfo = (struct ld_xinfo *)((char *)curLdInfo + curLdInfo->ldinfo_next);

#error "Unknown or unsupported OS"
#ifdef USE_LOAD_BALANCE

#if KMP_OS_DARWIN || KMP_OS_DRAGONFLY || KMP_OS_FREEBSD || KMP_OS_NETBSD ||    \
    KMP_OS_OPENBSD || KMP_OS_SOLARIS

  int res = getloadavg(averages, 3);
  if (__kmp_load_balance_interval < 180 && (res >= 1)) {
    ret_avg = (int)averages[0];
  } else if ((__kmp_load_balance_interval >= 180 &&
              __kmp_load_balance_interval < 600) &&
    ret_avg = (int)averages[1];
  } else if ((__kmp_load_balance_interval >= 600) && (res == 3)) {
    ret_avg = (int)averages[2];

  static int glb_running_threads = 0;
  static double glb_call_time = 0;
  int running_threads = 0;
  double call_time = 0.0;

  if (glb_call_time &&
      (call_time - glb_call_time < __kmp_load_balance_interval))
    return glb_running_threads;

  glb_call_time = call_time;

  int logical_cpus = perfstat_cpu(NULL, NULL, sizeof(perfstat_cpu_t), 0);
  if (logical_cpus <= 0) {

                          logical_cpus * sizeof(perfstat_cpu_t));
  if (cpu_stat == NULL) {

  perfstat_id_t first_cpu_name;
  strcpy(first_cpu_name.name, FIRST_CPU);

  int rc = perfstat_cpu(&first_cpu_name, cpu_stat, sizeof(perfstat_cpu_t),

  for (int i = 0; i < logical_cpus; ++i) {
    running_threads += cpu_stat[i].runque;
    if (running_threads >= max)

  if (running_threads <= 0)
    running_threads = 1;

  glb_running_threads = running_threads;

  return running_threads;
  static int permanent_error = 0;
  static int glb_running_threads = 0;
  static double glb_call_time = 0;

  int running_threads = 0;

  DIR *proc_dir = NULL;
  struct dirent *proc_entry = NULL;

  DIR *task_dir = NULL;
  int task_path_fixed_len;

  int stat_path_fixed_len;

  int total_processes = 0;

  double call_time = 0.0;

  if (glb_call_time &&
      (call_time - glb_call_time < __kmp_load_balance_interval)) {
    running_threads = glb_running_threads;

  glb_call_time = call_time;

  if (permanent_error) {
    running_threads = -1;

  proc_dir = opendir("/proc");
  if (proc_dir == NULL) {
    running_threads = -1;
    permanent_error = 1;

  task_path_fixed_len = task_path.used;

  proc_entry = readdir(proc_dir);
  while (proc_entry != NULL) {
    if (proc_entry->d_type == DT_DIR && isdigit(proc_entry->d_name[0])) {
                       strcmp(proc_entry->d_name, "1") == 0);

      task_path.used = task_path_fixed_len;

      task_dir = opendir(task_path.str);
      if (task_dir == NULL) {
        if (strcmp(proc_entry->d_name, "1") == 0) {
          running_threads = -1;
          permanent_error = 1;

        stat_path_fixed_len = stat_path.used;

          if (proc_entry->d_type == DT_DIR && isdigit(task_entry->d_name[0])) {
            stat_path_fixed_len;

            stat_file = open(stat_path.str, O_RDONLY);
            if (stat_file == -1) {

              len = read(stat_file, buffer, sizeof(buffer) - 1);

              char *close_parent = strstr(buffer, ") ");
              if (close_parent != NULL) {
                char state = *(close_parent + 2);

                if (running_threads >= max) {

    proc_entry = readdir(proc_dir);

  if (running_threads <= 0) {
    running_threads = 1;

  if (proc_dir != NULL) {
  if (task_dir != NULL) {
  if (stat_file != -1) {

  glb_running_threads = running_threads;

  return running_threads;
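
// A compact sketch of the per-task probe inside the /proc scan above
// (illustrative, hypothetical names): in /proc/<pid>/task/<tid>/stat the
// command name is wrapped in parentheses and the scheduling state is the
// character right after ") ", with 'R' meaning runnable.
#include <fcntl.h>
#include <string.h>
#include <unistd.h>
static int example_task_is_running(const char *stat_path) {
  char buffer[256];
  int fd = open(stat_path, O_RDONLY);
  if (fd == -1)
    return 0;
  ssize_t len = read(fd, buffer, sizeof(buffer) - 1);
  close(fd);
  if (len <= 0)
    return 0;
  buffer[len] = '\0';
  char *close_paren = strstr(buffer, ") ");
  return close_paren != NULL && *(close_paren + 2) == 'R';
}
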
#if !(KMP_ARCH_X86 || KMP_ARCH_X86_64 || KMP_MIC ||                            \
      ((KMP_OS_LINUX || KMP_OS_DARWIN) && KMP_ARCH_AARCH64) ||                 \
      KMP_ARCH_PPC64 || KMP_ARCH_RISCV64 || KMP_ARCH_LOONGARCH64 ||            \
      KMP_ARCH_ARM || KMP_ARCH_VE || KMP_ARCH_S390X || KMP_ARCH_PPC_XCOFF ||   \
      KMP_ARCH_AARCH64_32)

                              void *, void *, void *);
                              void *, void *, void *, void *);
                              void *, void *, void *, void *, void *);
                              void *, void *, void *, void *, void *, void *);
                              void *, void *, void *, void *, void *, void *,
                              void *, void *, void *, void *, void *, void *,
                              void *, void *, void *, void *, void *, void *,
                              void *, void *, void *);
                              void *, void *, void *, void *, void *, void *,
                              void *, void *, void *, void *);
                              void *, void *, void *, void *, void *, void *,
                              void *, void *, void *, void *, void *);

                              void **exit_frame_ptr

    fprintf(stderr, "Too many args to microtask: %d!\n", argc);

    (*(microtask_t2)pkfn)(&gtid, &tid, p_argv[0], p_argv[1]);
    (*(microtask_t3)pkfn)(&gtid, &tid, p_argv[0], p_argv[1], p_argv[2]);
    (*(microtask_t4)pkfn)(&gtid, &tid, p_argv[0], p_argv[1], p_argv[2],
    (*(microtask_t5)pkfn)(&gtid, &tid, p_argv[0], p_argv[1], p_argv[2],
                          p_argv[3], p_argv[4]);
    (*(microtask_t6)pkfn)(&gtid, &tid, p_argv[0], p_argv[1], p_argv[2],
                          p_argv[3], p_argv[4], p_argv[5]);
    (*(microtask_t7)pkfn)(&gtid, &tid, p_argv[0], p_argv[1], p_argv[2],
                          p_argv[3], p_argv[4], p_argv[5], p_argv[6]);
    (*(microtask_t8)pkfn)(&gtid, &tid, p_argv[0], p_argv[1], p_argv[2],
                          p_argv[3], p_argv[4], p_argv[5], p_argv[6],
    (*(microtask_t9)pkfn)(&gtid, &tid, p_argv[0], p_argv[1], p_argv[2],
                          p_argv[3], p_argv[4], p_argv[5], p_argv[6], p_argv[7],
    (*(microtask_t10)pkfn)(&gtid, &tid, p_argv[0], p_argv[1], p_argv[2],
                           p_argv[3], p_argv[4], p_argv[5], p_argv[6],
                           p_argv[7], p_argv[8], p_argv[9]);
    (*(microtask_t11)pkfn)(&gtid, &tid, p_argv[0], p_argv[1], p_argv[2],
                           p_argv[3], p_argv[4], p_argv[5], p_argv[6],
                           p_argv[7], p_argv[8], p_argv[9], p_argv[10]);
    (*(microtask_t12)pkfn)(&gtid, &tid, p_argv[0], p_argv[1], p_argv[2],
                           p_argv[3], p_argv[4], p_argv[5], p_argv[6],
                           p_argv[7], p_argv[8], p_argv[9], p_argv[10],
    (*(microtask_t13)pkfn)(&gtid, &tid, p_argv[0], p_argv[1], p_argv[2],
                           p_argv[3], p_argv[4], p_argv[5], p_argv[6],
                           p_argv[7], p_argv[8], p_argv[9], p_argv[10],
                           p_argv[11], p_argv[12]);
    (*(microtask_t14)pkfn)(&gtid, &tid, p_argv[0], p_argv[1], p_argv[2],
                           p_argv[3], p_argv[4], p_argv[5], p_argv[6],
                           p_argv[7], p_argv[8], p_argv[9], p_argv[10],
                           p_argv[11], p_argv[12], p_argv[13]);
    (*(microtask_t15)pkfn)(&gtid, &tid, p_argv[0], p_argv[1], p_argv[2],
                           p_argv[3], p_argv[4], p_argv[5], p_argv[6],
                           p_argv[7], p_argv[8], p_argv[9], p_argv[10],
                           p_argv[11], p_argv[12], p_argv[13], p_argv[14]);
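
// A tiny sketch of the dispatch idea above (illustrative, hypothetical names):
// when no assembly trampoline is available, the generic entry point is cast
// to a function type of the right arity and called with the unpacked args.
typedef void (*example_microtask2_t)(int *, int *, void *, void *);
static int example_invoke(void (*pkfn)(int *, int *, ...), int gtid, int tid,
                          int argc, void *p_argv[]) {
  switch (argc) {
  case 2:
    (*(example_microtask2_t)pkfn)(&gtid, &tid, p_argv[0], p_argv[1]);
    return 1;
  default:
    return 0; // a full implementation enumerates every supported arity
  }
}
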
pthread_cond_t hidden_helper_threads_initz_cond_var;
pthread_mutex_t hidden_helper_threads_initz_lock;
volatile int hidden_helper_initz_signaled = FALSE;

pthread_cond_t hidden_helper_threads_deinitz_cond_var;
pthread_mutex_t hidden_helper_threads_deinitz_lock;
volatile int hidden_helper_deinitz_signaled = FALSE;

pthread_cond_t hidden_helper_main_thread_cond_var;
pthread_mutex_t hidden_helper_main_thread_lock;
volatile int hidden_helper_main_thread_signaled = FALSE;

sem_t hidden_helper_task_sem;

  int status = sem_wait(&hidden_helper_task_sem);

      pthread_cond_init(&hidden_helper_threads_initz_cond_var, nullptr);
  status = pthread_cond_init(&hidden_helper_threads_deinitz_cond_var, nullptr);
  status = pthread_cond_init(&hidden_helper_main_thread_cond_var, nullptr);
  status = pthread_mutex_init(&hidden_helper_threads_initz_lock, nullptr);
  status = pthread_mutex_init(&hidden_helper_threads_deinitz_lock, nullptr);
  status = pthread_mutex_init(&hidden_helper_main_thread_lock, nullptr);

  status = sem_init(&hidden_helper_task_sem, 0, 0);

      [](void *) -> void * {

  int status = pthread_mutex_lock(&hidden_helper_threads_initz_lock);
  if (!TCR_4(hidden_helper_initz_signaled)) {
    status = pthread_cond_wait(&hidden_helper_threads_initz_cond_var,
                               &hidden_helper_threads_initz_lock);
  status = pthread_mutex_unlock(&hidden_helper_threads_initz_lock);

  int status = pthread_mutex_lock(&hidden_helper_threads_initz_lock);
  status = pthread_cond_signal(&hidden_helper_threads_initz_cond_var);
  status = pthread_mutex_unlock(&hidden_helper_threads_initz_lock);

  int status = pthread_mutex_lock(&hidden_helper_main_thread_lock);
  if (!TCR_4(hidden_helper_main_thread_signaled)) {
    status = pthread_cond_wait(&hidden_helper_main_thread_cond_var,
                               &hidden_helper_main_thread_lock);
  status = pthread_mutex_unlock(&hidden_helper_main_thread_lock);

  int status = pthread_mutex_lock(&hidden_helper_main_thread_lock);
  status = pthread_cond_signal(&hidden_helper_main_thread_cond_var);
  status = pthread_mutex_unlock(&hidden_helper_main_thread_lock);

  int status = sem_post(&hidden_helper_task_sem);

  int status = pthread_mutex_lock(&hidden_helper_threads_deinitz_lock);
  if (!TCR_4(hidden_helper_deinitz_signaled)) {
    status = pthread_cond_wait(&hidden_helper_threads_deinitz_cond_var,
                               &hidden_helper_threads_deinitz_lock);
  status = pthread_mutex_unlock(&hidden_helper_threads_deinitz_lock);

  int status = pthread_mutex_lock(&hidden_helper_threads_deinitz_lock);
  status = pthread_cond_signal(&hidden_helper_threads_deinitz_cond_var);
  status = pthread_mutex_unlock(&hidden_helper_threads_deinitz_lock);

  KMP_ASSERT(0 && "Hidden helper task is not supported on this OS");
  KMP_ASSERT(0 && "Hidden helper task is not supported on this OS");
  KMP_ASSERT(0 && "Hidden helper task is not supported on this OS");
  KMP_ASSERT(0 && "Hidden helper task is not supported on this OS");
  KMP_ASSERT(0 && "Hidden helper task is not supported on this OS");
  KMP_ASSERT(0 && "Hidden helper task is not supported on this OS");
  KMP_ASSERT(0 && "Hidden helper task is not supported on this OS");
  KMP_ASSERT(0 && "Hidden helper task is not supported on this OS");
  KMP_ASSERT(0 && "Hidden helper task is not supported on this OS");

  DIR *dir = opendir("/dev/shm");
  } else if (ENOENT == errno) {

  DIR *dir = opendir("/tmp");
  } else if (ENOENT == errno) {
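
// A minimal sketch of the wait/release protocol above (illustrative,
// hypothetical names): a POSIX semaphore lets one thread hand work to helper
// threads, which block in sem_wait() until sem_post() releases them.
#include <semaphore.h>
static sem_t example_task_sem;
static void example_init(void) { sem_init(&example_task_sem, 0, 0); }
static void example_worker_wait(void) { sem_wait(&example_task_sem); }
static void example_release_one_worker(void) { sem_post(&example_task_sem); }
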