/*
 * z_Linux_util.cpp -- platform specific routines.
 */

//===----------------------------------------------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#include "kmp.h"
#include "kmp_affinity.h"
#include "kmp_i18n.h"
#include "kmp_io.h"
#include "kmp_itt.h"
#include "kmp_lock.h"
#include "kmp_stats.h"
#include "kmp_str.h"
#include "kmp_wait_release.h"
#include "kmp_wrapper_getpid.h"

#if !KMP_OS_DRAGONFLY && !KMP_OS_FREEBSD && !KMP_OS_NETBSD && !KMP_OS_OPENBSD
#include <alloca.h>
#endif
#include <math.h> // HUGE_VAL.
#if KMP_OS_LINUX
#include <semaphore.h>
#endif // KMP_OS_LINUX
#include <sys/resource.h>
#if KMP_OS_AIX
#include <sys/ldr.h>
#include <libperfstat.h>
#else
#include <sys/syscall.h>
#endif
#include <sys/time.h>
#include <sys/times.h>
#include <unistd.h>

#if KMP_OS_LINUX
#include <sys/sysinfo.h>
#if KMP_USE_FUTEX
// We should really include <futex.h>, but that causes compatibility problems
// on different Linux* OS distributions that either require that you include
// (or break when you try to include) <pci/types.h>. Since all we need are the
// two macros below (which are part of the kernel ABI, so can't change), we
// just define the constants here and don't include <futex.h>.
#ifndef FUTEX_WAIT
#define FUTEX_WAIT 0
#endif
#ifndef FUTEX_WAKE
#define FUTEX_WAKE 1
#endif
#endif
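// Illustrative sketch, not part of the runtime: how the two constants above
// are typically used. FUTEX_WAIT blocks the caller while *addr still equals
// the expected value; FUTEX_WAKE wakes up to `count` waiters blocked on the
// same word. The helper names here are hypothetical.
#if 0
static void __futex_wait_sketch(volatile kmp_int32 *addr, kmp_int32 expected) {
  // Returns when *addr != expected, or on a wakeup/signal (EAGAIN/EINTR).
  syscall(__NR_futex, addr, FUTEX_WAIT, expected, NULL, NULL, 0);
}
static void __futex_wake_sketch(volatile kmp_int32 *addr, int count) {
  syscall(__NR_futex, addr, FUTEX_WAKE, count, NULL, NULL, 0);
}
#endif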
#elif KMP_OS_DARWIN
#include <mach/mach.h>
#include <sys/sysctl.h>
#elif KMP_OS_DRAGONFLY || KMP_OS_FREEBSD
#include <sys/types.h>
#include <sys/sysctl.h>
#include <sys/user.h>
#include <pthread_np.h>
#if KMP_OS_DRAGONFLY
#include <kvm.h>
#endif
#elif KMP_OS_NETBSD || KMP_OS_OPENBSD
#include <sys/types.h>
#include <sys/sysctl.h>
#if KMP_OS_NETBSD
#include <sched.h>
#endif
#elif KMP_OS_SOLARIS
#include <libproc.h>
#include <procfs.h>
#include <thread.h>
#include <sys/loadavg.h>
#endif

#include <ctype.h>
#include <dirent.h>
#include <fcntl.h>

struct kmp_sys_timer {
  struct timespec start;
};

#ifndef TIMEVAL_TO_TIMESPEC
// Convert timeval to timespec.
#define TIMEVAL_TO_TIMESPEC(tv, ts)                                            \
  do {                                                                         \
    (ts)->tv_sec = (tv)->tv_sec;                                               \
    (ts)->tv_nsec = (tv)->tv_usec * 1000;                                      \
  } while (0)
#endif

// Convert timespec to nanoseconds.
#define TS2NS(timespec)                                                        \
  (((timespec).tv_sec * (long int)1e9) + (timespec).tv_nsec)
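
// Illustrative sketch, not part of the runtime: measuring an interval with
// the two conversion macros above, mirroring what __kmp_read_system_time does.
#if 0
static double __elapsed_seconds_sketch(void) {
  struct timeval tv;
  struct timespec ts_start, ts_stop;
  gettimeofday(&tv, NULL);
  TIMEVAL_TO_TIMESPEC(&tv, &ts_start);
  /* ... work to be timed ... */
  gettimeofday(&tv, NULL);
  TIMEVAL_TO_TIMESPEC(&tv, &ts_stop);
  return (double)(TS2NS(ts_stop) - TS2NS(ts_start)) * 1e-9;
}
#endif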

static struct kmp_sys_timer __kmp_sys_timer_data;

#if KMP_HANDLE_SIGNALS
typedef void (*sig_func_t)(int);
STATIC_EFI2_WORKAROUND struct sigaction __kmp_sighldrs[NSIG];
static sigset_t __kmp_sigset;
#endif

static int __kmp_init_runtime = FALSE;

static int __kmp_fork_count = 0;

static pthread_condattr_t __kmp_suspend_cond_attr;
static pthread_mutexattr_t __kmp_suspend_mutex_attr;

static kmp_cond_align_t __kmp_wait_cv;
static kmp_mutex_align_t __kmp_wait_mx;

kmp_uint64 __kmp_ticks_per_msec = 1000000;
kmp_uint64 __kmp_ticks_per_usec = 1000;

#ifdef DEBUG_SUSPEND
static void __kmp_print_cond(char *buffer, kmp_cond_align_t *cond) {
  KMP_SNPRINTF(buffer, 128, "(cond (lock (%ld, %d)), (descr (%p)))",
               cond->c_cond.__c_lock.__status, cond->c_cond.__c_lock.__spinlock,
               cond->c_cond.__c_waiting);
}
#endif

#if ((KMP_OS_LINUX || KMP_OS_FREEBSD || KMP_OS_NETBSD || KMP_OS_DRAGONFLY ||  \
      KMP_OS_AIX) &&                                                          \
     KMP_AFFINITY_SUPPORTED)

/* Affinity support */

void __kmp_affinity_bind_thread(int which) {
  KMP_ASSERT2(KMP_AFFINITY_CAPABLE(),
              "Illegal set affinity operation when not capable");

  kmp_affin_mask_t *mask;
  KMP_CPU_ALLOC_ON_STACK(mask);
  KMP_CPU_ZERO(mask);
  KMP_CPU_SET(which, mask);
  __kmp_set_system_affinity(mask, TRUE);
  KMP_CPU_FREE_FROM_STACK(mask);
}

#if KMP_OS_AIX
void __kmp_affinity_determine_capable(const char *env_var) {
  // All versions of AIX support bindprocessor().

  size_t mask_size = __kmp_xproc / CHAR_BIT;
  // Round up to byte boundary.
  if (__kmp_xproc % CHAR_BIT)
    ++mask_size;

  // Round up to the mask_size_type boundary.
  if (mask_size % sizeof(__kmp_affin_mask_size))
    mask_size += sizeof(__kmp_affin_mask_size) -
                 mask_size % sizeof(__kmp_affin_mask_size);
  KMP_AFFINITY_ENABLE(mask_size);
  KA_TRACE(10,
           ("__kmp_affinity_determine_capable: "
            "AIX OS affinity interface bindprocessor functional (mask size = "
            "%" KMP_SIZE_T_SPEC ").\n",
            __kmp_affin_mask_size));
}

#else // !KMP_OS_AIX

/* Determine if we can access affinity functionality on this version of
 * Linux* OS by checking __NR_sched_{get,set}affinity system calls, and set
 * __kmp_affin_mask_size to the appropriate value (0 means not capable). */
void __kmp_affinity_determine_capable(const char *env_var) {
  // Check and see if the OS supports thread affinity.

#if KMP_OS_LINUX
#define KMP_CPU_SET_SIZE_LIMIT (1024 * 1024)
#define KMP_CPU_SET_TRY_SIZE CACHE_LINE
#elif KMP_OS_FREEBSD || KMP_OS_DRAGONFLY
#define KMP_CPU_SET_SIZE_LIMIT (sizeof(cpuset_t))
#elif KMP_OS_NETBSD
#define KMP_CPU_SET_SIZE_LIMIT (256)
#endif

  int verbose = __kmp_affinity.flags.verbose;
  int warnings = __kmp_affinity.flags.warnings;
  enum affinity_type type = __kmp_affinity.type;

#if KMP_OS_LINUX
  long gCode;
  unsigned char *buf;
  buf = (unsigned char *)KMP_INTERNAL_MALLOC(KMP_CPU_SET_SIZE_LIMIT);

  // If the syscall returns a suggestion for the size,
  // then we don't have to search for an appropriate size.
  gCode = syscall(__NR_sched_getaffinity, 0, KMP_CPU_SET_TRY_SIZE, buf);
  KA_TRACE(30, ("__kmp_affinity_determine_capable: "
                "initial getaffinity call returned %ld errno = %d\n",
                gCode, errno));

  if (gCode < 0 && errno != EINVAL) {
    // System call not supported
    if (verbose ||
        (warnings && (type != affinity_none) && (type != affinity_default) &&
         (type != affinity_disabled))) {
      int error = errno;
      kmp_msg_t err_code = KMP_ERR(error);
      __kmp_msg(kmp_ms_warning, KMP_MSG(GetAffSysCallNotSupported, env_var),
                err_code, __kmp_msg_null);
      if (__kmp_generate_warnings == kmp_warnings_off) {
        __kmp_str_free(&err_code.str);
      }
    }
    KMP_AFFINITY_DISABLE();
    KMP_INTERNAL_FREE(buf);
    return;
  } else if (gCode > 0) {
    // The optimal situation: the OS returns the size of the buffer it expects.
    KMP_AFFINITY_ENABLE(gCode);
    KA_TRACE(10, ("__kmp_affinity_determine_capable: "
                  "affinity supported (mask size %d)\n",
                  (int)__kmp_affin_mask_size));
    KMP_INTERNAL_FREE(buf);
    return;
  }

  // Call the getaffinity system call repeatedly with increasing set sizes
  // until we succeed, or reach an upper bound on the search.
  KA_TRACE(30, ("__kmp_affinity_determine_capable: "
                "searching for proper set size\n"));
  int size;
  for (size = 1; size <= KMP_CPU_SET_SIZE_LIMIT; size *= 2) {
    gCode = syscall(__NR_sched_getaffinity, 0, size, buf);
    KA_TRACE(30, ("__kmp_affinity_determine_capable: "
                  "getaffinity for mask size %ld returned %ld errno = %d\n",
                  size, gCode, errno));

    if (gCode < 0) {
      if (errno == ENOSYS) {
        // We shouldn't get here
        KA_TRACE(30, ("__kmp_affinity_determine_capable: "
                      "inconsistent OS call behavior: errno == ENOSYS for mask "
                      "size %d\n",
                      size));
        if (verbose ||
            (warnings && (type != affinity_none) &&
             (type != affinity_default) && (type != affinity_disabled))) {
          int error = errno;
          kmp_msg_t err_code = KMP_ERR(error);
          __kmp_msg(kmp_ms_warning, KMP_MSG(GetAffSysCallNotSupported, env_var),
                    err_code, __kmp_msg_null);
          if (__kmp_generate_warnings == kmp_warnings_off) {
            __kmp_str_free(&err_code.str);
          }
        }
        KMP_AFFINITY_DISABLE();
        KMP_INTERNAL_FREE(buf);
        return;
      }
      continue;
    }

    KMP_AFFINITY_ENABLE(gCode);
    KA_TRACE(10, ("__kmp_affinity_determine_capable: "
                  "affinity supported (mask size %d)\n",
                  (int)__kmp_affin_mask_size));
    KMP_INTERNAL_FREE(buf);
    return;
  }
#elif KMP_OS_FREEBSD || KMP_OS_NETBSD || KMP_OS_DRAGONFLY
  long gCode;
  unsigned char *buf;
  buf = (unsigned char *)KMP_INTERNAL_MALLOC(KMP_CPU_SET_SIZE_LIMIT);
  gCode = pthread_getaffinity_np(pthread_self(), KMP_CPU_SET_SIZE_LIMIT,
                                 reinterpret_cast<cpuset_t *>(buf));
  KA_TRACE(30, ("__kmp_affinity_determine_capable: "
                "initial getaffinity call returned %d errno = %d\n",
                gCode, errno));
  if (gCode == 0) {
    KMP_AFFINITY_ENABLE(KMP_CPU_SET_SIZE_LIMIT);
    KA_TRACE(10, ("__kmp_affinity_determine_capable: "
                  "affinity supported (mask size %d)\n",
                  (int)__kmp_affin_mask_size));
    KMP_INTERNAL_FREE(buf);
    return;
  }
#endif
  KMP_INTERNAL_FREE(buf);

  // Affinity is not supported
  KMP_AFFINITY_DISABLE();
  KA_TRACE(10, ("__kmp_affinity_determine_capable: "
                "cannot determine mask size - affinity not supported\n"));
  if (verbose || (warnings && (type != affinity_none) &&
                  (type != affinity_default) && (type != affinity_disabled))) {
    KMP_WARNING(AffCantGetMaskSize, env_var);
  }
}
#endif // KMP_OS_AIX
#endif // (KMP_OS_LINUX || KMP_OS_FREEBSD || KMP_OS_NETBSD ||                  \
          KMP_OS_DRAGONFLY || KMP_OS_AIX) && KMP_AFFINITY_SUPPORTED

#if KMP_USE_FUTEX

int __kmp_futex_determine_capable() {
  int loc = 0;
  long rc = syscall(__NR_futex, &loc, FUTEX_WAKE, 1, NULL, NULL, 0);
  int retval = (rc == 0) || (errno != ENOSYS);

  KA_TRACE(10,
           ("__kmp_futex_determine_capable: rc = %d errno = %d\n", rc, errno));
  KA_TRACE(10, ("__kmp_futex_determine_capable: futex syscall%s supported\n",
                retval ? "" : " not"));

  return retval;
}

#endif // KMP_USE_FUTEX

#if (KMP_ARCH_X86 || KMP_ARCH_X86_64 || KMP_ARCH_WASM) && (!KMP_ASM_INTRINS)
/* Only 32-bit "add-exchange" instruction on IA-32 architecture causes us to
   use compare_and_store for these routines */

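// Each emulation routine below follows the classic compare-and-swap retry
// pattern: read the current value, compute the updated value, and retry until
// KMP_COMPARE_AND_STORE_RELn succeeds; the value observed before the
// successful store is returned, matching fetch-and-op semantics.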
kmp_int8 __kmp_test_then_or8(volatile kmp_int8 *p, kmp_int8 d) {
  kmp_int8 old_value, new_value;

  old_value = TCR_1(*p);
  new_value = old_value | d;

  while (!KMP_COMPARE_AND_STORE_REL8(p, old_value, new_value)) {
    KMP_CPU_PAUSE();
    old_value = TCR_1(*p);
    new_value = old_value | d;
  }
  return old_value;
}

kmp_int8 __kmp_test_then_and8(volatile kmp_int8 *p, kmp_int8 d) {
  kmp_int8 old_value, new_value;

  old_value = TCR_1(*p);
  new_value = old_value & d;

  while (!KMP_COMPARE_AND_STORE_REL8(p, old_value, new_value)) {
    KMP_CPU_PAUSE();
    old_value = TCR_1(*p);
    new_value = old_value & d;
  }
  return old_value;
}

kmp_uint32 __kmp_test_then_or32(volatile kmp_uint32 *p, kmp_uint32 d) {
  kmp_uint32 old_value, new_value;

  old_value = TCR_4(*p);
  new_value = old_value | d;

  while (!KMP_COMPARE_AND_STORE_REL32(p, old_value, new_value)) {
    KMP_CPU_PAUSE();
    old_value = TCR_4(*p);
    new_value = old_value | d;
  }
  return old_value;
}

kmp_uint32 __kmp_test_then_and32(volatile kmp_uint32 *p, kmp_uint32 d) {
  kmp_uint32 old_value, new_value;

  old_value = TCR_4(*p);
  new_value = old_value & d;

  while (!KMP_COMPARE_AND_STORE_REL32(p, old_value, new_value)) {
    KMP_CPU_PAUSE();
    old_value = TCR_4(*p);
    new_value = old_value & d;
  }
  return old_value;
}

#if KMP_ARCH_X86 || KMP_ARCH_WASM
kmp_int8 __kmp_test_then_add8(volatile kmp_int8 *p, kmp_int8 d) {
  kmp_int8 old_value, new_value;

  old_value = TCR_1(*p);
  new_value = old_value + d;

  while (!KMP_COMPARE_AND_STORE_REL8(p, old_value, new_value)) {
    KMP_CPU_PAUSE();
    old_value = TCR_1(*p);
    new_value = old_value + d;
  }
  return old_value;
}

kmp_int64 __kmp_test_then_add64(volatile kmp_int64 *p, kmp_int64 d) {
  kmp_int64 old_value, new_value;

  old_value = TCR_8(*p);
  new_value = old_value + d;

  while (!KMP_COMPARE_AND_STORE_REL64(p, old_value, new_value)) {
    KMP_CPU_PAUSE();
    old_value = TCR_8(*p);
    new_value = old_value + d;
  }
  return old_value;
}
#endif /* KMP_ARCH_X86 || KMP_ARCH_WASM */

kmp_uint64 __kmp_test_then_or64(volatile kmp_uint64 *p, kmp_uint64 d) {
  kmp_uint64 old_value, new_value;

  old_value = TCR_8(*p);
  new_value = old_value | d;
  while (!KMP_COMPARE_AND_STORE_REL64(p, old_value, new_value)) {
    KMP_CPU_PAUSE();
    old_value = TCR_8(*p);
    new_value = old_value | d;
  }
  return old_value;
}

kmp_uint64 __kmp_test_then_and64(volatile kmp_uint64 *p, kmp_uint64 d) {
  kmp_uint64 old_value, new_value;

  old_value = TCR_8(*p);
  new_value = old_value & d;
  while (!KMP_COMPARE_AND_STORE_REL64(p, old_value, new_value)) {
    KMP_CPU_PAUSE();
    old_value = TCR_8(*p);
    new_value = old_value & d;
  }
  return old_value;
}

#endif /* (KMP_ARCH_X86 || KMP_ARCH_X86_64 || KMP_ARCH_WASM) && (!KMP_ASM_INTRINS) */

void __kmp_terminate_thread(int gtid) {
  int status;
  kmp_info_t *th = __kmp_threads[gtid];

  if (!th)
    return;

#ifdef KMP_CANCEL_THREADS
  KA_TRACE(10, ("__kmp_terminate_thread: kill (%d)\n", gtid));
  status = pthread_cancel(th->th.th_info.ds.ds_thread);
  if (status != 0 && status != ESRCH) {
    __kmp_fatal(KMP_MSG(CantTerminateWorkerThread), KMP_ERR(status),
                __kmp_msg_null);
  }
#endif
  KMP_YIELD(TRUE);
} //

/* Set thread stack info.
   If values are unreasonable, assume call failed and use incremental stack
   refinement method instead. Returns TRUE if the stack parameters could be
   determined exactly, FALSE if incremental refinement is necessary. */
static kmp_int32 __kmp_set_stack_info(int gtid, kmp_info_t *th) {
  int stack_data;
#if KMP_OS_LINUX || KMP_OS_DRAGONFLY || KMP_OS_FREEBSD || KMP_OS_NETBSD ||    \
    KMP_OS_HURD || KMP_OS_SOLARIS || KMP_OS_AIX
  int status;
  size_t size = 0;
  void *addr = 0;

  /* Always do incremental stack refinement for ubermaster threads since the
     initial thread stack range can be reduced by sibling thread creation so
     pthread_attr_getstack may cause thread gtid aliasing */
  if (!KMP_UBER_GTID(gtid)) {

#if KMP_OS_SOLARIS
    stack_t s;
    if ((status = thr_stksegment(&s)) < 0) {
      KMP_CHECK_SYSFAIL("thr_stksegment", status);
    }

    addr = s.ss_sp;
    size = s.ss_size;
    KA_TRACE(60, ("__kmp_set_stack_info: T#%d thr_stksegment returned size:"
                  " %lu, low addr: %p\n",
                  gtid, size, addr));
#else
    pthread_attr_t attr;
    /* Fetch the real thread attributes */
    status = pthread_attr_init(&attr);
    KMP_CHECK_SYSFAIL("pthread_attr_init", status);
#if KMP_OS_DRAGONFLY || KMP_OS_FREEBSD || KMP_OS_NETBSD
    status = pthread_attr_get_np(pthread_self(), &attr);
    KMP_CHECK_SYSFAIL("pthread_attr_get_np", status);
#else
    status = pthread_getattr_np(pthread_self(), &attr);
    KMP_CHECK_SYSFAIL("pthread_getattr_np", status);
#endif
    status = pthread_attr_getstack(&attr, &addr, &size);
    KMP_CHECK_SYSFAIL("pthread_attr_getstack", status);
    KA_TRACE(60,
             ("__kmp_set_stack_info: T#%d pthread_attr_getstack returned size:"
              " %lu, low addr: %p\n",
              gtid, size, addr));
    status = pthread_attr_destroy(&attr);
    KMP_CHECK_SYSFAIL("pthread_attr_destroy", status);
#endif
  }

  if (size != 0 && addr != 0) { // was stack parameter determination successful?
    /* Store the correct base and size */
    TCW_PTR(th->th.th_info.ds.ds_stackbase, (((char *)addr) + size));
    TCW_PTR(th->th.th_info.ds.ds_stacksize, size);
    TCW_4(th->th.th_info.ds.ds_stackgrow, FALSE);
    return TRUE;
  }
#endif /* KMP_OS_LINUX || KMP_OS_DRAGONFLY || KMP_OS_FREEBSD || KMP_OS_NETBSD \
          || KMP_OS_HURD || KMP_OS_SOLARIS || KMP_OS_AIX */
  /* Use incremental refinement starting from initial conservative estimate */
  TCW_PTR(th->th.th_info.ds.ds_stacksize, 0);
  TCW_PTR(th->th.th_info.ds.ds_stackbase, &stack_data);
  TCW_4(th->th.th_info.ds.ds_stackgrow, TRUE);
  return FALSE;
}

static void *__kmp_launch_worker(void *thr) {
  int status, old_type, old_state;
#ifdef KMP_BLOCK_SIGNALS
  sigset_t new_set, old_set;
#endif /* KMP_BLOCK_SIGNALS */
  void *exit_val;
#if KMP_OS_LINUX || KMP_OS_DRAGONFLY || KMP_OS_FREEBSD || KMP_OS_NETBSD ||    \
    KMP_OS_OPENBSD || KMP_OS_HURD || KMP_OS_SOLARIS || KMP_OS_AIX
  void *volatile padding = 0;
#endif
  int gtid;

  gtid = ((kmp_info_t *)thr)->th.th_info.ds.ds_gtid;
  __kmp_gtid_set_specific(gtid);
#ifdef KMP_TDATA_GTID
  __kmp_gtid = gtid;
#endif
#if KMP_STATS_ENABLED
  // set thread local index to point to thread-specific stats
  __kmp_stats_thread_ptr = ((kmp_info_t *)thr)->th.th_stats;
  __kmp_stats_thread_ptr->startLife();
  KMP_SET_THREAD_STATE(IDLE);
  KMP_INIT_PARTITIONED_TIMERS(OMP_idle);
#endif

#if USE_ITT_BUILD
  __kmp_itt_thread_name(gtid);
#endif /* USE_ITT_BUILD */

#if KMP_AFFINITY_SUPPORTED
  __kmp_affinity_bind_init_mask(gtid);
#endif

#ifdef KMP_CANCEL_THREADS
  status = pthread_setcanceltype(PTHREAD_CANCEL_ASYNCHRONOUS, &old_type);
  KMP_CHECK_SYSFAIL("pthread_setcanceltype", status);
  // josh todo: isn't PTHREAD_CANCEL_ENABLE default for newly-created threads?
  status = pthread_setcancelstate(PTHREAD_CANCEL_ENABLE, &old_state);
  KMP_CHECK_SYSFAIL("pthread_setcancelstate", status);
#endif

#if KMP_ARCH_X86 || KMP_ARCH_X86_64
  // Set FP control regs to be a copy of the parallel initialization thread's.
  __kmp_clear_x87_fpu_status_word();
  __kmp_load_x87_fpu_control_word(&__kmp_init_x87_fpu_control_word);
  __kmp_load_mxcsr(&__kmp_init_mxcsr);
#endif /* KMP_ARCH_X86 || KMP_ARCH_X86_64 */

#ifdef KMP_BLOCK_SIGNALS
  status = sigfillset(&new_set);
  KMP_CHECK_SYSFAIL_ERRNO("sigfillset", status);
  status = pthread_sigmask(SIG_BLOCK, &new_set, &old_set);
  KMP_CHECK_SYSFAIL("pthread_sigmask", status);
#endif /* KMP_BLOCK_SIGNALS */

#if KMP_OS_LINUX || KMP_OS_DRAGONFLY || KMP_OS_FREEBSD || KMP_OS_NETBSD ||    \
    KMP_OS_OPENBSD || KMP_OS_HURD || KMP_OS_SOLARIS || KMP_OS_AIX
  if (__kmp_stkoffset > 0 && gtid > 0) {
    padding = KMP_ALLOCA(gtid * __kmp_stkoffset);
    (void)padding;
  }
#endif

  KMP_MB();
  __kmp_set_stack_info(gtid, (kmp_info_t *)thr);

  __kmp_check_stack_overlap((kmp_info_t *)thr);

  exit_val = __kmp_launch_thread((kmp_info_t *)thr);

#ifdef KMP_BLOCK_SIGNALS
  status = pthread_sigmask(SIG_SETMASK, &old_set, NULL);
  KMP_CHECK_SYSFAIL("pthread_sigmask", status);
#endif /* KMP_BLOCK_SIGNALS */

  return exit_val;
}

#if KMP_USE_MONITOR
/* The monitor thread controls all of the threads in the complex */

static void *__kmp_launch_monitor(void *thr) {
  int status, old_type, old_state;
#ifdef KMP_BLOCK_SIGNALS
  sigset_t new_set;
#endif /* KMP_BLOCK_SIGNALS */
  struct timespec interval;

  KMP_MB(); /* Flush all pending memory write invalidates. */

  KA_TRACE(10, ("__kmp_launch_monitor: #1 launched\n"));

  /* register us as the monitor thread */
  __kmp_gtid_set_specific(KMP_GTID_MONITOR);
#ifdef KMP_TDATA_GTID
  __kmp_gtid = KMP_GTID_MONITOR;
#endif

  KMP_MB();

#if USE_ITT_BUILD
  // Instruct Intel(R) Threading Tools to ignore monitor thread.
  __kmp_itt_thread_ignore();
#endif /* USE_ITT_BUILD */

  __kmp_set_stack_info(((kmp_info_t *)thr)->th.th_info.ds.ds_gtid,
                       (kmp_info_t *)thr);

  __kmp_check_stack_overlap((kmp_info_t *)thr);

#ifdef KMP_CANCEL_THREADS
  status = pthread_setcanceltype(PTHREAD_CANCEL_ASYNCHRONOUS, &old_type);
  KMP_CHECK_SYSFAIL("pthread_setcanceltype", status);
  // josh todo: isn't PTHREAD_CANCEL_ENABLE default for newly-created threads?
  status = pthread_setcancelstate(PTHREAD_CANCEL_ENABLE, &old_state);
  KMP_CHECK_SYSFAIL("pthread_setcancelstate", status);
#endif

#if KMP_REAL_TIME_FIX
  // This is a potential fix which allows applications with a real-time
  // scheduling policy to work. However, a decision about the fix has not been
  // made yet, so it is disabled by default.
  { // Was the program started with a real-time scheduling policy?
    int sched = sched_getscheduler(0);
    if (sched == SCHED_FIFO || sched == SCHED_RR) {
      // Yes, we are a part of a real-time application. Try to increase the
      // priority of the monitor.
      struct sched_param param;
      int max_priority = sched_get_priority_max(sched);
      int rc;
      KMP_WARNING(RealTimeSchedNotSupported);
      sched_getparam(0, &param);
      if (param.sched_priority < max_priority) {
        param.sched_priority += 1;
        rc = sched_setscheduler(0, sched, &param);
        if (rc != 0) {
          int error = errno;
          kmp_msg_t err_code = KMP_ERR(error);
          __kmp_msg(kmp_ms_warning, KMP_MSG(CantChangeMonitorPriority),
                    err_code, KMP_MSG(MonitorWillStarve), __kmp_msg_null);
          if (__kmp_generate_warnings == kmp_warnings_off) {
            __kmp_str_free(&err_code.str);
          }
        }
      } else {
        // We cannot abort here, because the number of CPUs may be enough for
        // all the threads, including the monitor thread, so the application
        // could potentially work...
        __kmp_msg(kmp_ms_warning, KMP_MSG(RunningAtMaxPriority),
                  KMP_MSG(MonitorWillStarve), KMP_HNT(RunningAtMaxPriority),
                  __kmp_msg_null);
      }
    }
    // AC: free thread that waits for monitor started
    TCW_4(__kmp_global.g.g_time.dt.t_value, 0);
  }
#endif // KMP_REAL_TIME_FIX

  KMP_MB(); /* Flush all pending memory write invalidates. */

  if (__kmp_monitor_wakeups == 1) {
    interval.tv_sec = 1;
    interval.tv_nsec = 0;
  } else {
    interval.tv_sec = 0;
    interval.tv_nsec = (KMP_NSEC_PER_SEC / __kmp_monitor_wakeups);
  }

  KA_TRACE(10, ("__kmp_launch_monitor: #2 monitor\n"));

  while (!TCR_4(__kmp_global.g.g_done)) {
    struct timespec now;
    struct timeval tval;

    /* This thread monitors the state of the system */

    KA_TRACE(15, ("__kmp_launch_monitor: update\n"));

    status = gettimeofday(&tval, NULL);
    KMP_CHECK_SYSFAIL_ERRNO("gettimeofday", status);
    TIMEVAL_TO_TIMESPEC(&tval, &now);

    now.tv_sec += interval.tv_sec;
    now.tv_nsec += interval.tv_nsec;

    if (now.tv_nsec >= KMP_NSEC_PER_SEC) {
      now.tv_sec += 1;
      now.tv_nsec -= KMP_NSEC_PER_SEC;
    }

    status = pthread_mutex_lock(&__kmp_wait_mx.m_mutex);
    KMP_CHECK_SYSFAIL("pthread_mutex_lock", status);
    // AC: the monitor should not fall asleep if g_done has been set
    if (!TCR_4(__kmp_global.g.g_done)) { // check once more under mutex
      status = pthread_cond_timedwait(&__kmp_wait_cv.c_cond,
                                      &__kmp_wait_mx.m_mutex, &now);
      if (status != 0) {
        if (status != ETIMEDOUT && status != EINTR) {
          KMP_SYSFAIL("pthread_cond_timedwait", status);
        }
      }
    }
    status = pthread_mutex_unlock(&__kmp_wait_mx.m_mutex);
    KMP_CHECK_SYSFAIL("pthread_mutex_unlock", status);

    TCW_4(__kmp_global.g.g_time.dt.t_value,
          TCR_4(__kmp_global.g.g_time.dt.t_value) + 1);

    KMP_MB(); /* Flush all pending memory write invalidates. */
  }

  KA_TRACE(10, ("__kmp_launch_monitor: #3 cleanup\n"));

#ifdef KMP_BLOCK_SIGNALS
  status = sigfillset(&new_set);
  KMP_CHECK_SYSFAIL_ERRNO("sigfillset", status);
  status = pthread_sigmask(SIG_UNBLOCK, &new_set, NULL);
  KMP_CHECK_SYSFAIL("pthread_sigmask", status);
#endif /* KMP_BLOCK_SIGNALS */

  KA_TRACE(10, ("__kmp_launch_monitor: #4 finished\n"));

  if (__kmp_global.g.g_abort != 0) {
    /* now we need to terminate the worker threads */
    /* the value of t_abort is the signal we caught */

    int gtid;

    KA_TRACE(10, ("__kmp_launch_monitor: #5 terminate sig=%d\n",
                  __kmp_global.g.g_abort));

    /* terminate the OpenMP worker threads */
    /* TODO this is not valid for sibling threads!!
     * the uber master might not be 0 anymore.. */
    for (gtid = 1; gtid < __kmp_threads_capacity; ++gtid)
      __kmp_terminate_thread(gtid);

    __kmp_cleanup();

    KA_TRACE(10, ("__kmp_launch_monitor: #6 raise sig=%d\n",
                  __kmp_global.g.g_abort));

    if (__kmp_global.g.g_abort > 0)
      raise(__kmp_global.g.g_abort);
  }

  KA_TRACE(10, ("__kmp_launch_monitor: #7 exit\n"));

  return thr;
}
#endif // KMP_USE_MONITOR

void __kmp_create_worker(int gtid, kmp_info_t *th, size_t stack_size) {
  pthread_t handle;
  pthread_attr_t thread_attr;
  int status;

  th->th.th_info.ds.ds_gtid = gtid;

#if KMP_STATS_ENABLED
  // sets up worker thread stats
  __kmp_acquire_tas_lock(&__kmp_stats_lock, gtid);

  // th->th.th_stats is used to transfer thread-specific stats-pointer to
  // __kmp_launch_worker. So when thread is created (goes into
  // __kmp_launch_worker) it will set its thread local pointer to
  // th->th.th_stats
  if (!KMP_UBER_GTID(gtid)) {
    th->th.th_stats = __kmp_stats_list->push_back(gtid);
  } else {
    // For root threads, __kmp_stats_thread_ptr is set in __kmp_register_root(),
    // so set the th->th.th_stats field to it.
    th->th.th_stats = __kmp_stats_thread_ptr;
  }
  __kmp_release_tas_lock(&__kmp_stats_lock, gtid);

#endif // KMP_STATS_ENABLED

  if (KMP_UBER_GTID(gtid)) {
    KA_TRACE(10, ("__kmp_create_worker: uber thread (%d)\n", gtid));
    th->th.th_info.ds.ds_thread = pthread_self();
    __kmp_set_stack_info(gtid, th);
    __kmp_check_stack_overlap(th);
    return;
  }

  KA_TRACE(10, ("__kmp_create_worker: try to create thread (%d)\n", gtid));

  KMP_MB(); /* Flush all pending memory write invalidates. */

#ifdef KMP_THREAD_ATTR
  status = pthread_attr_init(&thread_attr);
  if (status != 0) {
    __kmp_fatal(KMP_MSG(CantInitThreadAttrs), KMP_ERR(status), __kmp_msg_null);
  }
  status = pthread_attr_setdetachstate(&thread_attr, PTHREAD_CREATE_JOINABLE);
  if (status != 0) {
    __kmp_fatal(KMP_MSG(CantSetWorkerState), KMP_ERR(status), __kmp_msg_null);
  }

  /* Set stack size for this thread now.
     The multiple of 2 is there because on some machines, requesting an unusual
     stacksize causes the thread to have an offset before the dummy alloca()
     takes place to create the offset. Since we want the user to have a
     sufficient stacksize AND support a stack offset, we alloca() twice the
     offset so that the upcoming alloca() does not eliminate any premade offset,
     and also gives the user the stack space they requested for all threads */
  stack_size += gtid * __kmp_stkoffset * 2;
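  // For example, with __kmp_stkoffset = 8 KB, the thread with gtid 3 requests
  // 48 KB of extra stack here, while __kmp_launch_worker alloca()s only 24 KB
  // (gtid * __kmp_stkoffset), so the user still gets the full stacksize they
  // asked for below the offset.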

  KA_TRACE(10, ("__kmp_create_worker: T#%d, default stacksize = %lu bytes, "
                "__kmp_stksize = %lu bytes, final stacksize = %lu bytes\n",
                gtid, KMP_DEFAULT_STKSIZE, __kmp_stksize, stack_size));

#ifdef _POSIX_THREAD_ATTR_STACKSIZE
  status = pthread_attr_setstacksize(&thread_attr, stack_size);
#ifdef KMP_BACKUP_STKSIZE
  if (status != 0) {
    if (!__kmp_env_stksize) {
      stack_size = KMP_BACKUP_STKSIZE + gtid * __kmp_stkoffset;
      __kmp_stksize = KMP_BACKUP_STKSIZE;
      KA_TRACE(10, ("__kmp_create_worker: T#%d, default stacksize = %lu bytes, "
                    "__kmp_stksize = %lu bytes, (backup) final stacksize = %lu "
                    "bytes\n",
                    gtid, KMP_DEFAULT_STKSIZE, __kmp_stksize, stack_size));
      status = pthread_attr_setstacksize(&thread_attr, stack_size);
    }
  }
#endif /* KMP_BACKUP_STKSIZE */
  if (status != 0) {
    __kmp_fatal(KMP_MSG(CantSetWorkerStackSize, stack_size), KMP_ERR(status),
                KMP_HNT(ChangeWorkerStackSize), __kmp_msg_null);
  }
#endif /* _POSIX_THREAD_ATTR_STACKSIZE */

#endif /* KMP_THREAD_ATTR */

  status =
      pthread_create(&handle, &thread_attr, __kmp_launch_worker, (void *)th);
  if (status != 0 || !handle) { // ??? Why do we check handle??
#ifdef _POSIX_THREAD_ATTR_STACKSIZE
    if (status == EINVAL) {
      __kmp_fatal(KMP_MSG(CantSetWorkerStackSize, stack_size), KMP_ERR(status),
                  KMP_HNT(IncreaseWorkerStackSize), __kmp_msg_null);
    }
    if (status == ENOMEM) {
      __kmp_fatal(KMP_MSG(CantSetWorkerStackSize, stack_size), KMP_ERR(status),
                  KMP_HNT(DecreaseWorkerStackSize), __kmp_msg_null);
    }
#endif /* _POSIX_THREAD_ATTR_STACKSIZE */
    if (status == EAGAIN) {
      __kmp_fatal(KMP_MSG(NoResourcesForWorkerThread), KMP_ERR(status),
                  KMP_HNT(Decrease_NUM_THREADS), __kmp_msg_null);
    }
    KMP_SYSFAIL("pthread_create", status);
  }

  th->th.th_info.ds.ds_thread = handle;

#ifdef KMP_THREAD_ATTR
  status = pthread_attr_destroy(&thread_attr);
  if (status) {
    kmp_msg_t err_code = KMP_ERR(status);
    __kmp_msg(kmp_ms_warning, KMP_MSG(CantDestroyThreadAttrs), err_code,
              __kmp_msg_null);
    if (__kmp_generate_warnings == kmp_warnings_off) {
      __kmp_str_free(&err_code.str);
    }
  }
#endif /* KMP_THREAD_ATTR */

  KMP_MB(); /* Flush all pending memory write invalidates. */

  KA_TRACE(10, ("__kmp_create_worker: done creating thread (%d)\n", gtid));

} // __kmp_create_worker

#if KMP_USE_MONITOR
void __kmp_create_monitor(kmp_info_t *th) {
  pthread_t handle;
  pthread_attr_t thread_attr;
  size_t size;
  int status;
  int auto_adj_size = FALSE;

  if (__kmp_dflt_blocktime == KMP_MAX_BLOCKTIME) {
    // We don't need monitor thread in case of MAX_BLOCKTIME
    KA_TRACE(10, ("__kmp_create_monitor: skipping monitor thread because of "
                  "MAX blocktime\n"));
    th->th.th_info.ds.ds_tid = 0; // this makes reap_monitor no-op
    th->th.th_info.ds.ds_gtid = 0;
    return;
  }
  KA_TRACE(10, ("__kmp_create_monitor: try to create monitor\n"));

  KMP_MB(); /* Flush all pending memory write invalidates. */

  th->th.th_info.ds.ds_tid = KMP_GTID_MONITOR;
  th->th.th_info.ds.ds_gtid = KMP_GTID_MONITOR;
#if KMP_REAL_TIME_FIX
  TCW_4(__kmp_global.g.g_time.dt.t_value,
        -1); // Will use it for synchronization a bit later.
#else
  TCW_4(__kmp_global.g.g_time.dt.t_value, 0);
#endif // KMP_REAL_TIME_FIX

#ifdef KMP_THREAD_ATTR
  if (__kmp_monitor_stksize == 0) {
    __kmp_monitor_stksize = KMP_DEFAULT_MONITOR_STKSIZE;
    auto_adj_size = TRUE;
  }
  status = pthread_attr_init(&thread_attr);
  if (status != 0) {
    __kmp_fatal(KMP_MSG(CantInitThreadAttrs), KMP_ERR(status), __kmp_msg_null);
  }
  status = pthread_attr_setdetachstate(&thread_attr, PTHREAD_CREATE_JOINABLE);
  if (status != 0) {
    __kmp_fatal(KMP_MSG(CantSetMonitorState), KMP_ERR(status), __kmp_msg_null);
  }

#ifdef _POSIX_THREAD_ATTR_STACKSIZE
  status = pthread_attr_getstacksize(&thread_attr, &size);
  KMP_CHECK_SYSFAIL("pthread_attr_getstacksize", status);
#else
  size = __kmp_sys_min_stksize;
#endif /* _POSIX_THREAD_ATTR_STACKSIZE */
#endif /* KMP_THREAD_ATTR */

  if (__kmp_monitor_stksize == 0) {
    __kmp_monitor_stksize = KMP_DEFAULT_MONITOR_STKSIZE;
  }
  if (__kmp_monitor_stksize < __kmp_sys_min_stksize) {
    __kmp_monitor_stksize = __kmp_sys_min_stksize;
  }

  KA_TRACE(10, ("__kmp_create_monitor: default stacksize = %lu bytes, "
                "requested stacksize = %lu bytes\n",
                size, __kmp_monitor_stksize));

retry:

/* Set stack size for this thread now. */
#ifdef _POSIX_THREAD_ATTR_STACKSIZE
  KA_TRACE(10, ("__kmp_create_monitor: setting stacksize = %lu bytes,",
                __kmp_monitor_stksize));
  status = pthread_attr_setstacksize(&thread_attr, __kmp_monitor_stksize);
  if (status != 0) {
    if (auto_adj_size) {
      __kmp_monitor_stksize *= 2;
      goto retry;
    }
    kmp_msg_t err_code = KMP_ERR(status);
    __kmp_msg(kmp_ms_warning, // should this be fatal? BB
              KMP_MSG(CantSetMonitorStackSize, (long int)__kmp_monitor_stksize),
              err_code, KMP_HNT(ChangeMonitorStackSize), __kmp_msg_null);
    if (__kmp_generate_warnings == kmp_warnings_off) {
      __kmp_str_free(&err_code.str);
    }
  }
#endif /* _POSIX_THREAD_ATTR_STACKSIZE */

  status =
      pthread_create(&handle, &thread_attr, __kmp_launch_monitor, (void *)th);

  if (status != 0) {
#ifdef _POSIX_THREAD_ATTR_STACKSIZE
    if (status == EINVAL) {
      if (auto_adj_size && (__kmp_monitor_stksize < (size_t)0x40000000)) {
        __kmp_monitor_stksize *= 2;
        goto retry;
      }
      __kmp_fatal(KMP_MSG(CantSetMonitorStackSize, __kmp_monitor_stksize),
                  KMP_ERR(status), KMP_HNT(IncreaseMonitorStackSize),
                  __kmp_msg_null);
    }
    if (status == ENOMEM) {
      __kmp_fatal(KMP_MSG(CantSetMonitorStackSize, __kmp_monitor_stksize),
                  KMP_ERR(status), KMP_HNT(DecreaseMonitorStackSize),
                  __kmp_msg_null);
    }
#endif /* _POSIX_THREAD_ATTR_STACKSIZE */
    if (status == EAGAIN) {
      __kmp_fatal(KMP_MSG(NoResourcesForMonitorThread), KMP_ERR(status),
                  KMP_HNT(DecreaseNumberOfThreadsInUse), __kmp_msg_null);
    }
    KMP_SYSFAIL("pthread_create", status);
  }

  th->th.th_info.ds.ds_thread = handle;

#if KMP_REAL_TIME_FIX
  // Wait until the monitor thread has really started and set its *priority*.
  KMP_DEBUG_ASSERT(sizeof(kmp_uint32) ==
                   sizeof(__kmp_global.g.g_time.dt.t_value));
  __kmp_wait_4((kmp_uint32 volatile *)&__kmp_global.g.g_time.dt.t_value, -1,
               &__kmp_neq_4, NULL);
#endif // KMP_REAL_TIME_FIX

#ifdef KMP_THREAD_ATTR
  status = pthread_attr_destroy(&thread_attr);
  if (status != 0) {
    kmp_msg_t err_code = KMP_ERR(status);
    __kmp_msg(kmp_ms_warning, KMP_MSG(CantDestroyThreadAttrs), err_code,
              __kmp_msg_null);
    if (__kmp_generate_warnings == kmp_warnings_off) {
      __kmp_str_free(&err_code.str);
    }
  }
#endif

  KMP_MB(); /* Flush all pending memory write invalidates. */

  KA_TRACE(10, ("__kmp_create_monitor: monitor created %#.8lx\n",
                th->th.th_info.ds.ds_thread));

} // __kmp_create_monitor
#endif // KMP_USE_MONITOR

void __kmp_exit_thread(int exit_status) {
#if KMP_OS_WASI
// TODO: the wasm32-wasi-threads target does not yet support pthread_exit.
#else
  pthread_exit((void *)(intptr_t)exit_status);
#endif
} // __kmp_exit_thread

#if KMP_USE_MONITOR
void __kmp_resume_monitor();

extern "C" void __kmp_reap_monitor(kmp_info_t *th) {
  int status;
  void *exit_val;

  KA_TRACE(10, ("__kmp_reap_monitor: try to reap monitor thread with handle"
                " %#.8lx\n",
                th->th.th_info.ds.ds_thread));

  // If monitor has been created, its tid and gtid should be KMP_GTID_MONITOR.
  // If both tid and gtid are 0, it means the monitor did not ever start.
  // If both tid and gtid are KMP_GTID_DNE, the monitor has been shut down.
  KMP_DEBUG_ASSERT(th->th.th_info.ds.ds_tid == th->th.th_info.ds.ds_gtid);
  if (th->th.th_info.ds.ds_gtid != KMP_GTID_MONITOR) {
    KA_TRACE(10, ("__kmp_reap_monitor: monitor did not start, returning\n"));
    return;
  }

  KMP_MB(); /* Flush all pending memory write invalidates. */

  /* First, check to see whether the monitor thread exists to wake it up. This
     is to avoid performance problem when the monitor sleeps during
     blocktime-size interval */

  status = pthread_kill(th->th.th_info.ds.ds_thread, 0);
  if (status != ESRCH) {
    __kmp_resume_monitor(); // Wake up the monitor thread
  }
  KA_TRACE(10, ("__kmp_reap_monitor: try to join with monitor\n"));
  status = pthread_join(th->th.th_info.ds.ds_thread, &exit_val);
  if (exit_val != th) {
    __kmp_fatal(KMP_MSG(ReapMonitorError), KMP_ERR(status), __kmp_msg_null);
  }

  th->th.th_info.ds.ds_tid = KMP_GTID_DNE;
  th->th.th_info.ds.ds_gtid = KMP_GTID_DNE;

  KA_TRACE(10, ("__kmp_reap_monitor: done reaping monitor thread with handle"
                " %#.8lx\n",
                th->th.th_info.ds.ds_thread));

  KMP_MB(); /* Flush all pending memory write invalidates. */
}
#else
// Empty symbol to export (see exports_so.txt) when
// monitor thread feature is disabled
extern "C" void __kmp_reap_monitor(kmp_info_t *th) { (void)th; }
#endif // KMP_USE_MONITOR

void __kmp_reap_worker(kmp_info_t *th) {
  int status;
  void *exit_val;

  KMP_MB(); /* Flush all pending memory write invalidates. */

  KA_TRACE(
      10, ("__kmp_reap_worker: try to reap T#%d\n", th->th.th_info.ds.ds_gtid));

  status = pthread_join(th->th.th_info.ds.ds_thread, &exit_val);
#ifdef KMP_DEBUG
  /* Don't expose these to the user until we understand when they trigger */
  if (status != 0) {
    __kmp_fatal(KMP_MSG(ReapWorkerError), KMP_ERR(status), __kmp_msg_null);
  }
  if (exit_val != th) {
    KA_TRACE(10, ("__kmp_reap_worker: worker T#%d did not reap properly, "
                  "exit_val = %p\n",
                  th->th.th_info.ds.ds_gtid, exit_val));
  }
#else
  (void)status; // unused variable
#endif /* KMP_DEBUG */

  KA_TRACE(10, ("__kmp_reap_worker: done reaping T#%d\n",
                th->th.th_info.ds.ds_gtid));

  KMP_MB(); /* Flush all pending memory write invalidates. */
}

#if KMP_HANDLE_SIGNALS

static void __kmp_null_handler(int signo) {
  // Do nothing, for doing SIG_IGN-type actions.
} // __kmp_null_handler

static void __kmp_team_handler(int signo) {
  if (__kmp_global.g.g_abort == 0) {
/* Stage 1 signal handler, let's shut down all of the threads */
#ifdef KMP_DEBUG
    __kmp_debug_printf("__kmp_team_handler: caught signal = %d\n", signo);
#endif
    switch (signo) {
    case SIGHUP:
    case SIGINT:
    case SIGQUIT:
    case SIGILL:
    case SIGABRT:
    case SIGFPE:
    case SIGBUS:
    case SIGSEGV:
#ifdef SIGSYS
    case SIGSYS:
#endif
    case SIGTERM:
      if (__kmp_debug_buf) {
        __kmp_dump_debug_buffer();
      }
      __kmp_unregister_library(); // cleanup shared memory
      KMP_MB(); // Flush all pending memory write invalidates.
      TCW_4(__kmp_global.g.g_abort, signo);
      KMP_MB(); // Flush all pending memory write invalidates.
      TCW_4(__kmp_global.g.g_done, TRUE);
      KMP_MB(); // Flush all pending memory write invalidates.
      break;
    default:
#ifdef KMP_DEBUG
      __kmp_debug_printf("__kmp_team_handler: unknown signal type");
#endif
      break;
    }
  }
} // __kmp_team_handler

static void __kmp_sigaction(int signum, const struct sigaction *act,
                            struct sigaction *oldact) {
  int rc = sigaction(signum, act, oldact);
  KMP_CHECK_SYSFAIL_ERRNO("sigaction", rc);
}

static void __kmp_install_one_handler(int sig, sig_func_t handler_func,
                                      int parallel_init) {
  KMP_MB(); // Flush all pending memory write invalidates.
  KB_TRACE(60,
           ("__kmp_install_one_handler( %d, ..., %d )\n", sig, parallel_init));
  if (parallel_init) {
    struct sigaction new_action;
    struct sigaction old_action;
    new_action.sa_handler = handler_func;
    new_action.sa_flags = 0;
    sigfillset(&new_action.sa_mask);
    __kmp_sigaction(sig, &new_action, &old_action);
    if (old_action.sa_handler == __kmp_sighldrs[sig].sa_handler) {
      sigaddset(&__kmp_sigset, sig);
    } else {
      // Restore/keep user's handler if one previously installed.
      __kmp_sigaction(sig, &old_action, NULL);
    }
  } else {
    // Save initial/system signal handlers to see if user handlers installed.
    __kmp_sigaction(sig, NULL, &__kmp_sighldrs[sig]);
  }
  KMP_MB(); // Flush all pending memory write invalidates.
} // __kmp_install_one_handler

static void __kmp_remove_one_handler(int sig) {
  KB_TRACE(60, ("__kmp_remove_one_handler( %d )\n", sig));
  if (sigismember(&__kmp_sigset, sig)) {
    struct sigaction old;
    KMP_MB(); // Flush all pending memory write invalidates.
    __kmp_sigaction(sig, &__kmp_sighldrs[sig], &old);
    if ((old.sa_handler != __kmp_team_handler) &&
        (old.sa_handler != __kmp_null_handler)) {
      // Restore the user's signal handler.
      KB_TRACE(10, ("__kmp_remove_one_handler: oops, not our handler, "
                    "restoring: sig=%d\n",
                    sig));
      __kmp_sigaction(sig, &old, NULL);
    }
    sigdelset(&__kmp_sigset, sig);
    KMP_MB(); // Flush all pending memory write invalidates.
  }
} // __kmp_remove_one_handler

void __kmp_install_signals(int parallel_init) {
  KB_TRACE(10, ("__kmp_install_signals( %d )\n", parallel_init));
  if (__kmp_handle_signals || !parallel_init) {
    // If !parallel_init, we do not install handlers, just save original
    // handlers. Let us do it even if __kmp_handle_signals is 0.
    sigemptyset(&__kmp_sigset);
    __kmp_install_one_handler(SIGHUP, __kmp_team_handler, parallel_init);
    __kmp_install_one_handler(SIGINT, __kmp_team_handler, parallel_init);
    __kmp_install_one_handler(SIGQUIT, __kmp_team_handler, parallel_init);
    __kmp_install_one_handler(SIGILL, __kmp_team_handler, parallel_init);
    __kmp_install_one_handler(SIGABRT, __kmp_team_handler, parallel_init);
    __kmp_install_one_handler(SIGFPE, __kmp_team_handler, parallel_init);
    __kmp_install_one_handler(SIGBUS, __kmp_team_handler, parallel_init);
    __kmp_install_one_handler(SIGSEGV, __kmp_team_handler, parallel_init);
#ifdef SIGSYS
    __kmp_install_one_handler(SIGSYS, __kmp_team_handler, parallel_init);
#endif // SIGSYS
    __kmp_install_one_handler(SIGTERM, __kmp_team_handler, parallel_init);
#ifdef SIGPIPE
    __kmp_install_one_handler(SIGPIPE, __kmp_team_handler, parallel_init);
#endif // SIGPIPE
  }
} // __kmp_install_signals

void __kmp_remove_signals(void) {
  int sig;
  KB_TRACE(10, ("__kmp_remove_signals()\n"));
  for (sig = 1; sig < NSIG; ++sig) {
    __kmp_remove_one_handler(sig);
  }
} // __kmp_remove_signals

#endif // KMP_HANDLE_SIGNALS

void __kmp_enable(int new_state) {
#ifdef KMP_CANCEL_THREADS
  int status, old_state;
  status = pthread_setcancelstate(new_state, &old_state);
  KMP_CHECK_SYSFAIL("pthread_setcancelstate", status);
  KMP_DEBUG_ASSERT(old_state == PTHREAD_CANCEL_DISABLE);
#endif
}

void __kmp_disable(int *old_state) {
#ifdef KMP_CANCEL_THREADS
  int status;
  status = pthread_setcancelstate(PTHREAD_CANCEL_DISABLE, old_state);
  KMP_CHECK_SYSFAIL("pthread_setcancelstate", status);
#endif
}

static void __kmp_atfork_prepare(void) {
  __kmp_acquire_bootstrap_lock(&__kmp_initz_lock);
  __kmp_acquire_bootstrap_lock(&__kmp_forkjoin_lock);
}

static void __kmp_atfork_parent(void) {
  __kmp_release_bootstrap_lock(&__kmp_forkjoin_lock);
  __kmp_release_bootstrap_lock(&__kmp_initz_lock);
}

/* Reset the library so execution in the child starts "all over again" with
   clean data structures in initial states. Don't worry about freeing memory
   allocated by parent, just abandon it to be safe. */
static void __kmp_atfork_child(void) {
  __kmp_release_bootstrap_lock(&__kmp_forkjoin_lock);
  __kmp_release_bootstrap_lock(&__kmp_initz_lock);
  /* TODO make sure this is done right for nested/sibling */
  // ATT: Memory leaks are here? TODO: Check it and fix.
  /* KMP_ASSERT( 0 ); */

  ++__kmp_fork_count;

#if KMP_AFFINITY_SUPPORTED
#if KMP_OS_LINUX || KMP_OS_FREEBSD || KMP_OS_NETBSD || KMP_OS_DRAGONFLY ||    \
    KMP_OS_AIX
  // reset the affinity in the child to the initial thread
  // affinity in the parent
  kmp_set_thread_affinity_mask_initial();
#endif
  // Set default not to bind threads tightly in the child (we're expecting
  // over-subscription after the fork and this can improve things for
  // scripting languages that use OpenMP inside process-parallel code).
  if (__kmp_nested_proc_bind.bind_types != NULL) {
    __kmp_nested_proc_bind.bind_types[0] = proc_bind_false;
  }
  for (kmp_affinity_t *affinity : __kmp_affinities)
    *affinity = KMP_AFFINITY_INIT(affinity->env_var);
  __kmp_affin_fullMask = nullptr;
  __kmp_affin_origMask = nullptr;
  __kmp_topology = nullptr;
#endif // KMP_AFFINITY_SUPPORTED

#if KMP_USE_MONITOR
  __kmp_init_monitor = 0;
#endif
  __kmp_init_parallel = FALSE;
  __kmp_init_middle = FALSE;
  __kmp_init_serial = FALSE;
  TCW_4(__kmp_init_gtid, FALSE);
  __kmp_init_common = FALSE;

  TCW_4(__kmp_init_user_locks, FALSE);
#if !KMP_USE_DYNAMIC_LOCK
  __kmp_user_lock_table.used = 1;
  __kmp_user_lock_table.allocated = 0;
  __kmp_user_lock_table.table = NULL;
  __kmp_lock_blocks = NULL;
#endif

  __kmp_all_nth = 0;
  TCW_4(__kmp_nth, 0);

  __kmp_thread_pool = NULL;
  __kmp_thread_pool_insert_pt = NULL;
  __kmp_team_pool = NULL;

  /* Must actually zero all the *cache arguments passed to __kmpc_threadprivate
     here so threadprivate doesn't use stale data */
  KA_TRACE(10, ("__kmp_atfork_child: checking cache address list %p\n",
                __kmp_threadpriv_cache_list));

  while (__kmp_threadpriv_cache_list != NULL) {

    if (*__kmp_threadpriv_cache_list->addr != NULL) {
      KC_TRACE(50, ("__kmp_atfork_child: zeroing cache at address %p\n",
                    &(*__kmp_threadpriv_cache_list->addr)));

      *__kmp_threadpriv_cache_list->addr = NULL;
    }
    __kmp_threadpriv_cache_list = __kmp_threadpriv_cache_list->next;
  }

  __kmp_init_runtime = FALSE;

  /* reset statically initialized locks */
  __kmp_init_bootstrap_lock(&__kmp_initz_lock);
  __kmp_init_bootstrap_lock(&__kmp_stdio_lock);
  __kmp_init_bootstrap_lock(&__kmp_console_lock);
  __kmp_init_bootstrap_lock(&__kmp_task_team_lock);

#if USE_ITT_BUILD
  __kmp_itt_reset(); // reset ITT's global state
#endif /* USE_ITT_BUILD */

  {
    // Child processes often get terminated without any use of OpenMP. That
    // might cause the mapped shared memory file to be left unattended. Thus we
    // postpone library registration till middle initialization in the child
    // process.
    __kmp_need_register_serial = FALSE;
    __kmp_serial_initialize();
  }

  /* This is necessary to make sure no stale data is left around */
  /* AC: customers complain that we use unsafe routines in the atfork
     handler. Mathworks: dlsym() is unsafe. We call dlsym and dlopen
     in dynamic_link when checking the presence of the shared tbbmalloc
     library. Suggestion is to make the library initialization lazier,
     similar to what was done for __kmpc_begin(). */
  // TODO: synchronize all static initializations with regular library
  // startup; look at kmp_global.cpp and etc.
  //__kmp_internal_begin ();
}

void __kmp_register_atfork(void) {
  if (__kmp_need_register_atfork) {
#if !KMP_OS_WASI
    int status = pthread_atfork(__kmp_atfork_prepare, __kmp_atfork_parent,
                                __kmp_atfork_child);
    KMP_CHECK_SYSFAIL("pthread_atfork", status);
#endif
    __kmp_need_register_atfork = FALSE;
  }
}

void __kmp_suspend_initialize(void) {
  int status;
  status = pthread_mutexattr_init(&__kmp_suspend_mutex_attr);
  KMP_CHECK_SYSFAIL("pthread_mutexattr_init", status);
  status = pthread_condattr_init(&__kmp_suspend_cond_attr);
  KMP_CHECK_SYSFAIL("pthread_condattr_init", status);
}

void __kmp_suspend_initialize_thread(kmp_info_t *th) {
  int old_value = KMP_ATOMIC_LD_RLX(&th->th.th_suspend_init_count);
  int new_value = __kmp_fork_count + 1;
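  // th_suspend_init_count == __kmp_fork_count + 1 means the suspend objects
  // are already initialized for this fork generation; -1 marks an
  // initialization that is currently in progress on another thread.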
  // Return if already initialized
  if (old_value == new_value)
    return;
  // Wait, then return if being initialized
  if (old_value == -1 || !__kmp_atomic_compare_store(
                             &th->th.th_suspend_init_count, old_value, -1)) {
    while (KMP_ATOMIC_LD_ACQ(&th->th.th_suspend_init_count) != new_value) {
      KMP_CPU_PAUSE();
    }
  } else {
    // Claim to be the initializer and do initializations
    int status;
    status = pthread_cond_init(&th->th.th_suspend_cv.c_cond,
                               &__kmp_suspend_cond_attr);
    KMP_CHECK_SYSFAIL("pthread_cond_init", status);
    status = pthread_mutex_init(&th->th.th_suspend_mx.m_mutex,
                                &__kmp_suspend_mutex_attr);
    KMP_CHECK_SYSFAIL("pthread_mutex_init", status);
    KMP_ATOMIC_ST_REL(&th->th.th_suspend_init_count, new_value);
  }
}

void __kmp_suspend_uninitialize_thread(kmp_info_t *th) {
  if (KMP_ATOMIC_LD_ACQ(&th->th.th_suspend_init_count) > __kmp_fork_count) {
    /* this means we have initialized the suspension pthread objects for this
       thread in this instance of the process */
    int status;

    status = pthread_cond_destroy(&th->th.th_suspend_cv.c_cond);
    if (status != 0 && status != EBUSY) {
      KMP_SYSFAIL("pthread_cond_destroy", status);
    }
    status = pthread_mutex_destroy(&th->th.th_suspend_mx.m_mutex);
    if (status != 0 && status != EBUSY) {
      KMP_SYSFAIL("pthread_mutex_destroy", status);
    }
    --th->th.th_suspend_init_count;
    KMP_DEBUG_ASSERT(KMP_ATOMIC_LD_RLX(&th->th.th_suspend_init_count) ==
                     __kmp_fork_count);
  }
}

// return true if lock obtained, false otherwise
int __kmp_try_suspend_mx(kmp_info_t *th) {
  return (pthread_mutex_trylock(&th->th.th_suspend_mx.m_mutex) == 0);
}

void __kmp_lock_suspend_mx(kmp_info_t *th) {
  int status = pthread_mutex_lock(&th->th.th_suspend_mx.m_mutex);
  KMP_CHECK_SYSFAIL("pthread_mutex_lock", status);
}

void __kmp_unlock_suspend_mx(kmp_info_t *th) {
  int status = pthread_mutex_unlock(&th->th.th_suspend_mx.m_mutex);
  KMP_CHECK_SYSFAIL("pthread_mutex_unlock", status);
}

/* This routine puts the calling thread to sleep after setting the
   sleep bit for the indicated flag variable to true. */
template <class C>
static inline void __kmp_suspend_template(int th_gtid, C *flag) {
  KMP_TIME_DEVELOPER_PARTITIONED_BLOCK(USER_suspend);
  kmp_info_t *th = __kmp_threads[th_gtid];
  int status;
  typename C::flag_t old_spin;

  KF_TRACE(30, ("__kmp_suspend_template: T#%d enter for flag = %p\n", th_gtid,
                flag->get()));

  __kmp_suspend_initialize_thread(th);

  __kmp_lock_suspend_mx(th);

  KF_TRACE(10, ("__kmp_suspend_template: T#%d setting sleep bit for spin(%p)\n",
                th_gtid, flag->get()));

  /* TODO: shouldn't this use release semantics to ensure that
     __kmp_suspend_initialize_thread gets called first? */
  old_spin = flag->set_sleeping();
  TCW_PTR(th->th.th_sleep_loc, (void *)flag);
  th->th.th_sleep_loc_type = flag->get_type();
  if (__kmp_dflt_blocktime == KMP_MAX_BLOCKTIME &&
      __kmp_pause_status != kmp_soft_paused) {
    flag->unset_sleeping();
    TCW_PTR(th->th.th_sleep_loc, NULL);
    th->th.th_sleep_loc_type = flag_unset;
    __kmp_unlock_suspend_mx(th);
    return;
  }
  KF_TRACE(5, ("__kmp_suspend_template: T#%d set sleep bit for spin(%p)==%x,"
               " was %x\n",
               th_gtid, flag->get(), flag->load(), old_spin));

  if (flag->done_check_val(old_spin) || flag->done_check()) {
    flag->unset_sleeping();
    TCW_PTR(th->th.th_sleep_loc, NULL);
    th->th.th_sleep_loc_type = flag_unset;
    KF_TRACE(5, ("__kmp_suspend_template: T#%d false alarm, reset sleep bit "
                 "for spin(%p)\n",
                 th_gtid, flag->get()));
  } else {
    /* Encapsulate in a loop as the documentation states that this may
       "with low probability" return when the condition variable has
       not been signaled or broadcast */
    int deactivated = FALSE;

    while (flag->is_sleeping()) {
#ifdef DEBUG_SUSPEND
      char buffer[128];
      __kmp_suspend_count++;
      __kmp_print_cond(buffer, &th->th.th_suspend_cv);
      __kmp_printf("__kmp_suspend_template: suspending T#%d: %s\n", th_gtid,
                   buffer);
#endif
      // Mark the thread as no longer active (only in the first iteration of
      // the loop).
      if (!deactivated) {
        th->th.th_active = FALSE;
        if (th->th.th_active_in_pool) {
          th->th.th_active_in_pool = FALSE;
          KMP_ATOMIC_DEC(&__kmp_thread_pool_active_nth);
          KMP_DEBUG_ASSERT(TCR_4(__kmp_thread_pool_active_nth) >= 0);
        }
        deactivated = TRUE;
      }

      KMP_DEBUG_ASSERT(th->th.th_sleep_loc);
      KMP_DEBUG_ASSERT(flag->get_type() == th->th.th_sleep_loc_type);

#if USE_SUSPEND_TIMEOUT
      struct timespec now;
      struct timeval tval;
      int msecs;

      status = gettimeofday(&tval, NULL);
      KMP_CHECK_SYSFAIL_ERRNO("gettimeofday", status);
      TIMEVAL_TO_TIMESPEC(&tval, &now);

      msecs = (4 * __kmp_dflt_blocktime) + 200;
      now.tv_sec += msecs / 1000;
      now.tv_nsec += (msecs % 1000) * 1000;

      KF_TRACE(15, ("__kmp_suspend_template: T#%d about to perform "
                    "pthread_cond_timedwait\n",
                    th_gtid));
      status = pthread_cond_timedwait(&th->th.th_suspend_cv.c_cond,
                                      &th->th.th_suspend_mx.m_mutex, &now);
#else
      KF_TRACE(15, ("__kmp_suspend_template: T#%d about to perform"
                    " pthread_cond_wait\n",
                    th_gtid));
      status = pthread_cond_wait(&th->th.th_suspend_cv.c_cond,
                                 &th->th.th_suspend_mx.m_mutex);
#endif // USE_SUSPEND_TIMEOUT

      if ((status != 0) && (status != EINTR) && (status != ETIMEDOUT)) {
        KMP_SYSFAIL("pthread_cond_wait", status);
      }

      KMP_DEBUG_ASSERT(flag->get_type() == flag->get_ptr_type());

      if (!flag->is_sleeping() &&
          ((status == EINTR) || (status == ETIMEDOUT))) {
        // if interrupt or timeout, and thread is no longer sleeping, we need
        // to make sure sleep_loc gets reset; however, this shouldn't be
        // needed if we woke up with resume
        flag->unset_sleeping();
        TCW_PTR(th->th.th_sleep_loc, NULL);
        th->th.th_sleep_loc_type = flag_unset;
      }
#ifdef KMP_DEBUG
      if (status == ETIMEDOUT) {
        if (flag->is_sleeping()) {
          KF_TRACE(100,
                   ("__kmp_suspend_template: T#%d timeout wakeup\n", th_gtid));
        } else {
          KF_TRACE(2, ("__kmp_suspend_template: T#%d timeout wakeup, sleep bit "
                       "not set!\n",
                       th_gtid));
          TCW_PTR(th->th.th_sleep_loc, NULL);
          th->th.th_sleep_loc_type = flag_unset;
        }
      } else if (flag->is_sleeping()) {
        KF_TRACE(100,
                 ("__kmp_suspend_template: T#%d spurious wakeup\n", th_gtid));
      }
#endif
    } // while

    // Mark the thread as active again (if it was previously marked as
    // inactive)
    if (deactivated) {
      th->th.th_active = TRUE;
      if (TCR_4(th->th.th_in_pool)) {
        KMP_ATOMIC_INC(&__kmp_thread_pool_active_nth);
        th->th.th_active_in_pool = TRUE;
      }
    }
  }
  // We may have had the loop variable set before entering the loop body;
  // so we need to reset sleep_loc.
  TCW_PTR(th->th.th_sleep_loc, NULL);
  th->th.th_sleep_loc_type = flag_unset;

  KMP_DEBUG_ASSERT(!flag->is_sleeping());
  KMP_DEBUG_ASSERT(!th->th.th_sleep_loc);
#ifdef DEBUG_SUSPEND
  {
    char buffer[128];
    __kmp_print_cond(buffer, &th->th.th_suspend_cv);
    __kmp_printf("__kmp_suspend_template: T#%d has awakened: %s\n", th_gtid,
                 buffer);
  }
#endif

  __kmp_unlock_suspend_mx(th);
  KF_TRACE(30, ("__kmp_suspend_template: T#%d exit\n", th_gtid));
}

template <bool C, bool S>
void __kmp_suspend_32(int th_gtid, kmp_flag_32<C, S> *flag) {
  __kmp_suspend_template(th_gtid, flag);
}
template <bool C, bool S>
void __kmp_suspend_64(int th_gtid, kmp_flag_64<C, S> *flag) {
  __kmp_suspend_template(th_gtid, flag);
}
template <bool C, bool S>
void __kmp_atomic_suspend_64(int th_gtid, kmp_atomic_flag_64<C, S> *flag) {
  __kmp_suspend_template(th_gtid, flag);
}
void __kmp_suspend_oncore(int th_gtid, kmp_flag_oncore *flag) {
  __kmp_suspend_template(th_gtid, flag);
}

template void __kmp_suspend_32<false, false>(int, kmp_flag_32<false, false> *);
template void __kmp_suspend_64<false, true>(int, kmp_flag_64<false, true> *);
template void __kmp_suspend_64<true, false>(int, kmp_flag_64<true, false> *);
template void
__kmp_atomic_suspend_64<false, true>(int, kmp_atomic_flag_64<false, true> *);
template void
__kmp_atomic_suspend_64<true, false>(int, kmp_atomic_flag_64<true, false> *);

/* This routine signals the thread specified by target_gtid to wake up
   after setting the sleep bit indicated by the flag argument to FALSE.
   The target thread must already have called __kmp_suspend_template() */
template <class C>
static inline void __kmp_resume_template(int target_gtid, C *flag) {
  KMP_TIME_DEVELOPER_PARTITIONED_BLOCK(USER_resume);
  kmp_info_t *th = __kmp_threads[target_gtid];
  int status;

#ifdef KMP_DEBUG
  int gtid = TCR_4(__kmp_init_gtid) ? __kmp_get_gtid() : -1;
#endif

  KF_TRACE(30, ("__kmp_resume_template: T#%d wants to wakeup T#%d enter\n",
                gtid, target_gtid));
  KMP_DEBUG_ASSERT(gtid != target_gtid);

  __kmp_suspend_initialize_thread(th);

  __kmp_lock_suspend_mx(th);

  if (!flag || flag != th->th.th_sleep_loc) {
    // coming from __kmp_null_resume_wrapper, or thread is now sleeping on a
    // different location; wake up at new location
    flag = (C *)CCAST(void *, th->th.th_sleep_loc);
  }

  // First, check if the flag is null or its type has changed. If so, someone
  // else woke it up.
  if (!flag) { // Thread doesn't appear to be sleeping on anything
    KF_TRACE(5, ("__kmp_resume_template: T#%d exiting, thread T#%d already "
                 "awake: flag(%p)\n",
                 gtid, target_gtid, (void *)NULL));
    __kmp_unlock_suspend_mx(th);
    return;
  } else if (flag->get_type() != th->th.th_sleep_loc_type) {
    // Flag type does not appear to match this function template; possibly the
    // thread is sleeping on something else. Try null resume again.
    KF_TRACE(
        5,
        ("__kmp_resume_template: T#%d retrying, thread T#%d Mismatch flag(%p), "
         "spin(%p) type=%d ptr_type=%d\n",
         gtid, target_gtid, flag, flag->get(), flag->get_type(),
         th->th.th_sleep_loc_type));
    __kmp_unlock_suspend_mx(th);
    __kmp_null_resume_wrapper(th);
    return;
  } else { // if multiple threads are sleeping, flag should be internally
    // referring to a specific thread here
    if (!flag->is_sleeping()) {
      KF_TRACE(5, ("__kmp_resume_template: T#%d exiting, thread T#%d already "
                   "awake: flag(%p): %u\n",
                   gtid, target_gtid, flag->get(), (unsigned int)flag->load()));
      __kmp_unlock_suspend_mx(th);
      return;
    }
  }
  KMP_DEBUG_ASSERT(flag);
  flag->unset_sleeping();
  TCW_PTR(th->th.th_sleep_loc, NULL);
  th->th.th_sleep_loc_type = flag_unset;

  KF_TRACE(5, ("__kmp_resume_template: T#%d about to wakeup T#%d, reset "
               "sleep bit for flag's loc(%p): %u\n",
               gtid, target_gtid, flag->get(), (unsigned int)flag->load()));

#ifdef DEBUG_SUSPEND
  {
    char buffer[128];
    __kmp_print_cond(buffer, &th->th.th_suspend_cv);
    __kmp_printf("__kmp_resume_template: T#%d resuming T#%d: %s\n", gtid,
                 target_gtid, buffer);
  }
#endif
  status = pthread_cond_signal(&th->th.th_suspend_cv.c_cond);
  KMP_CHECK_SYSFAIL("pthread_cond_signal", status);
  __kmp_unlock_suspend_mx(th);
  KF_TRACE(30, ("__kmp_resume_template: T#%d exiting after signaling wake up"
                " for T#%d\n",
                gtid, target_gtid));
}

template <bool C, bool S>
void __kmp_resume_32(int target_gtid, kmp_flag_32<C, S> *flag) {
  __kmp_resume_template(target_gtid, flag);
}
template <bool C, bool S>
void __kmp_resume_64(int target_gtid, kmp_flag_64<C, S> *flag) {
  __kmp_resume_template(target_gtid, flag);
}
template <bool C, bool S>
void __kmp_atomic_resume_64(int target_gtid, kmp_atomic_flag_64<C, S> *flag) {
  __kmp_resume_template(target_gtid, flag);
}
void __kmp_resume_oncore(int target_gtid, kmp_flag_oncore *flag) {
  __kmp_resume_template(target_gtid, flag);
}

template void __kmp_resume_32<false, true>(int, kmp_flag_32<false, true> *);
template void __kmp_resume_32<false, false>(int, kmp_flag_32<false, false> *);
template void __kmp_resume_64<false, true>(int, kmp_flag_64<false, true> *);
template void
__kmp_atomic_resume_64<false, true>(int, kmp_atomic_flag_64<false, true> *);

#if KMP_USE_MONITOR
void __kmp_resume_monitor() {
  KMP_TIME_DEVELOPER_PARTITIONED_BLOCK(USER_resume);
  int status;
#ifdef KMP_DEBUG
  int gtid = TCR_4(__kmp_init_gtid) ? __kmp_get_gtid() : -1;
  KF_TRACE(30, ("__kmp_resume_monitor: T#%d wants to wakeup T#%d enter\n", gtid,
                KMP_GTID_MONITOR));
  KMP_DEBUG_ASSERT(gtid != KMP_GTID_MONITOR);
#endif
  status = pthread_mutex_lock(&__kmp_wait_mx.m_mutex);
  KMP_CHECK_SYSFAIL("pthread_mutex_lock", status);
#ifdef DEBUG_SUSPEND
  {
    char buffer[128];
    __kmp_print_cond(buffer, &__kmp_wait_cv.c_cond);
    __kmp_printf("__kmp_resume_monitor: T#%d resuming T#%d: %s\n", gtid,
                 KMP_GTID_MONITOR, buffer);
  }
#endif
  status = pthread_cond_signal(&__kmp_wait_cv.c_cond);
  KMP_CHECK_SYSFAIL("pthread_cond_signal", status);
  status = pthread_mutex_unlock(&__kmp_wait_mx.m_mutex);
  KMP_CHECK_SYSFAIL("pthread_mutex_unlock", status);
  KF_TRACE(30, ("__kmp_resume_monitor: T#%d exiting after signaling wake up"
                " for T#%d\n",
                gtid, KMP_GTID_MONITOR));
}
#endif // KMP_USE_MONITOR

void __kmp_yield() { sched_yield(); }

void __kmp_gtid_set_specific(int gtid) {
  if (__kmp_init_gtid) {
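    // gtid is stored biased by +1: pthread_getspecific() returns 0 (NULL) for
    // keys that were never set, so 0 must remain distinguishable from a valid
    // gtid of 0 (see __kmp_gtid_get_specific below).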
1791 int status;
1792 status = pthread_setspecific(__kmp_gtid_threadprivate_key,
1793 (void *)(intptr_t)(gtid + 1));
1794 KMP_CHECK_SYSFAIL("pthread_setspecific", status);
1795 } else {
1796 KA_TRACE(50, ("__kmp_gtid_set_specific: runtime shutdown, returning\n"));
1797 }
1798}
1799
1801 int gtid;
1802 if (!__kmp_init_gtid) {
1803 KA_TRACE(50, ("__kmp_gtid_get_specific: runtime shutdown, returning "
1804 "KMP_GTID_SHUTDOWN\n"));
1805 return KMP_GTID_SHUTDOWN;
1806 }
1807 gtid = (int)(size_t)pthread_getspecific(__kmp_gtid_threadprivate_key);
1808 if (gtid == 0) {
1809 gtid = KMP_GTID_DNE;
1810 } else {
1811 gtid--;
1812 }
1813 KA_TRACE(50, ("__kmp_gtid_get_specific: key:%d gtid:%d\n",
1814 __kmp_gtid_threadprivate_key, gtid));
1815 return gtid;
1816}
1817
1818 double __kmp_read_cpu_time(void) {
1819 /*clock_t t;*/
1820 struct tms buffer;
1821
1822 /*t =*/times(&buffer);
1823
1824 return (double)(buffer.tms_utime + buffer.tms_cutime) /
1825 (double)CLOCKS_PER_SEC;
1826}
1827
1828 int __kmp_read_system_info(struct kmp_sys_info *info) {
1829 int status;
1830 struct rusage r_usage;
1831
1832 memset(info, 0, sizeof(*info));
1833
1834 status = getrusage(RUSAGE_SELF, &r_usage);
1835 KMP_CHECK_SYSFAIL_ERRNO("getrusage", status);
1836
1837#if !KMP_OS_WASI
1838 // The maximum resident set size utilized (in kilobytes)
1839 info->maxrss = r_usage.ru_maxrss;
1840 // The number of page faults serviced without any I/O
1841 info->minflt = r_usage.ru_minflt;
1842 // The number of page faults serviced that required I/O
1843 info->majflt = r_usage.ru_majflt;
1844 // The number of times a process was "swapped" out of memory
1845 info->nswap = r_usage.ru_nswap;
1846 // The number of times the file system had to perform input
1847 info->inblock = r_usage.ru_inblock;
1848 // The number of times the file system had to perform output
1849 info->oublock = r_usage.ru_oublock;
1850 // The number of times a context switch was performed voluntarily
1851 info->nvcsw = r_usage.ru_nvcsw;
1852 // The number of times a context switch was forced
1853 info->nivcsw = r_usage.ru_nivcsw;
1854#endif
1855
1856 return (status != 0);
1857}
1858
1859void __kmp_read_system_time(double *delta) {
1860 double t_ns;
1861 struct timeval tval;
1862 struct timespec stop;
1863 int status;
1864
1865 status = gettimeofday(&tval, NULL);
1866 KMP_CHECK_SYSFAIL_ERRNO("gettimeofday", status);
1867 TIMEVAL_TO_TIMESPEC(&tval, &stop);
1868 t_ns = (double)(TS2NS(stop) - TS2NS(__kmp_sys_timer_data.start));
1869 *delta = (t_ns * 1e-9);
1870}
1871
1872 void __kmp_clear_system_time(void) {
1873 struct timeval tval;
1874 int status;
1875 status = gettimeofday(&tval, NULL);
1876 KMP_CHECK_SYSFAIL_ERRNO("gettimeofday", status);
1878}
1879
1880static int __kmp_get_xproc(void) {
1881
1882 int r = 0;
1883
1884#if KMP_OS_LINUX
1885
1886 __kmp_type_convert(sysconf(_SC_NPROCESSORS_CONF), &(r));
1887
1888#elif KMP_OS_DRAGONFLY || KMP_OS_FREEBSD || KMP_OS_NETBSD || KMP_OS_OPENBSD || \
1889 KMP_OS_HURD || KMP_OS_SOLARIS || KMP_OS_WASI || KMP_OS_AIX
1890
1891 __kmp_type_convert(sysconf(_SC_NPROCESSORS_ONLN), &(r));
1892
1893#elif KMP_OS_DARWIN
1894
1895 size_t len = sizeof(r);
1896 sysctlbyname("hw.logicalcpu", &r, &len, NULL, 0);
1897
1898#else
1899
1900#error "Unknown or unsupported OS."
1901
1902#endif
1903
1904 return r > 0 ? r : 2; /* guess value of 2 if OS told us 0 */
1905
1906} // __kmp_get_xproc
1907
1908int __kmp_read_from_file(char const *path, char const *format, ...) {
1909 int result;
1910 va_list args;
1911
1912 va_start(args, format);
1913 FILE *f = fopen(path, "rb");
1914 if (f == NULL) {
1915 va_end(args);
1916 return 0;
1917 }
1918 result = vfscanf(f, format, args);
1919 fclose(f);
1920 va_end(args);
1921
1922 return result;
1923}
1924
1925 void __kmp_runtime_initialize(void) {
1926 int status;
1927 pthread_mutexattr_t mutex_attr;
1928 pthread_condattr_t cond_attr;
1929
1930 if (__kmp_init_runtime) {
1931 return;
1932 }
1933
1934#if (KMP_ARCH_X86 || KMP_ARCH_X86_64)
1935 if (!__kmp_cpuinfo.initialized) {
1936 __kmp_query_cpuid(&__kmp_cpuinfo);
1937 }
1938#endif /* KMP_ARCH_X86 || KMP_ARCH_X86_64 */
1939
1940 __kmp_xproc = __kmp_get_xproc();
1941
1942#if !KMP_32_BIT_ARCH
1943 struct rlimit rlim;
1944 // read stack size of calling thread, save it as default for worker threads;
1945 // this should be done before reading environment variables
1946 status = getrlimit(RLIMIT_STACK, &rlim);
1947 if (status == 0) { // success?
1948 __kmp_stksize = rlim.rlim_cur;
1949 __kmp_check_stksize(&__kmp_stksize); // check value and adjust if needed
1950 }
1951#endif /* KMP_32_BIT_ARCH */
1952
1953 if (sysconf(_SC_THREADS)) {
1954
1955 /* Query the maximum number of threads */
1956 __kmp_type_convert(sysconf(_SC_THREAD_THREADS_MAX), &(__kmp_sys_max_nth));
1957#ifdef __ve__
1958 if (__kmp_sys_max_nth == -1) {
1959 // VE's pthread supports only up to 64 threads per VE process.
1960 // So we use KMP_MAX_NTH (predefined as 64) here.
1961 __kmp_sys_max_nth = KMP_MAX_NTH;
1962 }
1963#else
1964 if (__kmp_sys_max_nth == -1) {
1965 /* Unlimited threads for NPTL */
1966 __kmp_sys_max_nth = INT_MAX;
1967 } else if (__kmp_sys_max_nth <= 1) {
1968 /* Can't tell, just use PTHREAD_THREADS_MAX */
1969 __kmp_sys_max_nth = KMP_MAX_NTH;
1970 }
1971#endif
1972
1973 /* Query the minimum stack size */
1974 __kmp_sys_min_stksize = sysconf(_SC_THREAD_STACK_MIN);
1975 if (__kmp_sys_min_stksize <= 1) {
1976 __kmp_sys_min_stksize = KMP_MIN_STKSIZE;
1977 }
1978 }
1979
1980 /* Set up minimum number of threads to switch to TLS gtid */
1981 __kmp_tls_gtid_min = KMP_TLS_GTID_MIN;
1982
1983 status = pthread_key_create(&__kmp_gtid_threadprivate_key,
1984 __kmp_internal_end_dest);
1985 KMP_CHECK_SYSFAIL("pthread_key_create", status);
1986 status = pthread_mutexattr_init(&mutex_attr);
1987 KMP_CHECK_SYSFAIL("pthread_mutexattr_init", status);
1988 status = pthread_mutex_init(&__kmp_wait_mx.m_mutex, &mutex_attr);
1989 KMP_CHECK_SYSFAIL("pthread_mutex_init", status);
1990 status = pthread_mutexattr_destroy(&mutex_attr);
1991 KMP_CHECK_SYSFAIL("pthread_mutexattr_destroy", status);
1992 status = pthread_condattr_init(&cond_attr);
1993 KMP_CHECK_SYSFAIL("pthread_condattr_init", status);
1994 status = pthread_cond_init(&__kmp_wait_cv.c_cond, &cond_attr);
1995 KMP_CHECK_SYSFAIL("pthread_cond_init", status);
1996 status = pthread_condattr_destroy(&cond_attr);
1997 KMP_CHECK_SYSFAIL("pthread_condattr_destroy", status);
1998#if USE_ITT_BUILD
1999 __kmp_itt_initialize();
2000#endif /* USE_ITT_BUILD */
2001
2003}
2004
2005 void __kmp_runtime_destroy(void) {
2006 int status;
2007
2008 if (!__kmp_init_runtime) {
2009 return; // Nothing to do.
2010 }
2011
2012#if USE_ITT_BUILD
2013 __kmp_itt_destroy();
2014#endif /* USE_ITT_BUILD */
2015
2016 status = pthread_key_delete(__kmp_gtid_threadprivate_key);
2017 KMP_CHECK_SYSFAIL("pthread_key_delete", status);
2018
2019 status = pthread_mutex_destroy(&__kmp_wait_mx.m_mutex);
2020 if (status != 0 && status != EBUSY) {
2021 KMP_SYSFAIL("pthread_mutex_destroy", status);
2022 }
2023 status = pthread_cond_destroy(&__kmp_wait_cv.c_cond);
2024 if (status != 0 && status != EBUSY) {
2025 KMP_SYSFAIL("pthread_cond_destroy", status);
2026 }
2027#if KMP_AFFINITY_SUPPORTED
2028 __kmp_affinity_uninitialize();
2029#endif
2030
2032}
2033
2034/* Put the thread to sleep for a time period */
2035/* NOTE: not currently used anywhere */
2036void __kmp_thread_sleep(int millis) { sleep((millis + 500) / 1000); }
2037
2038/* Calculate the elapsed wall clock time for the user */
2039void __kmp_elapsed(double *t) {
2040 int status;
2041#ifdef FIX_SGI_CLOCK
2042 struct timespec ts;
2043
2044 status = clock_gettime(CLOCK_PROCESS_CPUTIME_ID, &ts);
2045 KMP_CHECK_SYSFAIL_ERRNO("clock_gettime", status);
2046 *t =
2047 (double)ts.tv_nsec * (1.0 / (double)KMP_NSEC_PER_SEC) + (double)ts.tv_sec;
2048#else
2049 struct timeval tv;
2050
2051 status = gettimeofday(&tv, NULL);
2052 KMP_CHECK_SYSFAIL_ERRNO("gettimeofday", status);
2053 *t =
2054 (double)tv.tv_usec * (1.0 / (double)KMP_USEC_PER_SEC) + (double)tv.tv_sec;
2055#endif
2056}
2057
2058/* Calculate the elapsed wall clock tick for the user */
2059void __kmp_elapsed_tick(double *t) { *t = 1 / (double)CLOCKS_PER_SEC; }
2060
2061/* Return the current time stamp in nsec */
2062 kmp_uint64 __kmp_now_nsec() {
2063 struct timeval t;
2064 gettimeofday(&t, NULL);
2065 kmp_uint64 nsec = (kmp_uint64)KMP_NSEC_PER_SEC * (kmp_uint64)t.tv_sec +
2066 (kmp_uint64)1000 * (kmp_uint64)t.tv_usec;
2067 return nsec;
2068}
2069
2070#if KMP_ARCH_X86 || KMP_ARCH_X86_64
2071/* Measure clock ticks per millisecond */
2072 void __kmp_initialize_system_tick() {
2073 kmp_uint64 now, nsec2, diff;
2074 kmp_uint64 delay = 1000000; // ~450 usec on most machines.
2075 kmp_uint64 nsec = __kmp_now_nsec();
2076 kmp_uint64 goal = __kmp_hardware_timestamp() + delay;
2077 while ((now = __kmp_hardware_timestamp()) < goal)
2078 ;
2079 nsec2 = __kmp_now_nsec();
2080 diff = nsec2 - nsec;
2081 if (diff > 0) {
2082 double tpus = 1000.0 * (double)(delay + (now - goal)) / (double)diff;
2083 if (tpus > 0.0) {
2084 __kmp_ticks_per_msec = (kmp_uint64)(tpus * 1000.0);
2085 __kmp_ticks_per_usec = (kmp_uint64)tpus;
2086 }
2087 }
2088}
2089#endif
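// Worked example of the calibration above: the loop busy-waits until the
// hardware timestamp has advanced by delay = 1,000,000 ticks (plus a small
// overshoot now - goal). If that took diff = 400,000 ns of wall-clock time,
// then tpus = 1000.0 * 1,000,000 / 400,000 = 2500 ticks per microsecond,
// i.e. a 2.5 GHz invariant timestamp counter, and __kmp_ticks_per_msec is
// set to 2,500,000.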
2090
2091/* Determine whether the given address is mapped into the current address
2092 space. */
2093
2094 int __kmp_is_address_mapped(void *addr) {
2095
2096 int found = 0;
2097 int rc;
2098
2099#if KMP_OS_LINUX || KMP_OS_HURD
2100
2101 /* On GNUish OSes, read the /proc/<pid>/maps pseudo-file to get all the
2102 address ranges mapped into the address space. */
2103
2104 char *name = __kmp_str_format("/proc/%d/maps", getpid());
2105 FILE *file = NULL;
2106
2107 file = fopen(name, "r");
2108 KMP_ASSERT(file != NULL);
2109
2110 for (;;) {
2111
2112 void *beginning = NULL;
2113 void *ending = NULL;
2114 char perms[5];
2115
2116 rc = fscanf(file, "%p-%p %4s %*[^\n]\n", &beginning, &ending, perms);
2117 if (rc == EOF) {
2118 break;
2119 }
2120 KMP_ASSERT(rc == 3 &&
2121 KMP_STRLEN(perms) == 4); // Make sure all fields are read.
2122
2123 // Ending address is not included in the region, but beginning is.
2124 if ((addr >= beginning) && (addr < ending)) {
2125 perms[2] = 0; // 3rd and 4th characters do not matter.
2126 if (strcmp(perms, "rw") == 0) {
2127 // Memory we are looking for should be readable and writable.
2128 found = 1;
2129 }
2130 break;
2131 }
2132 }
2133
2134 // Free resources.
2135 fclose(file);
2136 KMP_INTERNAL_FREE(name);
2137 #elif KMP_OS_FREEBSD
2138 char *buf;
2139 size_t lstsz;
2140 int mib[] = {CTL_KERN, KERN_PROC, KERN_PROC_VMMAP, getpid()};
2141 rc = sysctl(mib, 4, NULL, &lstsz, NULL, 0);
2142 if (rc < 0)
2143 return 0;
2144 // Convert the number of vm entries into the size of
2145 // the whole entry map list.
2146 lstsz = lstsz * 4 / 3;
2147 buf = reinterpret_cast<char *>(KMP_INTERNAL_MALLOC(lstsz));
2148 rc = sysctl(mib, 4, buf, &lstsz, NULL, 0);
2149 if (rc < 0) {
2150 KMP_INTERNAL_FREE(buf);
2151 return 0;
2152 }
2153
2154 char *lw = buf;
2155 char *up = buf + lstsz;
2156
2157 while (lw < up) {
2158 struct kinfo_vmentry *cur = reinterpret_cast<struct kinfo_vmentry *>(lw);
2159 size_t cursz = cur->kve_structsize;
2160 if (cursz == 0)
2161 break;
2162 void *start = reinterpret_cast<void *>(cur->kve_start);
2163 void *end = reinterpret_cast<void *>(cur->kve_end);
2164 // Readable/Writable addresses within current map entry
2165 if ((addr >= start) && (addr < end)) {
2166 if ((cur->kve_protection & KVME_PROT_READ) != 0 &&
2167 (cur->kve_protection & KVME_PROT_WRITE) != 0) {
2168 found = 1;
2169 break;
2170 }
2171 }
2172 lw += cursz;
2173 }
2174 KMP_INTERNAL_FREE(buf);
2175 #elif KMP_OS_DRAGONFLY
2176 char err[_POSIX2_LINE_MAX];
2177 kinfo_proc *proc;
2178 vmspace sp;
2179 vm_map *cur;
2180 vm_map_entry entry, *c;
2181 struct proc p;
2182 kvm_t *fd;
2183 uintptr_t uaddr;
2184 int num;
2185
2186 fd = kvm_openfiles(nullptr, nullptr, nullptr, O_RDONLY, err);
2187 if (!fd) {
2188 return 0;
2189 }
2190
2191 proc = kvm_getprocs(fd, KERN_PROC_PID, getpid(), &num);
2192
2193 if (kvm_read(fd, static_cast<uintptr_t>(proc->kp_paddr), &p, sizeof(p)) !=
2194 sizeof(p) ||
2195 kvm_read(fd, reinterpret_cast<uintptr_t>(p.p_vmspace), &sp, sizeof(sp)) !=
2196 sizeof(sp)) {
2197 kvm_close(fd);
2198 return 0;
2199 }
2200
2201 (void)rc;
2202 cur = &sp.vm_map;
2203 uaddr = reinterpret_cast<uintptr_t>(addr);
2204 for (c = kvm_vm_map_entry_first(fd, cur, &entry); c;
2205 c = kvm_vm_map_entry_next(fd, c, &entry)) {
2206 if ((uaddr >= entry.ba.start) && (uaddr <= entry.ba.end)) {
2207 if ((entry.protection & VM_PROT_READ) != 0 &&
2208 (entry.protection & VM_PROT_WRITE) != 0) {
2209 found = 1;
2210 break;
2211 }
2212 }
2213 }
2214
2215 kvm_close(fd);
2216#elif KMP_OS_SOLARIS
2217 prmap_t *cur, *map;
2218 void *buf;
2219 uintptr_t uaddr;
2220 ssize_t rd;
2221 int err;
2222 int file;
2223
2224 pid_t pid = getpid();
2225 struct ps_prochandle *fd = Pgrab(pid, PGRAB_RDONLY, &err);
2226
2227
2228 if (!fd) {
2229 return 0;
2230 }
2231
2232 char *name = __kmp_str_format("/proc/%d/map", pid);
2233 size_t sz = (1 << 20);
2234 file = open(name, O_RDONLY);
2235 if (file == -1) {
2237 return 0;
2238 }
2239
2240 buf = KMP_INTERNAL_MALLOC(sz);
2241
2242 while (sz > 0 && (rd = pread(file, buf, sz, 0)) == sz) {
2243 void *newbuf;
2244 sz <<= 1;
2245 newbuf = KMP_INTERNAL_REALLOC(buf, sz);
2246 buf = newbuf;
2247 }
2248
2249 map = reinterpret_cast<prmap_t *>(buf);
2250 uaddr = reinterpret_cast<uintptr_t>(addr);
2251
2252 for (cur = map; rd > 0; cur++, rd -= sizeof(*map)) {
2253 if ((uaddr >= cur->pr_vaddr) && (uaddr < cur->pr_vaddr + cur->pr_size)) {
2254 if ((cur->pr_mflags & MA_READ) != 0 && (cur->pr_mflags & MA_WRITE) != 0) {
2255 found = 1;
2256 break;
2257 }
2258 }
2259 }
2260
2261 KMP_INTERNAL_FREE(map);
2262 close(file);
2264#elif KMP_OS_DARWIN
2265
2266 /* On OS X*, the /proc pseudo filesystem is not available. Try to read
2267 memory using the VM interface. */
2268
2269 int buffer;
2270 vm_size_t count;
2271 rc = vm_read_overwrite(
2272 mach_task_self(), // Task to read memory of.
2273 (vm_address_t)(addr), // Address to read from.
2274 1, // Number of bytes to be read.
2275 (vm_address_t)(&buffer), // Address of buffer to save read bytes in.
2276 &count // Address of var to save number of read bytes in.
2277 );
2278 if (rc == 0) {
2279 // Memory successfully read.
2280 found = 1;
2281 }
2282
2283#elif KMP_OS_NETBSD
2284
2285 int mib[5];
2286 mib[0] = CTL_VM;
2287 mib[1] = VM_PROC;
2288 mib[2] = VM_PROC_MAP;
2289 mib[3] = getpid();
2290 mib[4] = sizeof(struct kinfo_vmentry);
2291
2292 size_t size;
2293 rc = sysctl(mib, __arraycount(mib), NULL, &size, NULL, 0);
2294 KMP_ASSERT(!rc);
2295 KMP_ASSERT(size);
2296
2297 size = size * 4 / 3;
2298 struct kinfo_vmentry *kiv = (struct kinfo_vmentry *)KMP_INTERNAL_MALLOC(size);
2299 KMP_ASSERT(kiv);
2300
2301 rc = sysctl(mib, __arraycount(mib), kiv, &size, NULL, 0);
2302 KMP_ASSERT(!rc);
2303 KMP_ASSERT(size);
2304
2305 for (size_t i = 0; i < size / sizeof(*kiv); i++) {
2306 if (kiv[i].kve_start <= (uint64_t)addr &&
2307 (uint64_t)addr < kiv[i].kve_end) {
2308 found = 1;
2309 break;
2310 }
2311 }
2312 KMP_INTERNAL_FREE(kiv);
2313#elif KMP_OS_OPENBSD
2314
2315 int mib[3];
2316 mib[0] = CTL_KERN;
2317 mib[1] = KERN_PROC_VMMAP;
2318 mib[2] = getpid();
2319
2320 size_t size;
2321 uint64_t end;
2322 rc = sysctl(mib, 3, NULL, &size, NULL, 0);
2323 KMP_ASSERT(!rc);
2325 end = size;
2326
2327 struct kinfo_vmentry kiv = {.kve_start = 0};
2328
2329 while ((rc = sysctl(mib, 3, &kiv, &size, NULL, 0)) == 0) {
2330 KMP_ASSERT(size);
2331 if (kiv.kve_end == end)
2332 break;
2333
2334 if (kiv.kve_start <= (uint64_t)addr && (uint64_t)addr < kiv.kve_end) {
2335 found = 1;
2336 break;
2337 }
2338 kiv.kve_start += 1;
2339 }
2340#elif KMP_OS_WASI
2341 found = (int)addr < (__builtin_wasm_memory_size(0) * PAGESIZE);
2342#elif KMP_OS_AIX
2343
2344 uint32_t loadQueryBufSize = 4096u; // Default loadquery buffer size.
2345 char *loadQueryBuf;
2346
2347 for (;;) {
2348 loadQueryBuf = (char *)KMP_INTERNAL_MALLOC(loadQueryBufSize);
2349 if (loadQueryBuf == NULL) {
2350 return 0;
2351 }
2352
2353 rc = loadquery(L_GETXINFO | L_IGNOREUNLOAD, loadQueryBuf, loadQueryBufSize);
2354 if (rc < 0) {
2355 KMP_INTERNAL_FREE(loadQueryBuf);
2356 if (errno != ENOMEM) {
2357 return 0;
2358 }
2359 // errno == ENOMEM; double the size.
2360 loadQueryBufSize <<= 1;
2361 continue;
2362 }
2363 // Obtained the load info successfully.
2364 break;
2365 }
2366
2367 struct ld_xinfo *curLdInfo = (struct ld_xinfo *)loadQueryBuf;
2368
2369 // Loop through the load info to find if there is a match.
2370 for (;;) {
2371 uintptr_t curDataStart = (uintptr_t)curLdInfo->ldinfo_dataorg;
2372 uintptr_t curDataEnd = curDataStart + curLdInfo->ldinfo_datasize;
2373
2374 // The data segment is readable and writable.
2375 if (curDataStart <= (uintptr_t)addr && (uintptr_t)addr < curDataEnd) {
2376 found = 1;
2377 break;
2378 }
2379 if (curLdInfo->ldinfo_next == 0u) {
2380 // Reached the end of load info.
2381 break;
2382 }
2383 curLdInfo = (struct ld_xinfo *)((char *)curLdInfo + curLdInfo->ldinfo_next);
2384 }
2385 KMP_INTERNAL_FREE(loadQueryBuf);
2386
2387#else
2388
2389#error "Unknown or unsupported OS"
2390
2391#endif
2392
2393 return found;
2394
2395} // __kmp_is_address_mapped
2396
2397#ifdef USE_LOAD_BALANCE
2398
2399#if KMP_OS_DARWIN || KMP_OS_DRAGONFLY || KMP_OS_FREEBSD || KMP_OS_NETBSD || \
2400 KMP_OS_OPENBSD || KMP_OS_SOLARIS
2401
2402 // The function returns the rounded value of the system load average
2403 // during the given time interval, which depends on the value of the
2404 // __kmp_load_balance_interval variable (default is 60 sec; other values
2405 // may be 300 sec or 900 sec).
2406 // It returns -1 in case of error.
2407int __kmp_get_load_balance(int max) {
2408 double averages[3];
2409 int ret_avg = 0;
2410
2411 int res = getloadavg(averages, 3);
2412
2413 // Check __kmp_load_balance_interval to determine which of the averages to
2414 // use. getloadavg() may return fewer samples than requested, i.e., fewer
2415 // than 3.
2416 if (__kmp_load_balance_interval < 180 && (res >= 1)) {
2417 ret_avg = (int)averages[0]; // 1 min
2418 } else if ((__kmp_load_balance_interval >= 180 &&
2419 __kmp_load_balance_interval < 600) &&
2420 (res >= 2)) {
2421 ret_avg = (int)averages[1]; // 5 min
2422 } else if ((__kmp_load_balance_interval >= 600) && (res == 3)) {
2423 ret_avg = (int)averages[2]; // 15 min
2424 } else { // Error occurred
2425 return -1;
2426 }
2427
2428 return ret_avg;
2429}
2430
2431#elif KMP_OS_AIX
2432
2433 // The function returns the number of running (not sleeping) threads, or -1
2434 // in case of error.
2435int __kmp_get_load_balance(int max) {
2436
2437 static int glb_running_threads = 0; // Saved count of the running threads for
2438 // the thread balance algorithm.
2439 static double glb_call_time = 0; // Thread balance algorithm call time.
2440 int running_threads = 0; // Number of running threads in the system.
2441
2442 double call_time = 0.0;
2443
2444 __kmp_elapsed(&call_time);
2445
2446 if (glb_call_time &&
2447 (call_time - glb_call_time < __kmp_load_balance_interval))
2448 return glb_running_threads;
2449
2450 glb_call_time = call_time;
2451
2452 if (max <= 0) {
2453 max = INT_MAX;
2454 }
2455
2456 // Check how many perfstat_cpu_t structures are available.
2457 int logical_cpus = perfstat_cpu(NULL, NULL, sizeof(perfstat_cpu_t), 0);
2458 if (logical_cpus <= 0) {
2459 glb_call_time = -1;
2460 return -1;
2461 }
2462
2463 perfstat_cpu_t *cpu_stat = (perfstat_cpu_t *)KMP_INTERNAL_MALLOC(
2464 logical_cpus * sizeof(perfstat_cpu_t));
2465 if (cpu_stat == NULL) {
2466 glb_call_time = -1;
2467 return -1;
2468 }
2469
2470 // Set first CPU as the name of the first logical CPU for which the info is
2471 // desired.
2472 perfstat_id_t first_cpu_name;
2473 strcpy(first_cpu_name.name, FIRST_CPU);
2474
2475 // Get the stat info of logical CPUs.
2476 int rc = perfstat_cpu(&first_cpu_name, cpu_stat, sizeof(perfstat_cpu_t),
2477 logical_cpus);
2478 KMP_DEBUG_ASSERT(rc == logical_cpus);
2479 if (rc <= 0) {
2480 KMP_INTERNAL_FREE(cpu_stat);
2481 glb_call_time = -1;
2482 return -1;
2483 }
2484 for (int i = 0; i < logical_cpus; ++i) {
2485 running_threads += cpu_stat[i].runque;
2486 if (running_threads >= max)
2487 break;
2488 }
2489
2490 // There _might_ be a timing hole where the thread executing this
2491 // code gets skipped in the load balance, and running_threads is 0.
2492 // Assert in the debug builds only!!!
2493 KMP_DEBUG_ASSERT(running_threads > 0);
2494 if (running_threads <= 0)
2495 running_threads = 1;
2496
2497 KMP_INTERNAL_FREE(cpu_stat);
2498
2499 glb_running_threads = running_threads;
2500
2501 return running_threads;
2502}
2503
2504#else // Linux* OS
2505
2506 // The function returns the number of running (not sleeping) threads, or -1
2507 // in case of error. An error can be reported if the Linux* OS kernel is too
2508 // old (without "/proc" support). Counting of running threads stops if max
2509 // running threads are encountered.
2510int __kmp_get_load_balance(int max) {
2511 static int permanent_error = 0;
2512 static int glb_running_threads = 0; // Saved count of the running threads for
2513 // the thread balance algorithm
2514 static double glb_call_time = 0; /* Thread balance algorithm call time */
2515
2516 int running_threads = 0; // Number of running threads in the system.
2517
2518 DIR *proc_dir = NULL; // Handle of "/proc/" directory.
2519 struct dirent *proc_entry = NULL;
2520
2521 kmp_str_buf_t task_path; // "/proc/<pid>/task/<tid>/" path.
2522 DIR *task_dir = NULL; // Handle of "/proc/<pid>/task/<tid>/" directory.
2523 struct dirent *task_entry = NULL;
2524 int task_path_fixed_len;
2525
2526 kmp_str_buf_t stat_path; // "/proc/<pid>/task/<tid>/stat" path.
2527 int stat_file = -1;
2528 int stat_path_fixed_len;
2529
2530#ifdef KMP_DEBUG
2531 int total_processes = 0; // Total number of processes in system.
2532#endif
2533
2534 double call_time = 0.0;
2535
2536 __kmp_str_buf_init(&task_path);
2537 __kmp_str_buf_init(&stat_path);
2538
2539 __kmp_elapsed(&call_time);
2540
2541 if (glb_call_time &&
2542 (call_time - glb_call_time < __kmp_load_balance_interval)) {
2543 running_threads = glb_running_threads;
2544 goto finish;
2545 }
2546
2547 glb_call_time = call_time;
2548
2549 // Do not spend time on scanning "/proc/" if we have a permanent error.
2550 if (permanent_error) {
2551 running_threads = -1;
2552 goto finish;
2553 }
2554
2555 if (max <= 0) {
2556 max = INT_MAX;
2557 }
2558
2559 // Open "/proc/" directory.
2560 proc_dir = opendir("/proc");
2561 if (proc_dir == NULL) {
2562 // Cannot open "/proc/". Probably the kernel does not support it. Return an
2563 // error now and in subsequent calls.
2564 running_threads = -1;
2565 permanent_error = 1;
2566 goto finish;
2567 }
2568
2569 // Initialize fixed part of task_path. This part will not change.
2570 __kmp_str_buf_cat(&task_path, "/proc/", 6);
2571 task_path_fixed_len = task_path.used; // Remember number of used characters.
2572
2573 proc_entry = readdir(proc_dir);
2574 while (proc_entry != NULL) {
2575 // Proc entry is a directory and its name starts with a digit. Assume it is
2576 // a process' directory.
2577 if (proc_entry->d_type == DT_DIR && isdigit(proc_entry->d_name[0])) {
2578
2579#ifdef KMP_DEBUG
2580 ++total_processes;
2581#endif
2582 // Make sure init process is the very first in "/proc", so we can replace
2583 // strcmp( proc_entry->d_name, "1" ) == 0 with simpler total_processes ==
2584 // 1. We are going to check that total_processes == 1 => d_name == "1" is
2585 // true (where "=>" is implication). Since C++ does not have => operator,
2586 // let us replace it with its equivalent: a => b == ! a || b.
2587 KMP_DEBUG_ASSERT(total_processes != 1 ||
2588 strcmp(proc_entry->d_name, "1") == 0);
2589
2590 // Construct task_path.
2591 task_path.used = task_path_fixed_len; // Reset task_path to "/proc/".
2592 __kmp_str_buf_cat(&task_path, proc_entry->d_name,
2593 KMP_STRLEN(proc_entry->d_name));
2594 __kmp_str_buf_cat(&task_path, "/task", 5);
2595
2596 task_dir = opendir(task_path.str);
2597 if (task_dir == NULL) {
2598 // Process can finish between reading "/proc/" directory entry and
2599 // opening process' "task/" directory. So, in the general case we should
2600 // not complain, but just skip this process and read the next one. But
2601 // on systems with no "task/" support we would spend a lot of time
2602 // scanning the "/proc/" tree again and again without any benefit. The
2603 // "init" process (its pid is 1) should always exist, so if we cannot
2604 // open "/proc/1/task/" directory, it means "task/" is not supported by
2605 // the kernel. Report an error now and in the future.
2606 if (strcmp(proc_entry->d_name, "1") == 0) {
2607 running_threads = -1;
2608 permanent_error = 1;
2609 goto finish;
2610 }
2611 } else {
2612 // Construct fixed part of stat file path.
2613 __kmp_str_buf_clear(&stat_path);
2614 __kmp_str_buf_cat(&stat_path, task_path.str, task_path.used);
2615 __kmp_str_buf_cat(&stat_path, "/", 1);
2616 stat_path_fixed_len = stat_path.used;
2617
2618 task_entry = readdir(task_dir);
2619 while (task_entry != NULL) {
2620 // It is a directory and its name starts with a digit.
2621 if (task_entry->d_type == DT_DIR && isdigit(task_entry->d_name[0])) {
2622
2623 // Construct complete stat file path. Easiest way would be:
2624 // __kmp_str_buf_print( & stat_path, "%s/%s/stat", task_path.str,
2625 // task_entry->d_name );
2626 // but a series of __kmp_str_buf_cat calls works a bit faster.
2627 stat_path.used =
2628 stat_path_fixed_len; // Reset stat path to its fixed part.
2629 __kmp_str_buf_cat(&stat_path, task_entry->d_name,
2630 KMP_STRLEN(task_entry->d_name));
2631 __kmp_str_buf_cat(&stat_path, "/stat", 5);
2632
2633 // Note: Low-level API (open/read/close) is used. High-level API
2634 // (fopen/fclose) works ~30% slower.
2635 stat_file = open(stat_path.str, O_RDONLY);
2636 if (stat_file == -1) {
2637 // We cannot report an error because task (thread) can terminate
2638 // just before reading this file.
2639 } else {
2640 /* Content of "stat" file looks like:
2641 24285 (program) S ...
2642
2643 It is a single line (if the program name does not include funny
2644 symbols). The first number is the thread id, then the name of the
2645 executable file in parentheses, then the state of the thread. We need
2646 just the thread state.
2647
2648 Good news: Length of program name is 15 characters max. Longer
2649 names are truncated.
2650
2651 Thus, we need a rather short buffer: 15 chars for the program name +
2652 2 parentheses + 3 spaces + ~7 digits of pid = 37.
2653
2654 Bad news: Program name may contain special symbols like space,
2655 closing parenthesis, or even new line. This makes parsing the "stat"
2656 file not 100% reliable. With funny program names, parsing may fail
2657 (reporting an incorrect thread state).
2658
2659 Parsing the "status" file looks more promising (due to its different
2660 file structure and escaping of special symbols), but reading and
2661 parsing of the "status" file works slower.
2662 -- ln
2663 */
2664 char buffer[65];
2665 ssize_t len;
2666 len = read(stat_file, buffer, sizeof(buffer) - 1);
2667 if (len >= 0) {
2668 buffer[len] = 0;
2669 // Using scanf:
2670 // sscanf( buffer, "%*d (%*s) %c ", & state );
2671 // looks very nice, but searching for a closing parenthesis
2672 // works a bit faster.
2673 char *close_parent = strstr(buffer, ") ");
2674 if (close_parent != NULL) {
2675 char state = *(close_parent + 2);
2676 if (state == 'R') {
2677 ++running_threads;
2678 if (running_threads >= max) {
2679 goto finish;
2680 }
2681 }
2682 }
2683 }
2684 close(stat_file);
2685 stat_file = -1;
2686 }
2687 }
2688 task_entry = readdir(task_dir);
2689 }
2690 closedir(task_dir);
2691 task_dir = NULL;
2692 }
2693 }
2694 proc_entry = readdir(proc_dir);
2695 }
2696
2697 // There _might_ be a timing hole where the thread executing this
2698 // code gets skipped in the load balance, and running_threads is 0.
2699 // Assert in the debug builds only!!!
2700 KMP_DEBUG_ASSERT(running_threads > 0);
2701 if (running_threads <= 0) {
2702 running_threads = 1;
2703 }
2704
2705finish: // Clean up and exit.
2706 if (proc_dir != NULL) {
2707 closedir(proc_dir);
2708 }
2709 __kmp_str_buf_free(&task_path);
2710 if (task_dir != NULL) {
2711 closedir(task_dir);
2712 }
2713 __kmp_str_buf_free(&stat_path);
2714 if (stat_file != -1) {
2715 close(stat_file);
2716 }
2717
2718 glb_running_threads = running_threads;
2719
2720 return running_threads;
2721
2722} // __kmp_get_load_balance
2723
2724#endif // KMP_OS_DARWIN
2725
2726#endif // USE_LOAD_BALANCE
2727
2728#if !(KMP_ARCH_X86 || KMP_ARCH_X86_64 || KMP_MIC || \
2729 ((KMP_OS_LINUX || KMP_OS_DARWIN) && KMP_ARCH_AARCH64) || \
2730 KMP_ARCH_PPC64 || KMP_ARCH_RISCV64 || KMP_ARCH_LOONGARCH64 || \
2731 KMP_ARCH_ARM || KMP_ARCH_VE || KMP_ARCH_S390X || KMP_ARCH_PPC_XCOFF || \
2732 KMP_ARCH_AARCH64_32)
2733
2734// Because WebAssembly will use `call_indirect` to invoke the microtask and
2735// WebAssembly indirect calls check that the called signature is a precise
2736// match, we need to cast each microtask function pointer back from `void *` to
2737// its original type.
2738typedef void (*microtask_t0)(int *, int *);
2739typedef void (*microtask_t1)(int *, int *, void *);
2740typedef void (*microtask_t2)(int *, int *, void *, void *);
2741typedef void (*microtask_t3)(int *, int *, void *, void *, void *);
2742typedef void (*microtask_t4)(int *, int *, void *, void *, void *, void *);
2743typedef void (*microtask_t5)(int *, int *, void *, void *, void *, void *,
2744 void *);
2745typedef void (*microtask_t6)(int *, int *, void *, void *, void *, void *,
2746 void *, void *);
2747typedef void (*microtask_t7)(int *, int *, void *, void *, void *, void *,
2748 void *, void *, void *);
2749typedef void (*microtask_t8)(int *, int *, void *, void *, void *, void *,
2750 void *, void *, void *, void *);
2751typedef void (*microtask_t9)(int *, int *, void *, void *, void *, void *,
2752 void *, void *, void *, void *, void *);
2753typedef void (*microtask_t10)(int *, int *, void *, void *, void *, void *,
2754 void *, void *, void *, void *, void *, void *);
2755typedef void (*microtask_t11)(int *, int *, void *, void *, void *, void *,
2756 void *, void *, void *, void *, void *, void *,
2757 void *);
2758typedef void (*microtask_t12)(int *, int *, void *, void *, void *, void *,
2759 void *, void *, void *, void *, void *, void *,
2760 void *, void *);
2761typedef void (*microtask_t13)(int *, int *, void *, void *, void *, void *,
2762 void *, void *, void *, void *, void *, void *,
2763 void *, void *, void *);
2764typedef void (*microtask_t14)(int *, int *, void *, void *, void *, void *,
2765 void *, void *, void *, void *, void *, void *,
2766 void *, void *, void *, void *);
2767typedef void (*microtask_t15)(int *, int *, void *, void *, void *, void *,
2768 void *, void *, void *, void *, void *, void *,
2769 void *, void *, void *, void *, void *);
2770
2771 // We really only need the case with 1 argument, because Clang always builds
2772 // a struct of pointers to the shared variables referenced in the outlined function
2773int __kmp_invoke_microtask(microtask_t pkfn, int gtid, int tid, int argc,
2774 void *p_argv[]
2775#if OMPT_SUPPORT
2776 ,
2777 void **exit_frame_ptr
2778#endif
2779) {
2780#if OMPT_SUPPORT
2781 *exit_frame_ptr = OMPT_GET_FRAME_ADDRESS(0);
2782#endif
2783
2784 switch (argc) {
2785 default:
2786 fprintf(stderr, "Too many args to microtask: %d!\n", argc);
2787 fflush(stderr);
2788 exit(-1);
2789 case 0:
2790 (*(microtask_t0)pkfn)(&gtid, &tid);
2791 break;
2792 case 1:
2793 (*(microtask_t1)pkfn)(&gtid, &tid, p_argv[0]);
2794 break;
2795 case 2:
2796 (*(microtask_t2)pkfn)(&gtid, &tid, p_argv[0], p_argv[1]);
2797 break;
2798 case 3:
2799 (*(microtask_t3)pkfn)(&gtid, &tid, p_argv[0], p_argv[1], p_argv[2]);
2800 break;
2801 case 4:
2802 (*(microtask_t4)pkfn)(&gtid, &tid, p_argv[0], p_argv[1], p_argv[2],
2803 p_argv[3]);
2804 break;
2805 case 5:
2806 (*(microtask_t5)pkfn)(&gtid, &tid, p_argv[0], p_argv[1], p_argv[2],
2807 p_argv[3], p_argv[4]);
2808 break;
2809 case 6:
2810 (*(microtask_t6)pkfn)(&gtid, &tid, p_argv[0], p_argv[1], p_argv[2],
2811 p_argv[3], p_argv[4], p_argv[5]);
2812 break;
2813 case 7:
2814 (*(microtask_t7)pkfn)(&gtid, &tid, p_argv[0], p_argv[1], p_argv[2],
2815 p_argv[3], p_argv[4], p_argv[5], p_argv[6]);
2816 break;
2817 case 8:
2818 (*(microtask_t8)pkfn)(&gtid, &tid, p_argv[0], p_argv[1], p_argv[2],
2819 p_argv[3], p_argv[4], p_argv[5], p_argv[6],
2820 p_argv[7]);
2821 break;
2822 case 9:
2823 (*(microtask_t9)pkfn)(&gtid, &tid, p_argv[0], p_argv[1], p_argv[2],
2824 p_argv[3], p_argv[4], p_argv[5], p_argv[6], p_argv[7],
2825 p_argv[8]);
2826 break;
2827 case 10:
2828 (*(microtask_t10)pkfn)(&gtid, &tid, p_argv[0], p_argv[1], p_argv[2],
2829 p_argv[3], p_argv[4], p_argv[5], p_argv[6],
2830 p_argv[7], p_argv[8], p_argv[9]);
2831 break;
2832 case 11:
2833 (*(microtask_t11)pkfn)(&gtid, &tid, p_argv[0], p_argv[1], p_argv[2],
2834 p_argv[3], p_argv[4], p_argv[5], p_argv[6],
2835 p_argv[7], p_argv[8], p_argv[9], p_argv[10]);
2836 break;
2837 case 12:
2838 (*(microtask_t12)pkfn)(&gtid, &tid, p_argv[0], p_argv[1], p_argv[2],
2839 p_argv[3], p_argv[4], p_argv[5], p_argv[6],
2840 p_argv[7], p_argv[8], p_argv[9], p_argv[10],
2841 p_argv[11]);
2842 break;
2843 case 13:
2844 (*(microtask_t13)pkfn)(&gtid, &tid, p_argv[0], p_argv[1], p_argv[2],
2845 p_argv[3], p_argv[4], p_argv[5], p_argv[6],
2846 p_argv[7], p_argv[8], p_argv[9], p_argv[10],
2847 p_argv[11], p_argv[12]);
2848 break;
2849 case 14:
2850 (*(microtask_t14)pkfn)(&gtid, &tid, p_argv[0], p_argv[1], p_argv[2],
2851 p_argv[3], p_argv[4], p_argv[5], p_argv[6],
2852 p_argv[7], p_argv[8], p_argv[9], p_argv[10],
2853 p_argv[11], p_argv[12], p_argv[13]);
2854 break;
2855 case 15:
2856 (*(microtask_t15)pkfn)(&gtid, &tid, p_argv[0], p_argv[1], p_argv[2],
2857 p_argv[3], p_argv[4], p_argv[5], p_argv[6],
2858 p_argv[7], p_argv[8], p_argv[9], p_argv[10],
2859 p_argv[11], p_argv[12], p_argv[13], p_argv[14]);
2860 break;
2861 }
2862
2863 return 1;
2864}
2865
2866#endif
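// A minimal sketch of why the exact-signature casts above matter on
// WebAssembly: call_indirect traps when the function type at the call site
// does not match the callee, so the generic pointer must be cast back to the
// arity-exact type before the call. microtask2_t and call_with_two_args are
// illustrative names, not part of the runtime.
#if 0
typedef void (*microtask2_t)(int *, int *, void *, void *);

static void call_with_two_args(void (*pkfn)(), int *gtid, int *tid,
                               void **argv) {
  // The cast restores the precise signature the callee was compiled with.
  (*(microtask2_t)pkfn)(gtid, tid, argv[0], argv[1]);
}
#endif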
2867
2868#if KMP_OS_LINUX
2869// Functions for hidden helper task
2870namespace {
2871// Condition variable for initializing hidden helper team
2872pthread_cond_t hidden_helper_threads_initz_cond_var;
2873pthread_mutex_t hidden_helper_threads_initz_lock;
2874volatile int hidden_helper_initz_signaled = FALSE;
2875
2876// Condition variable for deinitializing hidden helper team
2877pthread_cond_t hidden_helper_threads_deinitz_cond_var;
2878pthread_mutex_t hidden_helper_threads_deinitz_lock;
2879volatile int hidden_helper_deinitz_signaled = FALSE;
2880
2881// Condition variable for the wrapper function of main thread
2882pthread_cond_t hidden_helper_main_thread_cond_var;
2883pthread_mutex_t hidden_helper_main_thread_lock;
2884volatile int hidden_helper_main_thread_signaled = FALSE;
2885
2886 // Semaphore for worker threads. We don't use a condition variable here
2887 // because when multiple signals are sent at the same time, only one thread
2888 // might be woken.
2889sem_t hidden_helper_task_sem;
2890} // namespace
2891
2892 void __kmp_hidden_helper_worker_thread_wait() {
2893 int status = sem_wait(&hidden_helper_task_sem);
2894 KMP_CHECK_SYSFAIL("sem_wait", status);
2895}
2896
2897 void __kmp_do_initialize_hidden_helper_threads() {
2898 // Initialize condition variable
2899 int status =
2900 pthread_cond_init(&hidden_helper_threads_initz_cond_var, nullptr);
2901 KMP_CHECK_SYSFAIL("pthread_cond_init", status);
2902
2903 status = pthread_cond_init(&hidden_helper_threads_deinitz_cond_var, nullptr);
2904 KMP_CHECK_SYSFAIL("pthread_cond_init", status);
2905
2906 status = pthread_cond_init(&hidden_helper_main_thread_cond_var, nullptr);
2907 KMP_CHECK_SYSFAIL("pthread_cond_init", status);
2908
2909 status = pthread_mutex_init(&hidden_helper_threads_initz_lock, nullptr);
2910 KMP_CHECK_SYSFAIL("pthread_mutex_init", status);
2911
2912 status = pthread_mutex_init(&hidden_helper_threads_deinitz_lock, nullptr);
2913 KMP_CHECK_SYSFAIL("pthread_mutex_init", status);
2914
2915 status = pthread_mutex_init(&hidden_helper_main_thread_lock, nullptr);
2916 KMP_CHECK_SYSFAIL("pthread_mutex_init", status);
2917
2918 // Initialize the semaphore
2919 status = sem_init(&hidden_helper_task_sem, 0, 0);
2920 KMP_CHECK_SYSFAIL("sem_init", status);
2921
2922 // Create a new thread to finish initialization
2923 pthread_t handle;
2924 status = pthread_create(
2925 &handle, nullptr,
2926 [](void *) -> void * {
2927 __kmp_hidden_helper_threads_initz_routine();
2928 return nullptr;
2929 },
2930 nullptr);
2931 KMP_CHECK_SYSFAIL("pthread_create", status);
2932}
2933
2934 void __kmp_hidden_helper_threads_initz_wait() {
2935 // Initial thread waits here for the completion of the initialization. The
2936 // condition variable will be notified by main thread of hidden helper teams.
2937 int status = pthread_mutex_lock(&hidden_helper_threads_initz_lock);
2938 KMP_CHECK_SYSFAIL("pthread_mutex_lock", status);
2939
2940 if (!TCR_4(hidden_helper_initz_signaled)) {
2941 status = pthread_cond_wait(&hidden_helper_threads_initz_cond_var,
2942 &hidden_helper_threads_initz_lock);
2943 KMP_CHECK_SYSFAIL("pthread_cond_wait", status);
2944 }
2945
2946 status = pthread_mutex_unlock(&hidden_helper_threads_initz_lock);
2947 KMP_CHECK_SYSFAIL("pthread_mutex_unlock", status);
2948}
2949
2950 void __kmp_hidden_helper_initz_release() {
2951 // After all initialization, reset __kmp_init_hidden_helper_threads to false.
2952 int status = pthread_mutex_lock(&hidden_helper_threads_initz_lock);
2953 KMP_CHECK_SYSFAIL("pthread_mutex_lock", status);
2954
2955 status = pthread_cond_signal(&hidden_helper_threads_initz_cond_var);
2956 KMP_CHECK_SYSFAIL("pthread_cond_signal", status);
2957
2958 TCW_SYNC_4(hidden_helper_initz_signaled, TRUE);
2959
2960 status = pthread_mutex_unlock(&hidden_helper_threads_initz_lock);
2961 KMP_CHECK_SYSFAIL("pthread_mutex_unlock", status);
2962}
2963
2964 void __kmp_hidden_helper_main_thread_wait() {
2965 // The main thread of hidden helper team will be blocked here. The
2966 // condition variable can only be signaled in the destructor of RTL.
2967 int status = pthread_mutex_lock(&hidden_helper_main_thread_lock);
2968 KMP_CHECK_SYSFAIL("pthread_mutex_lock", status);
2969
2970 if (!TCR_4(hidden_helper_main_thread_signaled)) {
2971 status = pthread_cond_wait(&hidden_helper_main_thread_cond_var,
2972 &hidden_helper_main_thread_lock);
2973 KMP_CHECK_SYSFAIL("pthread_cond_wait", status);
2974 }
2975
2976 status = pthread_mutex_unlock(&hidden_helper_main_thread_lock);
2977 KMP_CHECK_SYSFAIL("pthread_mutex_unlock", status);
2978}
2979
2980 void __kmp_hidden_helper_main_thread_release() {
2981 // The initial thread of OpenMP RTL should call this function to wake up the
2982 // main thread of hidden helper team.
2983 int status = pthread_mutex_lock(&hidden_helper_main_thread_lock);
2984 KMP_CHECK_SYSFAIL("pthread_mutex_lock", status);
2985
2986 status = pthread_cond_signal(&hidden_helper_main_thread_cond_var);
2987 KMP_CHECK_SYSFAIL("pthread_cond_signal", status);
2988
2989 // The hidden helper team is done here
2990 TCW_SYNC_4(hidden_helper_main_thread_signaled, TRUE);
2991
2992 status = pthread_mutex_unlock(&hidden_helper_main_thread_lock);
2993 KMP_CHECK_SYSFAIL("pthread_mutex_unlock", status);
2994}
2995
2996 void __kmp_hidden_helper_worker_thread_signal() {
2997 int status = sem_post(&hidden_helper_task_sem);
2998 KMP_CHECK_SYSFAIL("sem_post", status);
2999}
3000
3001 void __kmp_hidden_helper_threads_deinitz_wait() {
3002 // Initial thread waits here for the completion of the deinitialization. The
3003 // condition variable will be notified by main thread of hidden helper teams.
3004 int status = pthread_mutex_lock(&hidden_helper_threads_deinitz_lock);
3005 KMP_CHECK_SYSFAIL("pthread_mutex_lock", status);
3006
3007 if (!TCR_4(hidden_helper_deinitz_signaled)) {
3008 status = pthread_cond_wait(&hidden_helper_threads_deinitz_cond_var,
3009 &hidden_helper_threads_deinitz_lock);
3010 KMP_CHECK_SYSFAIL("pthread_cond_wait", status);
3011 }
3012
3013 status = pthread_mutex_unlock(&hidden_helper_threads_deinitz_lock);
3014 KMP_CHECK_SYSFAIL("pthread_mutex_unlock", status);
3015}
3016
3017 void __kmp_hidden_helper_threads_deinitz_release() {
3018 int status = pthread_mutex_lock(&hidden_helper_threads_deinitz_lock);
3019 KMP_CHECK_SYSFAIL("pthread_mutex_lock", status);
3020
3021 status = pthread_cond_signal(&hidden_helper_threads_deinitz_cond_var);
3022 KMP_CHECK_SYSFAIL("pthread_cond_signal", status);
3023
3024 TCW_SYNC_4(hidden_helper_deinitz_signaled, TRUE);
3025
3026 status = pthread_mutex_unlock(&hidden_helper_threads_deinitz_lock);
3027 KMP_CHECK_SYSFAIL("pthread_mutex_unlock", status);
3028}
3029#else // KMP_OS_LINUX
3030 void __kmp_hidden_helper_worker_thread_wait() {
3031 KMP_ASSERT(0 && "Hidden helper task is not supported on this OS");
3032}
3033
3034 void __kmp_do_initialize_hidden_helper_threads() {
3035 KMP_ASSERT(0 && "Hidden helper task is not supported on this OS");
3036}
3037
3038 void __kmp_hidden_helper_threads_initz_wait() {
3039 KMP_ASSERT(0 && "Hidden helper task is not supported on this OS");
3040}
3041
3042 void __kmp_hidden_helper_initz_release() {
3043 KMP_ASSERT(0 && "Hidden helper task is not supported on this OS");
3044}
3045
3046 void __kmp_hidden_helper_main_thread_wait() {
3047 KMP_ASSERT(0 && "Hidden helper task is not supported on this OS");
3048}
3049
3050 void __kmp_hidden_helper_main_thread_release() {
3051 KMP_ASSERT(0 && "Hidden helper task is not supported on this OS");
3052}
3053
3054 void __kmp_hidden_helper_worker_thread_signal() {
3055 KMP_ASSERT(0 && "Hidden helper task is not supported on this OS");
3056}
3057
3058 void __kmp_hidden_helper_threads_deinitz_wait() {
3059 KMP_ASSERT(0 && "Hidden helper task is not supported on this OS");
3060}
3061
3062 void __kmp_hidden_helper_threads_deinitz_release() {
3063 KMP_ASSERT(0 && "Hidden helper task is not supported on this OS");
3064}
3065#endif // KMP_OS_LINUX
3066
3067 bool __kmp_detect_shm() {
3068 DIR *dir = opendir("/dev/shm");
3069 if (dir) { // /dev/shm exists
3070 closedir(dir);
3071 return true;
3072 } else if (ENOENT == errno) { // /dev/shm does not exist
3073 return false;
3074 } else { // opendir() failed
3075 return false;
3076 }
3077}
3078
3079 bool __kmp_detect_tmp() {
3080 DIR *dir = opendir("/tmp");
3081 if (dir) { // /tmp exists
3082 closedir(dir);
3083 return true;
3084 } else if (ENOENT == errno) { // /tmp does not exist
3085 return false;
3086 } else { // opendir() failed
3087 return false;
3088 }
3089}
3090
3091// end of file //
static struct kmp_sys_timer __kmp_sys_timer_data
void(* microtask_t6)(int *, int *, void *, void *, void *, void *, void *, void *)
void(* microtask_t14)(int *, int *, void *, void *, void *, void *, void *, void *, void *, void *, void *, void *, void *, void *, void *, void *)
void(* microtask_t11)(int *, int *, void *, void *, void *, void *, void *, void *, void *, void *, void *, void *, void *)
int __kmp_invoke_microtask(microtask_t pkfn, int gtid, int tid, int argc, void *p_argv[])
void __kmp_hidden_helper_initz_release()
void __kmp_suspend_uninitialize_thread(kmp_info_t *th)
static int __kmp_fork_count
void __kmp_suspend_oncore(int th_gtid, kmp_flag_oncore *flag)
void __kmp_lock_suspend_mx(kmp_info_t *th)
bool __kmp_detect_tmp()
void __kmp_terminate_thread(int gtid)
void(* microtask_t2)(int *, int *, void *, void *)
void __kmp_create_worker(int gtid, kmp_info_t *th, size_t stack_size)
template void __kmp_resume_64< false, true >(int, kmp_flag_64< false, true > *)
static void __kmp_atfork_prepare(void)
int __kmp_read_from_file(char const *path, char const *format,...)
void __kmp_elapsed(double *t)
void(* microtask_t9)(int *, int *, void *, void *, void *, void *, void *, void *, void *, void *, void *)
void __kmp_clear_system_time(void)
template void __kmp_suspend_64< false, true >(int, kmp_flag_64< false, true > *)
static void __kmp_resume_template(int target_gtid, C *flag)
void __kmp_runtime_destroy(void)
int __kmp_read_system_info(struct kmp_sys_info *info)
static void * __kmp_launch_worker(void *thr)
template void __kmp_suspend_32< false, false >(int, kmp_flag_32< false, false > *)
void(* microtask_t15)(int *, int *, void *, void *, void *, void *, void *, void *, void *, void *, void *, void *, void *, void *, void *, void *, void *)
void __kmp_resume_oncore(int target_gtid, kmp_flag_oncore *flag)
void(* microtask_t10)(int *, int *, void *, void *, void *, void *, void *, void *, void *, void *, void *, void *)
void __kmp_runtime_initialize(void)
int __kmp_try_suspend_mx(kmp_info_t *th)
void __kmp_resume_64(int target_gtid, kmp_flag_64< C, S > *flag)
void __kmp_resume_32(int target_gtid, kmp_flag_32< C, S > *flag)
void __kmp_atomic_suspend_64(int th_gtid, kmp_atomic_flag_64< C, S > *flag)
static kmp_cond_align_t __kmp_wait_cv
void __kmp_elapsed_tick(double *t)
void __kmp_gtid_set_specific(int gtid)
static int __kmp_get_xproc(void)
void(* microtask_t3)(int *, int *, void *, void *, void *)
void __kmp_atomic_resume_64(int target_gtid, kmp_atomic_flag_64< C, S > *flag)
void(* microtask_t8)(int *, int *, void *, void *, void *, void *, void *, void *, void *, void *)
void __kmp_suspend_initialize_thread(kmp_info_t *th)
void __kmp_exit_thread(int exit_status)
void __kmp_register_atfork(void)
void __kmp_reap_worker(kmp_info_t *th)
void __kmp_hidden_helper_threads_deinitz_release()
void __kmp_affinity_determine_capable(const char *env_var)
void __kmp_affinity_bind_thread(int proc)
int __kmp_get_load_balance(int max)
void __kmp_initialize_system_tick(void)