#include <sys/syscall.h>
  for (i = 0; i < 8; ++i, ++x, ++y) {
  return lck->lk.depth_locked != -1;
__forceinline static int
#ifdef USE_LOCK_PROFILE
  if ((curr != 0) && (curr != gtid + 1))
  if (!__kmp_tpause_enabled)
  char const *const func = "omp_set_lock";
  char const *const func = "omp_test_lock";
  char const *const func = "omp_unset_lock";
  char const *const func = "omp_destroy_lock";
    lck->lk.depth_locked += 1;
    lck->lk.depth_locked = 1;
  char const *const func = "omp_set_nest_lock";
    retval = ++lck->lk.depth_locked;
    retval = lck->lk.depth_locked = 1;
  char const *const func = "omp_test_nest_lock";
  if (--(lck->lk.depth_locked) == 0) {
  char const *const func = "omp_unset_nest_lock";
  lck->lk.depth_locked = 0;
  lck->lk.depth_locked = 0;
  char const *const func = "omp_destroy_nest_lock";
static kmp_int32 __kmp_get_futex_lock_owner(kmp_futex_lock_t *lck) {
static inline bool __kmp_is_futex_lock_nestable(kmp_futex_lock_t *lck) {
  return lck->lk.depth_locked != -1;
__forceinline static int
__kmp_acquire_futex_lock_timed_template(kmp_futex_lock_t *lck, kmp_int32 gtid) {
#ifdef USE_LOCK_PROFILE
  if ((curr != 0) && (curr != gtid_code))
  KA_TRACE(1000, ("__kmp_acquire_futex_lock: lck:%p(0x%x), T#%d entering\n",
                  lck, lck->lk.poll, gtid));
    ("__kmp_acquire_futex_lock: lck:%p, T#%d poll_val = 0x%x cond = 0x%x\n",
     lck, gtid, poll_val, cond));
      ("__kmp_acquire_futex_lock: lck:%p(0x%x), T#%d can't set bit 0\n",
       lck, lck->lk.poll, gtid));
    ("__kmp_acquire_futex_lock: lck:%p(0x%x), T#%d bit 0 set\n",
     lck, lck->lk.poll, gtid));
      ("__kmp_acquire_futex_lock: lck:%p, T#%d before futex_wait(0x%x)\n",
       lck, gtid, poll_val));
    if ((rc = syscall(__NR_futex, &(lck->lk.poll), FUTEX_WAIT, poll_val, NULL,
      KA_TRACE(1000, ("__kmp_acquire_futex_lock: lck:%p, T#%d futex_wait(0x%x) "
                      "failed (rc=%ld errno=%d)\n",
                      lck, gtid, poll_val, rc, errno));
      ("__kmp_acquire_futex_lock: lck:%p, T#%d after futex_wait(0x%x)\n",
       lck, gtid, poll_val));
  KA_TRACE(1000, ("__kmp_acquire_futex_lock: lck:%p(0x%x), T#%d exiting\n",
                  lck, lck->lk.poll, gtid));
int __kmp_acquire_futex_lock(kmp_futex_lock_t *lck, kmp_int32 gtid) {
  int retval = __kmp_acquire_futex_lock_timed_template(lck, gtid);
static int __kmp_acquire_futex_lock_with_checks(kmp_futex_lock_t *lck,
  char const *const func = "omp_set_lock";
      __kmp_is_futex_lock_nestable(lck)) {
  if ((gtid >= 0) && (__kmp_get_futex_lock_owner(lck) == gtid)) {
  return __kmp_acquire_futex_lock(lck, gtid);
int __kmp_test_futex_lock(kmp_futex_lock_t *lck, kmp_int32 gtid) {
static int __kmp_test_futex_lock_with_checks(kmp_futex_lock_t *lck,
  char const *const func = "omp_test_lock";
      __kmp_is_futex_lock_nestable(lck)) {
  return __kmp_test_futex_lock(lck, gtid);
int __kmp_release_futex_lock(kmp_futex_lock_t *lck, kmp_int32 gtid) {
  KA_TRACE(1000, ("__kmp_release_futex_lock: lck:%p(0x%x), T#%d entering\n",
                  lck, lck->lk.poll, gtid));
    ("__kmp_release_futex_lock: lck:%p, T#%d released poll_val = 0x%x\n",
     lck, gtid, poll_val));
      ("__kmp_release_futex_lock: lck:%p, T#%d futex_wake 1 thread\n",
  KA_TRACE(1000, ("__kmp_release_futex_lock: lck:%p(0x%x), T#%d exiting\n",
                  lck, lck->lk.poll, gtid));
static int __kmp_release_futex_lock_with_checks(kmp_futex_lock_t *lck,
  char const *const func = "omp_unset_lock";
      __kmp_is_futex_lock_nestable(lck)) {
  if (__kmp_get_futex_lock_owner(lck) == -1) {
  if ((gtid >= 0) && (__kmp_get_futex_lock_owner(lck) >= 0) &&
      (__kmp_get_futex_lock_owner(lck) != gtid)) {
  return __kmp_release_futex_lock(lck, gtid);
void __kmp_init_futex_lock(kmp_futex_lock_t *lck) {
void __kmp_destroy_futex_lock(kmp_futex_lock_t *lck) { lck->lk.poll = 0; }
static void __kmp_destroy_futex_lock_with_checks(kmp_futex_lock_t *lck) {
  char const *const func = "omp_destroy_lock";
      __kmp_is_futex_lock_nestable(lck)) {
  if (__kmp_get_futex_lock_owner(lck) != -1) {
  __kmp_destroy_futex_lock(lck);
int __kmp_acquire_nested_futex_lock(kmp_futex_lock_t *lck, kmp_int32 gtid) {
  if (__kmp_get_futex_lock_owner(lck) == gtid) {
    lck->lk.depth_locked += 1;
    __kmp_acquire_futex_lock_timed_template(lck, gtid);
    lck->lk.depth_locked = 1;
static int __kmp_acquire_nested_futex_lock_with_checks(kmp_futex_lock_t *lck,
  char const *const func = "omp_set_nest_lock";
  if (!__kmp_is_futex_lock_nestable(lck)) {
  return __kmp_acquire_nested_futex_lock(lck, gtid);
int __kmp_test_nested_futex_lock(kmp_futex_lock_t *lck, kmp_int32 gtid) {
  if (__kmp_get_futex_lock_owner(lck) == gtid) {
    retval = ++lck->lk.depth_locked;
  } else if (!__kmp_test_futex_lock(lck, gtid)) {
    retval = lck->lk.depth_locked = 1;
static int __kmp_test_nested_futex_lock_with_checks(kmp_futex_lock_t *lck,
  char const *const func = "omp_test_nest_lock";
  if (!__kmp_is_futex_lock_nestable(lck)) {
  return __kmp_test_nested_futex_lock(lck, gtid);
int __kmp_release_nested_futex_lock(kmp_futex_lock_t *lck, kmp_int32 gtid) {
  if (--(lck->lk.depth_locked) == 0) {
    __kmp_release_futex_lock(lck, gtid);
static int __kmp_release_nested_futex_lock_with_checks(kmp_futex_lock_t *lck,
  char const *const func = "omp_unset_nest_lock";
  if (!__kmp_is_futex_lock_nestable(lck)) {
  if (__kmp_get_futex_lock_owner(lck) == -1) {
  if (__kmp_get_futex_lock_owner(lck) != gtid) {
  return __kmp_release_nested_futex_lock(lck, gtid);
void __kmp_init_nested_futex_lock(kmp_futex_lock_t *lck) {
  __kmp_init_futex_lock(lck);
  lck->lk.depth_locked = 0;
void __kmp_destroy_nested_futex_lock(kmp_futex_lock_t *lck) {
  __kmp_destroy_futex_lock(lck);
  lck->lk.depth_locked = 0;
static void __kmp_destroy_nested_futex_lock_with_checks(kmp_futex_lock_t *lck) {
  char const *const func = "omp_destroy_nest_lock";
  if (!__kmp_is_futex_lock_nestable(lck)) {
  if (__kmp_get_futex_lock_owner(lck) != -1) {
  __kmp_destroy_nested_futex_lock(lck);
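// Illustrative sketch, not libomp's kmp_futex_lock_t: the routines above park
// waiters in the kernel with the futex syscall instead of spinning. A minimal
// three-state futex mutex (0 = free, 1 = held, 2 = held with waiters), with
// hypothetical names and assuming Linux; the cast of std::atomic<int>* to
// int* for the syscall is an assumption of this sketch.
#include <linux/futex.h>
#include <sys/syscall.h>
#include <unistd.h>
#include <atomic>

static std::atomic<int> __sketch_futex_word{0};

static long __sketch_futex(int *uaddr, int op, int val) {
  return syscall(SYS_futex, uaddr, op, val, nullptr, nullptr, 0);
}

static void __sketch_futex_lock() {
  int c = 0;
  // Fast path: 0 -> 1 takes a free lock without entering the kernel.
  if (__sketch_futex_word.compare_exchange_strong(c, 1,
                                                  std::memory_order_acquire))
    return;
  // Slow path: mark the lock contended (2), then sleep until it reads 0.
  if (c != 2)
    c = __sketch_futex_word.exchange(2, std::memory_order_acquire);
  while (c != 0) {
    __sketch_futex((int *)&__sketch_futex_word, FUTEX_WAIT, 2);
    c = __sketch_futex_word.exchange(2, std::memory_order_acquire);
  }
}

static void __sketch_futex_unlock() {
  // Only wake a waiter if the contended state was observed.
  if (__sketch_futex_word.exchange(0, std::memory_order_release) == 2)
    __sketch_futex((int *)&__sketch_futex_word, FUTEX_WAKE, 1);
}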
  return std::atomic_load_explicit(&lck->lk.owner_id,
                                   std::memory_order_relaxed) -
  return std::atomic_load_explicit(&lck->lk.depth_locked,
                                   std::memory_order_relaxed) != -1;
  return std::atomic_load_explicit((std::atomic<unsigned> *)now_serving,
                                   std::memory_order_acquire) == my_ticket;
__forceinline static int
  kmp_uint32 my_ticket = std::atomic_fetch_add_explicit(
      &lck->lk.next_ticket, 1U, std::memory_order_relaxed);
#ifdef USE_LOCK_PROFILE
  if (std::atomic_load_explicit(&lck->lk.now_serving,
                                std::memory_order_relaxed) != my_ticket)
  if (std::atomic_load_explicit(&lck->lk.now_serving,
                                std::memory_order_acquire) == my_ticket) {
  char const *const func = "omp_set_lock";
  if (!std::atomic_load_explicit(&lck->lk.initialized,
                                 std::memory_order_relaxed)) {
  if (lck->lk.self != lck) {
  std::atomic_store_explicit(&lck->lk.owner_id, gtid + 1,
                             std::memory_order_relaxed);
  kmp_uint32 my_ticket = std::atomic_load_explicit(&lck->lk.next_ticket,
                                                   std::memory_order_relaxed);
  if (std::atomic_load_explicit(&lck->lk.now_serving,
                                std::memory_order_relaxed) == my_ticket) {
    if (std::atomic_compare_exchange_strong_explicit(
            &lck->lk.next_ticket, &my_ticket, next_ticket,
            std::memory_order_acquire, std::memory_order_acquire)) {
  char const *const func = "omp_test_lock";
  if (!std::atomic_load_explicit(&lck->lk.initialized,
                                 std::memory_order_relaxed)) {
  if (lck->lk.self != lck) {
  std::atomic_store_explicit(&lck->lk.owner_id, gtid + 1,
                             std::memory_order_relaxed);
  kmp_uint32 distance = std::atomic_load_explicit(&lck->lk.next_ticket,
                                                  std::memory_order_relaxed) -
                        std::atomic_load_explicit(&lck->lk.now_serving,
                                                  std::memory_order_relaxed);
  std::atomic_fetch_add_explicit(&lck->lk.now_serving, 1U,
                                 std::memory_order_release);
  char const *const func = "omp_unset_lock";
  if (!std::atomic_load_explicit(&lck->lk.initialized,
                                 std::memory_order_relaxed)) {
  if (lck->lk.self != lck) {
  std::atomic_store_explicit(&lck->lk.owner_id, 0, std::memory_order_relaxed);
  lck->lk.location = NULL;
  std::atomic_store_explicit(&lck->lk.next_ticket, 0U,
                             std::memory_order_relaxed);
  std::atomic_store_explicit(&lck->lk.now_serving, 0U,
                             std::memory_order_relaxed);
  std::atomic_store_explicit(&lck->lk.owner_id, 0, std::memory_order_relaxed);
  std::atomic_store_explicit(&lck->lk.depth_locked, -1,
                             std::memory_order_relaxed);
  std::atomic_store_explicit(&lck->lk.initialized, true,
                             std::memory_order_release);
  std::atomic_store_explicit(&lck->lk.initialized, false,
                             std::memory_order_release);
  lck->lk.location = NULL;
  std::atomic_store_explicit(&lck->lk.next_ticket, 0U,
                             std::memory_order_relaxed);
  std::atomic_store_explicit(&lck->lk.now_serving, 0U,
                             std::memory_order_relaxed);
  std::atomic_store_explicit(&lck->lk.owner_id, 0, std::memory_order_relaxed);
  std::atomic_store_explicit(&lck->lk.depth_locked, -1,
                             std::memory_order_relaxed);
  char const *const func = "omp_destroy_lock";
  if (!std::atomic_load_explicit(&lck->lk.initialized,
                                 std::memory_order_relaxed)) {
  if (lck->lk.self != lck) {
  std::atomic_fetch_add_explicit(&lck->lk.depth_locked, 1,
                                 std::memory_order_relaxed);
  std::atomic_store_explicit(&lck->lk.depth_locked, 1,
                             std::memory_order_relaxed);
  std::atomic_store_explicit(&lck->lk.owner_id, gtid + 1,
                             std::memory_order_relaxed);
  char const *const func = "omp_set_nest_lock";
  if (!std::atomic_load_explicit(&lck->lk.initialized,
                                 std::memory_order_relaxed)) {
  if (lck->lk.self != lck) {
    retval = std::atomic_fetch_add_explicit(&lck->lk.depth_locked, 1,
                                            std::memory_order_relaxed) +
    std::atomic_store_explicit(&lck->lk.depth_locked, 1,
                               std::memory_order_relaxed);
    std::atomic_store_explicit(&lck->lk.owner_id, gtid + 1,
                               std::memory_order_relaxed);
  char const *const func = "omp_test_nest_lock";
  if (!std::atomic_load_explicit(&lck->lk.initialized,
                                 std::memory_order_relaxed)) {
  if (lck->lk.self != lck) {
  if ((std::atomic_fetch_add_explicit(&lck->lk.depth_locked, -1,
                                      std::memory_order_relaxed) -
    std::atomic_store_explicit(&lck->lk.owner_id, 0, std::memory_order_relaxed);
  char const *const func = "omp_unset_nest_lock";
  if (!std::atomic_load_explicit(&lck->lk.initialized,
                                 std::memory_order_relaxed)) {
  if (lck->lk.self != lck) {
  std::atomic_store_explicit(&lck->lk.depth_locked, 0,
                             std::memory_order_relaxed);
  std::atomic_store_explicit(&lck->lk.depth_locked, 0,
                             std::memory_order_relaxed);
  char const *const func = "omp_destroy_nest_lock";
  if (!std::atomic_load_explicit(&lck->lk.initialized,
                                 std::memory_order_relaxed)) {
  if (lck->lk.self != lck) {
  return lck->lk.location;
  return lck->lk.flags;
  lck->lk.flags = flags;
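// Illustrative sketch, not libomp's kmp_ticket_lock_t: the atomics above
// implement a classic ticket lock. A hypothetical standalone version showing
// the same memory orderings (relaxed ticket grab, acquire spin, release
// hand-off):
#include <atomic>

struct kmp_ticket_sketch_t {
  std::atomic<unsigned> next_ticket{0};
  std::atomic<unsigned> now_serving{0};
};

static void __sketch_ticket_acquire(kmp_ticket_sketch_t *lck) {
  // Taking a ticket needs no ordering; the acquire happens on the spin load.
  unsigned my_ticket = std::atomic_fetch_add_explicit(
      &lck->next_ticket, 1U, std::memory_order_relaxed);
  while (std::atomic_load_explicit(&lck->now_serving,
                                   std::memory_order_acquire) != my_ticket) {
    // spin (a real implementation would back off or yield here)
  }
}

static void __sketch_ticket_release(kmp_ticket_sketch_t *lck) {
  // Publish the critical section and hand the lock to the next ticket holder.
  std::atomic_fetch_add_explicit(&lck->now_serving, 1U,
                                 std::memory_order_release);
}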
#ifdef DEBUG_QUEUING_LOCKS
#define TRACE_BUF_ELE 1024
static char traces[TRACE_BUF_ELE][128] = {0};
#define TRACE_LOCK(X, Y) \
  KMP_SNPRINTF(traces[tc++ % TRACE_BUF_ELE], 128, "t%d at %s\n", X, Y);
#define TRACE_LOCK_T(X, Y, Z) \
  KMP_SNPRINTF(traces[tc++ % TRACE_BUF_ELE], 128, "t%d at %s%d\n", X, Y, Z);
#define TRACE_LOCK_HT(X, Y, Z, Q) \
  KMP_SNPRINTF(traces[tc++ % TRACE_BUF_ELE], 128, "t%d at %s %d,%d\n", X, Y, \
  i = tc % TRACE_BUF_ELE;
  i = (i + 1) % TRACE_BUF_ELE;
  while (i != (tc % TRACE_BUF_ELE)) {
    i = (i + 1) % TRACE_BUF_ELE;
      "next_wait:%d, head_id:%d, tail_id:%d\n",
      gtid + 1, this_thr->th.th_spin_here,
      this_thr->th.th_next_waiting, head_id, tail_id);
  if (lck->lk.head_id >= 1) {
  return TCR_4(lck->lk.owner_id) - 1;
  return lck->lk.depth_locked != -1;
template <bool takeTime>
__forceinline static int
  ompt_state_t prev_state = ompt_state_undefined;
  ("__kmp_acquire_queuing_lock: lck:%p, T#%d entering\n", lck, gtid));
  spin_here_p = &this_thr->th.th_spin_here;
#ifdef DEBUG_QUEUING_LOCKS
  TRACE_LOCK(gtid + 1, "acq ent");
    __kmp_dump_queuing_lock(this_thr, gtid, lck, *head_id_p, *tail_id_p);
  if (this_thr->th.th_next_waiting != 0)
    __kmp_dump_queuing_lock(this_thr, gtid, lck, *head_id_p, *tail_id_p);
  *spin_here_p = TRUE;
#ifdef DEBUG_QUEUING_LOCKS
    TRACE_LOCK_HT(gtid + 1, "acq read: ", head, tail);
#ifdef DEBUG_QUEUING_LOCKS
      TRACE_LOCK(gtid + 1, "acq enq: (-1,0)->(tid,tid)");
#ifdef DEBUG_QUEUING_LOCKS
      TRACE_LOCK_HT(gtid + 1, "acq read: ", head, tail);
#ifdef DEBUG_QUEUING_LOCKS
      TRACE_LOCK(gtid + 1, "acq enq: (h,t)->(h,tid)");
#ifdef DEBUG_QUEUING_LOCKS
      TRACE_LOCK_HT(gtid + 1, "acq read: ", head, tail);
      *spin_here_p = FALSE;
      ("__kmp_acquire_queuing_lock: lck:%p, T#%d exiting: no queuing\n",
#ifdef DEBUG_QUEUING_LOCKS
      TRACE_LOCK_HT(gtid + 1, "acq exit: ", head, 0);
        this_thr->th.ompt_thread_info.state = prev_state;
        this_thr->th.ompt_thread_info.wait_id = 0;
      prev_state = this_thr->th.ompt_thread_info.state;
      this_thr->th.ompt_thread_info.wait_id = (uint64_t)lck;
      this_thr->th.ompt_thread_info.state = ompt_state_wait_lock;
      tail_thr->th.th_next_waiting = gtid + 1;
      ("__kmp_acquire_queuing_lock: lck:%p, T#%d waiting for lock\n",
#ifdef DEBUG_QUEUING_LOCKS
      TRACE_LOCK(gtid + 1, "acq spin");
      if (this_thr->th.th_next_waiting != 0)
        __kmp_dump_queuing_lock(this_thr, gtid, lck, *head_id_p, *tail_id_p);
      KA_TRACE(1000, ("__kmp_acquire_queuing_lock: lck:%p, T#%d exiting: after "
                      "waiting on queue\n",
#ifdef DEBUG_QUEUING_LOCKS
      TRACE_LOCK(gtid + 1, "acq exit 2");
        this_thr->th.ompt_thread_info.state = prev_state;
        this_thr->th.ompt_thread_info.wait_id = 0;
#ifdef DEBUG_QUEUING_LOCKS
    TRACE_LOCK(gtid + 1, "acq retry");
  int retval = __kmp_acquire_queuing_lock_timed_template<false>(lck, gtid);
  char const *const func = "omp_set_lock";
  if (lck->lk.initialized != lck) {
  lck->lk.owner_id = gtid + 1;
  KA_TRACE(1000, ("__kmp_test_queuing_lock: T#%d entering\n", gtid));
    ("__kmp_test_queuing_lock: T#%d exiting: holding lock\n", gtid));
    ("__kmp_test_queuing_lock: T#%d exiting: without lock\n", gtid));
  char const *const func = "omp_test_lock";
  if (lck->lk.initialized != lck) {
  lck->lk.owner_id = gtid + 1;
  ("__kmp_release_queuing_lock: lck:%p, T#%d entering\n", lck, gtid));
#if KMP_DEBUG || DEBUG_QUEUING_LOCKS
#ifdef DEBUG_QUEUING_LOCKS
  TRACE_LOCK(gtid + 1, "rel ent");
  if (this_thr->th.th_spin_here)
    __kmp_dump_queuing_lock(this_thr, gtid, lck, *head_id_p, *tail_id_p);
  if (this_thr->th.th_next_waiting != 0)
    __kmp_dump_queuing_lock(this_thr, gtid, lck, *head_id_p, *tail_id_p);
#ifdef DEBUG_QUEUING_LOCKS
    TRACE_LOCK_HT(gtid + 1, "rel read: ", head, tail);
      __kmp_dump_queuing_lock(this_thr, gtid, lck, head, tail);
      ("__kmp_release_queuing_lock: lck:%p, T#%d exiting: queue empty\n",
#ifdef DEBUG_QUEUING_LOCKS
      TRACE_LOCK_HT(gtid + 1, "rel exit: ", 0, 0);
#ifdef DEBUG_QUEUING_LOCKS
      __kmp_dump_queuing_lock(this_thr, gtid, lck, head, tail);
#ifdef DEBUG_QUEUING_LOCKS
        TRACE_LOCK(gtid + 1, "rel deq: (h,h)->(-1,0)");
      waiting_id_p = &head_thr->th.th_next_waiting;
#ifdef DEBUG_QUEUING_LOCKS
        __kmp_dump_queuing_lock(this_thr, gtid, lck, head, tail);
#ifdef DEBUG_QUEUING_LOCKS
        TRACE_LOCK(gtid + 1, "rel deq: (h,t)->(h',t)");
#ifdef DEBUG_QUEUING_LOCKS
        __kmp_dump_queuing_lock(this_thr, gtid, lck, head, tail);
      head_thr->th.th_next_waiting = 0;
#ifdef DEBUG_QUEUING_LOCKS
      TRACE_LOCK_T(gtid + 1, "rel nw=0 for t=", head);
      head_thr->th.th_spin_here = FALSE;
      KA_TRACE(1000, ("__kmp_release_queuing_lock: lck:%p, T#%d exiting: after "
#ifdef DEBUG_QUEUING_LOCKS
      TRACE_LOCK(gtid + 1, "rel exit 2");
#ifdef DEBUG_QUEUING_LOCKS
    TRACE_LOCK(gtid + 1, "rel retry");
  char const *const func = "omp_unset_lock";
  if (lck->lk.initialized != lck) {
  lck->lk.owner_id = 0;
  lck->lk.location = NULL;
  lck->lk.head_id = 0;
  lck->lk.tail_id = 0;
  lck->lk.next_ticket = 0;
  lck->lk.now_serving = 0;
  lck->lk.owner_id = 0;
  lck->lk.depth_locked = -1;
  lck->lk.initialized = lck;
  KA_TRACE(1000, ("__kmp_init_queuing_lock: lock %p initialized\n", lck));
  lck->lk.initialized = NULL;
  lck->lk.location = NULL;
  lck->lk.head_id = 0;
  lck->lk.tail_id = 0;
  lck->lk.next_ticket = 0;
  lck->lk.now_serving = 0;
  lck->lk.owner_id = 0;
  lck->lk.depth_locked = -1;
  char const *const func = "omp_destroy_lock";
  if (lck->lk.initialized != lck) {
    lck->lk.depth_locked += 1;
    __kmp_acquire_queuing_lock_timed_template<false>(lck, gtid);
    lck->lk.depth_locked = 1;
    lck->lk.owner_id = gtid + 1;
  char const *const func = "omp_set_nest_lock";
  if (lck->lk.initialized != lck) {
    retval = ++lck->lk.depth_locked;
    retval = lck->lk.depth_locked = 1;
    lck->lk.owner_id = gtid + 1;
  char const *const func = "omp_test_nest_lock";
  if (lck->lk.initialized != lck) {
  if (--(lck->lk.depth_locked) == 0) {
    lck->lk.owner_id = 0;
  char const *const func = "omp_unset_nest_lock";
  if (lck->lk.initialized != lck) {
  lck->lk.depth_locked = 0;
  lck->lk.depth_locked = 0;
  char const *const func = "omp_destroy_nest_lock";
  if (lck->lk.initialized != lck) {
  return lck->lk.location;
  return lck->lk.flags;
  lck->lk.flags = flags;
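// Illustrative sketch of the queueing idea, not libomp's kmp_queuing_lock_t:
// the code above threads its wait queue through the thread descriptors
// (th_next_waiting / th_spin_here, keyed by gtid). An MCS lock expresses the
// same "spin on your own flag, hand off to your successor" structure with an
// explicit per-waiter node; hypothetical names throughout.
#include <atomic>

struct mcs_node_sketch_t {
  std::atomic<mcs_node_sketch_t *> next{nullptr};
  std::atomic<bool> locked{false};
};

struct mcs_lock_sketch_t {
  std::atomic<mcs_node_sketch_t *> tail{nullptr};
};

static void __sketch_mcs_acquire(mcs_lock_sketch_t *lck,
                                 mcs_node_sketch_t *me) {
  me->next.store(nullptr, std::memory_order_relaxed);
  me->locked.store(true, std::memory_order_relaxed);
  // Swap ourselves in as the new tail; the previous tail is our predecessor.
  mcs_node_sketch_t *prev = lck->tail.exchange(me, std::memory_order_acq_rel);
  if (prev != nullptr) {
    prev->next.store(me, std::memory_order_release);
    while (me->locked.load(std::memory_order_acquire)) {
      // Spin on our own flag only, so waiters do not ping-pong a shared line.
    }
  }
}

static void __sketch_mcs_release(mcs_lock_sketch_t *lck,
                                 mcs_node_sketch_t *me) {
  mcs_node_sketch_t *succ = me->next.load(std::memory_order_acquire);
  if (succ == nullptr) {
    mcs_node_sketch_t *expected = me;
    // No visible successor: try to swing the tail back to empty.
    if (lck->tail.compare_exchange_strong(expected, nullptr,
                                          std::memory_order_acq_rel))
      return;
    // A successor is mid-enqueue; wait for it to link itself.
    while ((succ = me->next.load(std::memory_order_acquire)) == nullptr) {
    }
  }
  succ->locked.store(false, std::memory_order_release); // hand off the lock
}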
#if KMP_USE_ADAPTIVE_LOCKS
#if KMP_HAVE_RTM_INTRINSICS
#include <immintrin.h>
#define SOFT_ABORT_MASK (_XABORT_RETRY | _XABORT_CONFLICT | _XABORT_EXPLICIT)
#define _XBEGIN_STARTED (~0u)
#define _XABORT_EXPLICIT (1 << 0)
#define _XABORT_RETRY (1 << 1)
#define _XABORT_CONFLICT (1 << 2)
#define _XABORT_CAPACITY (1 << 3)
#define _XABORT_DEBUG (1 << 4)
#define _XABORT_NESTED (1 << 5)
#define _XABORT_CODE(x) ((unsigned char)(((x) >> 24) & 0xFF))
#define SOFT_ABORT_MASK (_XABORT_RETRY | _XABORT_CONFLICT | _XABORT_EXPLICIT)
#define STRINGIZE_INTERNAL(arg) #arg
#define STRINGIZE(arg) STRINGIZE_INTERNAL(arg)
static __inline int _xbegin() {
  __asm__ volatile("1: .byte 0xC7; .byte 0xF8;\n"
                   "1: movl %%eax,%0\n"
                   : "+r"(res)::"memory", "%eax");
static __inline void _xend() {
  __asm__ volatile(".byte 0x0f; .byte 0x01; .byte 0xd5" ::: "memory");
#define _xabort(ARG) _asm _emit 0xc6 _asm _emit 0xf8 _asm _emit ARG
#define _xabort(ARG) \
  __asm__ volatile(".byte 0xC6; .byte 0xF8; .byte " STRINGIZE(ARG):::"memory");
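// Illustrative sketch of how the RTM primitives defined above are typically
// used, not the adaptive-lock code itself: speculate a few times, abort if the
// fallback lock is held, and take the real lock only on persistent failure.
// The fallback lock here is a hypothetical plain atomic, not the queuing lock;
// _xbegin/_xend/_xabort/_XBEGIN_STARTED/SOFT_ABORT_MASK are assumed from above.
#include <atomic>

static std::atomic<int> __sketch_rtm_fallback{0};

template <typename F> static void __sketch_rtm_elide(F &&critical_section) {
  for (int retries = 3; retries > 0; --retries) {
    unsigned status = _xbegin();
    if (status == _XBEGIN_STARTED) {
      if (__sketch_rtm_fallback.load(std::memory_order_relaxed) != 0)
        _xabort(0xff);    // the real lock is held: abort the transaction
      critical_section(); // runs transactionally, eliding the lock
      _xend();            // commit
      return;
    }
    if (!(status & SOFT_ABORT_MASK))
      break; // hard failure (capacity, nesting, ...): stop speculating
  }
  // Non-speculative fallback path.
  int expected = 0;
  while (!__sketch_rtm_fallback.compare_exchange_weak(
      expected, 1, std::memory_order_acquire))
    expected = 0;
  critical_section();
  __sketch_rtm_fallback.store(0, std::memory_order_release);
}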
#if KMP_DEBUG_ADAPTIVE_LOCKS
static kmp_adaptive_lock_statistics_t destroyedStats;
static kmp_adaptive_lock_info_t liveLocks;
void __kmp_init_speculative_stats() {
  kmp_adaptive_lock_info_t *lck = &liveLocks;
  memset(CCAST(kmp_adaptive_lock_statistics_t *, &(lck->stats)), 0,
         sizeof(lck->stats));
static void __kmp_remember_lock(kmp_adaptive_lock_info_t *lck) {
  lck->stats.next = liveLocks.stats.next;
  lck->stats.prev = &liveLocks;
  liveLocks.stats.next = lck;
  lck->stats.next->stats.prev = lck;
static void __kmp_forget_lock(kmp_adaptive_lock_info_t *lck) {
  kmp_adaptive_lock_info_t *n = lck->stats.next;
  kmp_adaptive_lock_info_t *p = lck->stats.prev;
static void __kmp_zero_speculative_stats(kmp_adaptive_lock_info_t *lck) {
  memset(CCAST(kmp_adaptive_lock_statistics_t *, &lck->stats), 0,
         sizeof(lck->stats));
  __kmp_remember_lock(lck);
static void __kmp_add_stats(kmp_adaptive_lock_statistics_t *t,
                            kmp_adaptive_lock_info_t *lck) {
  kmp_adaptive_lock_statistics_t volatile *s = &lck->stats;
  t->nonSpeculativeAcquireAttempts += lck->acquire_attempts;
  t->successfulSpeculations += s->successfulSpeculations;
  t->hardFailedSpeculations += s->hardFailedSpeculations;
  t->softFailedSpeculations += s->softFailedSpeculations;
  t->nonSpeculativeAcquires += s->nonSpeculativeAcquires;
  t->lemmingYields += s->lemmingYields;
static void __kmp_accumulate_speculative_stats(kmp_adaptive_lock_info_t *lck) {
  __kmp_add_stats(&destroyedStats, lck);
  __kmp_forget_lock(lck);
  return (total == 0) ? 0.0 : (100.0 * count) / total;
void __kmp_print_speculative_stats() {
  kmp_adaptive_lock_statistics_t total = destroyedStats;
  kmp_adaptive_lock_info_t *lck;
  for (lck = liveLocks.stats.next; lck != &liveLocks; lck = lck->stats.next) {
    __kmp_add_stats(&total, lck);
  kmp_adaptive_lock_statistics_t *t = &total;
      t->nonSpeculativeAcquires + t->successfulSpeculations;
  kmp_uint32 totalSpeculations = t->successfulSpeculations +
                                 t->hardFailedSpeculations +
                                 t->softFailedSpeculations;
  if (totalSections <= 0)
  if (strcmp(__kmp_speculative_statsfile, "-") == 0) {
    size_t buffLen = KMP_STRLEN(__kmp_speculative_statsfile) + 20;
    char buffer[buffLen];
    KMP_SNPRINTF(&buffer[0], buffLen, __kmp_speculative_statsfile,
    statsFile.open(buffer, "w");
  fprintf(statsFile, "Speculative lock statistics (all approximate!)\n");
          " Lock parameters: \n"
          " max_soft_retries : %10d\n"
          " max_badness : %10d\n",
          __kmp_adaptive_backoff_params.max_soft_retries,
          __kmp_adaptive_backoff_params.max_badness);
  fprintf(statsFile, " Non-speculative acquire attempts : %10d\n",
          t->nonSpeculativeAcquireAttempts);
  fprintf(statsFile, " Total critical sections : %10d\n",
  fprintf(statsFile, " Successful speculations : %10d (%5.1f%%)\n",
          t->successfulSpeculations,
          percent(t->successfulSpeculations, totalSections));
  fprintf(statsFile, " Non-speculative acquires : %10d (%5.1f%%)\n",
          t->nonSpeculativeAcquires,
          percent(t->nonSpeculativeAcquires, totalSections));
  fprintf(statsFile, " Lemming yields : %10d\n\n",
  fprintf(statsFile, " Speculative acquire attempts : %10d\n",
  fprintf(statsFile, " Successes : %10d (%5.1f%%)\n",
          t->successfulSpeculations,
          percent(t->successfulSpeculations, totalSpeculations));
  fprintf(statsFile, " Soft failures : %10d (%5.1f%%)\n",
          t->softFailedSpeculations,
          percent(t->softFailedSpeculations, totalSpeculations));
  fprintf(statsFile, " Hard failures : %10d (%5.1f%%)\n",
          t->hardFailedSpeculations,
          percent(t->hardFailedSpeculations, totalSpeculations));
#define KMP_INC_STAT(lck, stat) (lck->lk.adaptive.stats.stat++)
#define KMP_INC_STAT(lck, stat)
  bool res = lck->lk.head_id == 0;
#if KMP_COMPILER_ICC || KMP_COMPILER_ICX
  __sync_synchronize();
__kmp_update_badness_after_success(kmp_adaptive_lock_t *lck) {
  lck->lk.adaptive.badness = 0;
  KMP_INC_STAT(lck, successfulSpeculations);
static __inline void __kmp_step_badness(kmp_adaptive_lock_t *lck) {
  kmp_uint32 newBadness = (lck->lk.adaptive.badness << 1) | 1;
  if (newBadness > lck->lk.adaptive.max_badness) {
    lck->lk.adaptive.badness = newBadness;
static __inline int __kmp_should_speculate(kmp_adaptive_lock_t *lck,
  int res = (attempts & badness) == 0;
static int __kmp_test_adaptive_lock_only(kmp_adaptive_lock_t *lck,
  int retries = lck->lk.adaptive.max_soft_retries;
    if (status == _XBEGIN_STARTED) {
      if (!__kmp_is_unlocked_queuing_lock(GET_QLK_PTR(lck))) {
    if (status & SOFT_ABORT_MASK) {
      KMP_INC_STAT(lck, softFailedSpeculations);
      KMP_INC_STAT(lck, hardFailedSpeculations);
  } while (retries--);
  __kmp_step_badness(lck);
static int __kmp_test_adaptive_lock(kmp_adaptive_lock_t *lck, kmp_int32 gtid) {
  if (__kmp_should_speculate(lck, gtid) &&
      __kmp_test_adaptive_lock_only(lck, gtid))
  lck->lk.adaptive.acquire_attempts++;
    KMP_INC_STAT(lck, nonSpeculativeAcquires);
static int __kmp_test_adaptive_lock_with_checks(kmp_adaptive_lock_t *lck,
  char const *const func = "omp_test_lock";
  if (lck->lk.qlk.initialized != GET_QLK_PTR(lck)) {
  int retval = __kmp_test_adaptive_lock(lck, gtid);
    lck->lk.qlk.owner_id = gtid + 1;
static void __kmp_acquire_adaptive_lock(kmp_adaptive_lock_t *lck,
  if (__kmp_should_speculate(lck, gtid)) {
    if (__kmp_is_unlocked_queuing_lock(GET_QLK_PTR(lck))) {
      if (__kmp_test_adaptive_lock_only(lck, gtid))
      while (!__kmp_is_unlocked_queuing_lock(GET_QLK_PTR(lck))) {
        KMP_INC_STAT(lck, lemmingYields);
      if (__kmp_test_adaptive_lock_only(lck, gtid))
  lck->lk.adaptive.acquire_attempts++;
  __kmp_acquire_queuing_lock_timed_template<FALSE>(GET_QLK_PTR(lck), gtid);
  KMP_INC_STAT(lck, nonSpeculativeAcquires);
static void __kmp_acquire_adaptive_lock_with_checks(kmp_adaptive_lock_t *lck,
  char const *const func = "omp_set_lock";
  if (lck->lk.qlk.initialized != GET_QLK_PTR(lck)) {
  __kmp_acquire_adaptive_lock(lck, gtid);
  lck->lk.qlk.owner_id = gtid + 1;
static int __kmp_release_adaptive_lock(kmp_adaptive_lock_t *lck,
  if (__kmp_is_unlocked_queuing_lock(GET_QLK_PTR(
    __kmp_update_badness_after_success(lck);
static int __kmp_release_adaptive_lock_with_checks(kmp_adaptive_lock_t *lck,
  char const *const func = "omp_unset_lock";
  if (lck->lk.qlk.initialized != GET_QLK_PTR(lck)) {
  lck->lk.qlk.owner_id = 0;
  __kmp_release_adaptive_lock(lck, gtid);
static void __kmp_init_adaptive_lock(kmp_adaptive_lock_t *lck) {
  lck->lk.adaptive.badness = 0;
  lck->lk.adaptive.acquire_attempts = 0;
  lck->lk.adaptive.max_soft_retries =
      __kmp_adaptive_backoff_params.max_soft_retries;
  lck->lk.adaptive.max_badness = __kmp_adaptive_backoff_params.max_badness;
#if KMP_DEBUG_ADAPTIVE_LOCKS
  __kmp_zero_speculative_stats(&lck->lk.adaptive);
  KA_TRACE(1000, ("__kmp_init_adaptive_lock: lock %p initialized\n", lck));
static void __kmp_destroy_adaptive_lock(kmp_adaptive_lock_t *lck) {
#if KMP_DEBUG_ADAPTIVE_LOCKS
  __kmp_accumulate_speculative_stats(&lck->lk.adaptive);
static void __kmp_destroy_adaptive_lock_with_checks(kmp_adaptive_lock_t *lck) {
  char const *const func = "omp_destroy_lock";
  if (lck->lk.qlk.initialized != GET_QLK_PTR(lck)) {
  __kmp_destroy_adaptive_lock(lck);
  return lck->lk.owner_id - 1;
  return lck->lk.depth_locked != -1;
__forceinline static int
  std::atomic<kmp_uint64> *polls = lck->lk.polls;
#ifdef USE_LOCK_PROFILE
  if (polls[ticket & mask] != ticket)
  while (polls[ticket & mask] < ticket) {
    polls = lck->lk.polls;
  KA_TRACE(1000, ("__kmp_acquire_drdpa_lock: ticket #%lld acquired lock %p\n",
  lck->lk.now_serving = ticket;
  if ((lck->lk.old_polls != NULL) && (ticket >= lck->lk.cleanup_ticket)) {
    lck->lk.old_polls = NULL;
    lck->lk.cleanup_ticket = 0;
  if (lck->lk.old_polls == NULL) {
    bool reconfigure = false;
    std::atomic<kmp_uint64> *old_polls = polls;
    if (num_polls > 1) {
      num_polls = TCR_4(lck->lk.num_polls);
      if (num_waiting > num_polls) {
        } while (num_polls <= num_waiting);
        for (i = 0; i < old_num_polls; i++) {
          polls[i].store(old_polls[i]);
      KA_TRACE(1000, ("__kmp_acquire_drdpa_lock: ticket #%lld reconfiguring "
                      "lock %p to %d polls\n",
                      ticket, lck, num_polls));
      lck->lk.old_polls = old_polls;
      lck->lk.polls = polls;
      lck->lk.num_polls = num_polls;
      lck->lk.cleanup_ticket = lck->lk.next_ticket;
  char const *const func = "omp_set_lock";
  if (lck->lk.initialized != lck) {
  lck->lk.owner_id = gtid + 1;
  std::atomic<kmp_uint64> *polls = lck->lk.polls;
  if (polls[ticket & mask] == ticket) {
    KA_TRACE(1000, ("__kmp_test_drdpa_lock: ticket #%lld acquired lock %p\n",
    lck->lk.now_serving = ticket;
  char const *const func = "omp_test_lock";
  if (lck->lk.initialized != lck) {
    lck->lk.owner_id = gtid + 1;
  std::atomic<kmp_uint64> *polls = lck->lk.polls;
  KA_TRACE(1000, ("__kmp_release_drdpa_lock: ticket #%lld released lock %p\n",
  polls[ticket & mask] = ticket;
  char const *const func = "omp_unset_lock";
  if (lck->lk.initialized != lck) {
  lck->lk.owner_id = 0;
  lck->lk.location = NULL;
  lck->lk.num_polls = 1;
      lck->lk.num_polls * sizeof(*(lck->lk.polls)));
  lck->lk.cleanup_ticket = 0;
  lck->lk.old_polls = NULL;
  lck->lk.next_ticket = 0;
  lck->lk.now_serving = 0;
  lck->lk.owner_id = 0;
  lck->lk.depth_locked = -1;
  lck->lk.initialized = lck;
  KA_TRACE(1000, ("__kmp_init_drdpa_lock: lock %p initialized\n", lck));
  lck->lk.initialized = NULL;
  lck->lk.location = NULL;
  if (lck->lk.polls.load() != NULL) {
    lck->lk.polls = NULL;
  if (lck->lk.old_polls != NULL) {
    lck->lk.old_polls = NULL;
  lck->lk.num_polls = 0;
  lck->lk.cleanup_ticket = 0;
  lck->lk.next_ticket = 0;
  lck->lk.now_serving = 0;
  lck->lk.owner_id = 0;
  lck->lk.depth_locked = -1;
  char const *const func = "omp_destroy_lock";
  if (lck->lk.initialized != lck) {
    lck->lk.depth_locked += 1;
    lck->lk.depth_locked = 1;
    lck->lk.owner_id = gtid + 1;
  char const *const func = "omp_set_nest_lock";
  if (lck->lk.initialized != lck) {
    retval = ++lck->lk.depth_locked;
    retval = lck->lk.depth_locked = 1;
    lck->lk.owner_id = gtid + 1;
  char const *const func = "omp_test_nest_lock";
  if (lck->lk.initialized != lck) {
  if (--(lck->lk.depth_locked) == 0) {
    lck->lk.owner_id = 0;
  char const *const func = "omp_unset_nest_lock";
  if (lck->lk.initialized != lck) {
  lck->lk.depth_locked = 0;
  lck->lk.depth_locked = 0;
  char const *const func = "omp_destroy_nest_lock";
  if (lck->lk.initialized != lck) {
  return lck->lk.location;
  return lck->lk.flags;
  lck->lk.flags = flags;
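// Illustrative sketch of the DRDPA idea above (distributed polls), not
// libomp's kmp_drdpa_lock_t: each ticket spins on its own slot, so a release
// disturbs only the next waiter's cache line. Fixed-size table with
// hypothetical names; the real lock grows and swaps the poll array instead.
#include <atomic>
#include <cstdint>

struct drdpa_sketch_t {
  static const unsigned kPolls = 64; // power of two so (ticket & mask) works
  struct alignas(64) poll_slot_t {
    std::atomic<std::uint64_t> val{0};
  };
  poll_slot_t polls[kPolls];
  std::atomic<std::uint64_t> next_ticket{0};
};

static std::uint64_t __sketch_drdpa_acquire(drdpa_sketch_t *lck) {
  std::uint64_t ticket =
      lck->next_ticket.fetch_add(1, std::memory_order_relaxed);
  const std::uint64_t mask = drdpa_sketch_t::kPolls - 1;
  // Wait until our slot is stamped with our own ticket number.
  while (lck->polls[ticket & mask].val.load(std::memory_order_acquire) !=
         ticket) {
  }
  return ticket;
}

static void __sketch_drdpa_release(drdpa_sketch_t *lck, std::uint64_t ticket) {
  const std::uint64_t mask = drdpa_sketch_t::kPolls - 1;
  // Stamp the successor's slot; only that waiter sees an invalidation.
  lck->polls[(ticket + 1) & mask].val.store(ticket + 1,
                                            std::memory_order_release);
}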
#if KMP_ARCH_X86 || KMP_ARCH_X86_64
#define __kmp_tsc() __kmp_hardware_timestamp()
#define __kmp_tsc() __kmp_now_nsec()
  for (i = boff->step; i > 0; i--) {
    if (__kmp_umwait_enabled) {
#if KMP_USE_DYNAMIC_LOCK
static void __kmp_init_direct_lock(kmp_dyna_lock_t *lck,
                                   kmp_dyna_lockseq_t seq) {
      ("__kmp_init_direct_lock: initialized direct lock with type#%d\n", seq));
#define HLE_ACQUIRE ".byte 0xf2;"
#define HLE_RELEASE ".byte 0xf3;"
  __asm__ volatile(HLE_ACQUIRE "xchg %1,%0" : "+r"(v), "+m"(*p) : : "memory");
static void __kmp_destroy_hle_lock(kmp_dyna_lock_t *lck) { TCW_4(*lck, 0); }
static void __kmp_destroy_hle_lock_with_checks(kmp_dyna_lock_t *lck) {
static void __kmp_acquire_hle_lock(kmp_dyna_lock_t *lck, kmp_int32 gtid) {
static void __kmp_acquire_hle_lock_with_checks(kmp_dyna_lock_t *lck,
  __kmp_acquire_hle_lock(lck, gtid);
static int __kmp_release_hle_lock(kmp_dyna_lock_t *lck, kmp_int32 gtid) {
  __asm__ volatile(HLE_RELEASE "movl %1,%0"
static int __kmp_release_hle_lock_with_checks(kmp_dyna_lock_t *lck,
  return __kmp_release_hle_lock(lck, gtid);
static int __kmp_test_hle_lock(kmp_dyna_lock_t *lck, kmp_int32 gtid) {
static int __kmp_test_hle_lock_with_checks(kmp_dyna_lock_t *lck,
  return __kmp_test_hle_lock(lck, gtid);
  unsigned retries = 3, status;
    if (status == _XBEGIN_STARTED) {
      if (__kmp_is_unlocked_queuing_lock(lck))
    if ((status & _XABORT_EXPLICIT) && _XABORT_CODE(status) == 0xff) {
      while (!__kmp_is_unlocked_queuing_lock(lck)) {
    } else if (!(status & _XABORT_RETRY))
  } while (retries--);
  __kmp_acquire_rtm_queuing_lock(lck, gtid);
  if (__kmp_is_unlocked_queuing_lock(lck)) {
  return __kmp_release_rtm_queuing_lock(lck, gtid);
  unsigned retries = 3, status;
    if (status == _XBEGIN_STARTED && __kmp_is_unlocked_queuing_lock(lck)) {
    if (!(status & _XABORT_RETRY))
  } while (retries--);
  return __kmp_test_rtm_queuing_lock(lck, gtid);
static void __kmp_destroy_rtm_spin_lock(kmp_rtm_spin_lock_t *lck) {
static void __kmp_destroy_rtm_spin_lock_with_checks(kmp_rtm_spin_lock_t *lck) {
  __kmp_destroy_rtm_spin_lock(lck);
static int __kmp_acquire_rtm_spin_lock(kmp_rtm_spin_lock_t *lck,
  unsigned retries = 3, status;
    if (status == _XBEGIN_STARTED) {
    if ((status & _XABORT_EXPLICIT) && _XABORT_CODE(status) == 0xff) {
    } else if (!(status & _XABORT_RETRY))
  } while (retries--);
static int __kmp_acquire_rtm_spin_lock_with_checks(kmp_rtm_spin_lock_t *lck,
  return __kmp_acquire_rtm_spin_lock(lck, gtid);
static int __kmp_release_rtm_spin_lock(kmp_rtm_spin_lock_t *lck,
static int __kmp_release_rtm_spin_lock_with_checks(kmp_rtm_spin_lock_t *lck,
  return __kmp_release_rtm_spin_lock(lck, gtid);
static int __kmp_test_rtm_spin_lock(kmp_rtm_spin_lock_t *lck, kmp_int32 gtid) {
  unsigned retries = 3, status;
    if (status == _XBEGIN_STARTED &&
    if (!(status & _XABORT_RETRY))
  } while (retries--);
static int __kmp_test_rtm_spin_lock_with_checks(kmp_rtm_spin_lock_t *lck,
  return __kmp_test_rtm_spin_lock(lck, gtid);
static void __kmp_init_indirect_lock(kmp_dyna_lock_t *l,
                                     kmp_dyna_lockseq_t tag);
static void __kmp_destroy_indirect_lock(kmp_dyna_lock_t *lock);
static int __kmp_set_indirect_lock(kmp_dyna_lock_t *lock, kmp_int32);
static int __kmp_unset_indirect_lock(kmp_dyna_lock_t *lock, kmp_int32);
static int __kmp_test_indirect_lock(kmp_dyna_lock_t *lock, kmp_int32);
static int __kmp_set_indirect_lock_with_checks(kmp_dyna_lock_t *lock,
static int __kmp_unset_indirect_lock_with_checks(kmp_dyna_lock_t *lock,
static int __kmp_test_indirect_lock_with_checks(kmp_dyna_lock_t *lock,
#define KMP_FOREACH_LOCK_KIND(m, a) m(ticket, a) m(queuing, a) m(drdpa, a)
#define expand1(lk, op) \
  static void __kmp_##op##_##lk##_##lock(kmp_user_lock_p lock) { \
    __kmp_##op##_##lk##_##lock(&lock->lk); \
#define expand2(lk, op) \
  static int __kmp_##op##_##lk##_##lock(kmp_user_lock_p lock, \
    return __kmp_##op##_##lk##_##lock(&lock->lk, gtid); \
#define expand3(lk, op) \
  static void __kmp_set_##lk##_##lock_flags(kmp_user_lock_p lock, \
                                            kmp_lock_flags_t flags) { \
    __kmp_set_##lk##_lock_flags(&lock->lk, flags); \
#define expand4(lk, op) \
  static void __kmp_set_##lk##_##lock_location(kmp_user_lock_p lock, \
                                               const ident_t *loc) { \
    __kmp_set_##lk##_lock_location(&lock->lk, loc); \
KMP_FOREACH_LOCK_KIND(expand1, init)
KMP_FOREACH_LOCK_KIND(expand1, init_nested)
KMP_FOREACH_LOCK_KIND(expand1, destroy)
KMP_FOREACH_LOCK_KIND(expand1, destroy_nested)
KMP_FOREACH_LOCK_KIND(expand2, acquire)
KMP_FOREACH_LOCK_KIND(expand2, acquire_nested)
KMP_FOREACH_LOCK_KIND(expand2, release)
KMP_FOREACH_LOCK_KIND(expand2, release_nested)
KMP_FOREACH_LOCK_KIND(expand2, test)
KMP_FOREACH_LOCK_KIND(expand2, test_nested)
KMP_FOREACH_LOCK_KIND(expand3, )
KMP_FOREACH_LOCK_KIND(expand4, )
#define expand(l, op) 0, __kmp_init_direct_lock,
void (*__kmp_direct_init[])(kmp_dyna_lock_t *, kmp_dyna_lockseq_t) = {
    __kmp_init_indirect_lock, 0, KMP_FOREACH_D_LOCK(expand, init)};
#define expand(l, op) 0, (void (*)(kmp_dyna_lock_t *))__kmp_##op##_##l##_lock,
static void (*direct_destroy[])(kmp_dyna_lock_t *) = {
    __kmp_destroy_indirect_lock, 0, KMP_FOREACH_D_LOCK(expand, destroy)};
#define expand(l, op) \
  0, (void (*)(kmp_dyna_lock_t *))__kmp_destroy_##l##_lock_with_checks,
static void (*direct_destroy_check[])(kmp_dyna_lock_t *) = {
    __kmp_destroy_indirect_lock, 0, KMP_FOREACH_D_LOCK(expand, destroy)};
#define expand(l, op) \
  0, (int (*)(kmp_dyna_lock_t *, kmp_int32))__kmp_##op##_##l##_lock,
static int (*direct_set[])(kmp_dyna_lock_t *, kmp_int32) = {
    __kmp_set_indirect_lock, 0, KMP_FOREACH_D_LOCK(expand, acquire)};
#define expand(l, op) \
  0, (int (*)(kmp_dyna_lock_t *, kmp_int32))__kmp_##op##_##l##_lock_with_checks,
static int (*direct_set_check[])(kmp_dyna_lock_t *, kmp_int32) = {
    __kmp_set_indirect_lock_with_checks, 0,
    KMP_FOREACH_D_LOCK(expand, acquire)};
#define expand(l, op) \
  0, (int (*)(kmp_dyna_lock_t *, kmp_int32))__kmp_##op##_##l##_lock,
static int (*direct_unset[])(kmp_dyna_lock_t *, kmp_int32) = {
    __kmp_unset_indirect_lock, 0, KMP_FOREACH_D_LOCK(expand, release)};
static int (*direct_test[])(kmp_dyna_lock_t *, kmp_int32) = {
    __kmp_test_indirect_lock, 0, KMP_FOREACH_D_LOCK(expand, test)};
#define expand(l, op) \
  0, (int (*)(kmp_dyna_lock_t *, kmp_int32))__kmp_##op##_##l##_lock_with_checks,
static int (*direct_unset_check[])(kmp_dyna_lock_t *, kmp_int32) = {
    __kmp_unset_indirect_lock_with_checks, 0,
    KMP_FOREACH_D_LOCK(expand, release)};
static int (*direct_test_check[])(kmp_dyna_lock_t *, kmp_int32) = {
    __kmp_test_indirect_lock_with_checks, 0, KMP_FOREACH_D_LOCK(expand, test)};
void (**__kmp_direct_destroy)(kmp_dyna_lock_t *) = 0;
int (**__kmp_direct_set)(kmp_dyna_lock_t *, kmp_int32) = 0;
int (**__kmp_direct_unset)(kmp_dyna_lock_t *, kmp_int32) = 0;
int (**__kmp_direct_test)(kmp_dyna_lock_t *, kmp_int32) = 0;
#define expand(l, op) (void (*)(kmp_user_lock_p)) __kmp_##op##_##l##_##lock,
    KMP_FOREACH_I_LOCK(expand, init)};
#define expand(l, op) (void (*)(kmp_user_lock_p)) __kmp_##op##_##l##_##lock,
    KMP_FOREACH_I_LOCK(expand, destroy)};
#define expand(l, op) \
  (void (*)(kmp_user_lock_p)) __kmp_##op##_##l##_##lock_with_checks,
    KMP_FOREACH_I_LOCK(expand, destroy)};
#define expand(l, op) \
  (int (*)(kmp_user_lock_p, kmp_int32)) __kmp_##op##_##l##_##lock,
    kmp_int32) = {KMP_FOREACH_I_LOCK(expand, acquire)};
#define expand(l, op) \
  (int (*)(kmp_user_lock_p, kmp_int32)) __kmp_##op##_##l##_##lock_with_checks,
    KMP_FOREACH_I_LOCK(expand, acquire)};
#define expand(l, op) \
  (int (*)(kmp_user_lock_p, kmp_int32)) __kmp_##op##_##l##_##lock,
    KMP_FOREACH_I_LOCK(expand, release)};
#define expand(l, op) \
  (int (*)(kmp_user_lock_p, kmp_int32)) __kmp_##op##_##l##_##lock_with_checks,
    KMP_FOREACH_I_LOCK(expand, release)};
    KMP_FOREACH_I_LOCK(expand, test)};
kmp_indirect_lock_table_t __kmp_i_lock_table;
static kmp_uint32 __kmp_indirect_lock_size[KMP_NUM_I_LOCKS] = {0};
const ident_t *(*__kmp_indirect_get_location[KMP_NUM_I_LOCKS])(
static kmp_indirect_lock_t *__kmp_indirect_lock_pool[KMP_NUM_I_LOCKS] = {0};
kmp_indirect_lock_t *__kmp_allocate_indirect_lock(void **user_lock,
                                                  kmp_indirect_locktag_t tag) {
  kmp_indirect_lock_t *lck;
  if (__kmp_indirect_lock_pool[tag] != NULL) {
    lck = __kmp_indirect_lock_pool[tag];
      idx = lck->lock->pool.index;
    __kmp_indirect_lock_pool[tag] = (kmp_indirect_lock_t *)lck->lock->pool.next;
    KA_TRACE(20, ("__kmp_allocate_indirect_lock: reusing an existing lock %p\n",
    kmp_indirect_lock_table_t *lock_table = &__kmp_i_lock_table;
      table_idx = lock_table->next;
      idx += lock_table->next;
      if (table_idx < lock_table->nrow_ptrs * KMP_I_LOCK_CHUNK) {
        row = table_idx / KMP_I_LOCK_CHUNK;
        col = table_idx % KMP_I_LOCK_CHUNK;
        if (!lock_table->table[row]) {
              sizeof(kmp_indirect_lock_t) * KMP_I_LOCK_CHUNK);
        if (!lock_table->next_table) {
          kmp_indirect_lock_table_t *next_table =
              sizeof(kmp_indirect_lock_table_t));
              sizeof(kmp_indirect_lock_t *) * 2 * lock_table->nrow_ptrs);
          next_table->nrow_ptrs = 2 * lock_table->nrow_ptrs;
          next_table->next = 0;
          next_table->next_table = nullptr;
          lock_table->next_table = next_table;
        lock_table = lock_table->next_table;
    lck = &lock_table->table[row][col];
      ("__kmp_allocate_indirect_lock: allocated a new lock %p\n", lck));
  *((kmp_indirect_lock_t **)user_lock) = lck;
static __forceinline kmp_indirect_lock_t *
__kmp_lookup_indirect_lock(void **user_lock, const char *func) {
  kmp_indirect_lock_t *lck = NULL;
  if (user_lock == NULL) {
      lck = __kmp_get_i_lock(idx);
      lck = *((kmp_indirect_lock_t **)user_lock);
    return __kmp_get_i_lock(KMP_EXTRACT_I_INDEX(user_lock));
    return *((kmp_indirect_lock_t **)user_lock);
static void __kmp_init_indirect_lock(kmp_dyna_lock_t *lock,
                                     kmp_dyna_lockseq_t seq) {
#if KMP_USE_ADAPTIVE_LOCKS
  if (seq == lockseq_adaptive && !__kmp_cpuinfo.flags.rtm) {
    KMP_WARNING(AdaptiveNotSupported, "kmp_lockseq_t", "adaptive");
    seq = lockseq_queuing;
  if (seq == lockseq_rtm_queuing && !__kmp_cpuinfo.flags.rtm) {
    seq = lockseq_queuing;
  kmp_indirect_locktag_t tag = KMP_GET_I_TAG(seq);
  kmp_indirect_lock_t *l =
  KMP_I_LOCK_FUNC(l, init)(l->lock);
      20, ("__kmp_init_indirect_lock: initialized indirect lock with type#%d\n",
static void __kmp_destroy_indirect_lock(kmp_dyna_lock_t *lock) {
  kmp_indirect_lock_t *l =
      __kmp_lookup_indirect_lock((void **)lock, "omp_destroy_lock");
  KMP_I_LOCK_FUNC(l, destroy)(l->lock);
  kmp_indirect_locktag_t tag = l->type;
  l->lock->pool.index = KMP_EXTRACT_I_INDEX(lock);
  __kmp_indirect_lock_pool[tag] = l;
static int __kmp_set_indirect_lock(kmp_dyna_lock_t *lock, kmp_int32 gtid) {
  kmp_indirect_lock_t *l = KMP_LOOKUP_I_LOCK(lock);
  return KMP_I_LOCK_FUNC(l, set)(l->lock, gtid);
static int __kmp_unset_indirect_lock(kmp_dyna_lock_t *lock, kmp_int32 gtid) {
  kmp_indirect_lock_t *l = KMP_LOOKUP_I_LOCK(lock);
  return KMP_I_LOCK_FUNC(l, unset)(l->lock, gtid);
static int __kmp_test_indirect_lock(kmp_dyna_lock_t *lock, kmp_int32 gtid) {
  kmp_indirect_lock_t *l = KMP_LOOKUP_I_LOCK(lock);
  return KMP_I_LOCK_FUNC(l, test)(l->lock, gtid);
static int __kmp_set_indirect_lock_with_checks(kmp_dyna_lock_t *lock,
  kmp_indirect_lock_t *l =
      __kmp_lookup_indirect_lock((void **)lock, "omp_set_lock");
  return KMP_I_LOCK_FUNC(l, set)(l->lock, gtid);
static int __kmp_unset_indirect_lock_with_checks(kmp_dyna_lock_t *lock,
  kmp_indirect_lock_t *l =
      __kmp_lookup_indirect_lock((void **)lock, "omp_unset_lock");
  return KMP_I_LOCK_FUNC(l, unset)(l->lock, gtid);
static int __kmp_test_indirect_lock_with_checks(kmp_dyna_lock_t *lock,
  kmp_indirect_lock_t *l =
      __kmp_lookup_indirect_lock((void **)lock, "omp_test_lock");
  return KMP_I_LOCK_FUNC(l, test)(l->lock, gtid);
kmp_dyna_lockseq_t __kmp_user_lock_seq = lockseq_queuing;
  case lockseq_nested_tas:
  case lockseq_nested_futex:
    return __kmp_get_futex_lock_owner((kmp_futex_lock_t *)lck);
  case lockseq_ticket:
  case lockseq_nested_ticket:
  case lockseq_queuing:
  case lockseq_nested_queuing:
#if KMP_USE_ADAPTIVE_LOCKS
  case lockseq_adaptive:
  case lockseq_nested_drdpa:
void __kmp_init_dynamic_user_locks() {
    __kmp_direct_set = direct_set_check;
    __kmp_direct_unset = direct_unset_check;
    __kmp_direct_test = direct_test_check;
    __kmp_direct_destroy = direct_destroy_check;
    __kmp_indirect_set = indirect_set_check;
    __kmp_indirect_unset = indirect_unset_check;
    __kmp_indirect_test = indirect_test_check;
    __kmp_indirect_destroy = indirect_destroy_check;
    __kmp_direct_set = direct_set;
    __kmp_direct_unset = direct_unset;
    __kmp_direct_test = direct_test;
    __kmp_direct_destroy = direct_destroy;
    __kmp_indirect_set = indirect_set;
    __kmp_indirect_unset = indirect_unset;
    __kmp_indirect_test = indirect_test;
    __kmp_indirect_destroy = indirect_destroy;
  __kmp_i_lock_table.nrow_ptrs = KMP_I_LOCK_TABLE_INIT_NROW_PTRS;
  __kmp_i_lock_table.table = (kmp_indirect_lock_t **)__kmp_allocate(
      sizeof(kmp_indirect_lock_t *) * KMP_I_LOCK_TABLE_INIT_NROW_PTRS);
  *(__kmp_i_lock_table.table) = (kmp_indirect_lock_t *)__kmp_allocate(
      KMP_I_LOCK_CHUNK * sizeof(kmp_indirect_lock_t));
  __kmp_i_lock_table.next = 0;
  __kmp_i_lock_table.next_table = nullptr;
#if KMP_USE_ADAPTIVE_LOCKS
  __kmp_indirect_lock_size[locktag_adaptive] = sizeof(kmp_adaptive_lock_t);
  __kmp_indirect_lock_size[locktag_nested_tas] = sizeof(kmp_tas_lock_t);
  __kmp_indirect_lock_size[locktag_nested_futex] = sizeof(kmp_futex_lock_t);
#define fill_jumps(table, expand, sep) \
  table[locktag##sep##ticket] = expand(ticket); \
  table[locktag##sep##queuing] = expand(queuing); \
  table[locktag##sep##drdpa] = expand(drdpa); \
#if KMP_USE_ADAPTIVE_LOCKS
#define fill_table(table, expand) \
  fill_jumps(table, expand, _); \
  table[locktag_adaptive] = expand(queuing); \
  fill_jumps(table, expand, _nested_); \
#define fill_table(table, expand) \
  fill_jumps(table, expand, _); \
  fill_jumps(table, expand, _nested_); \
  (void (*)(kmp_user_lock_p, const ident_t *)) __kmp_set_##l##_lock_location
  fill_table(__kmp_indirect_set_location, expand);
  (void (*)(kmp_user_lock_p, kmp_lock_flags_t)) __kmp_set_##l##_lock_flags
  fill_table(__kmp_indirect_set_flags, expand);
  (const ident_t *(*)(kmp_user_lock_p)) __kmp_get_##l##_lock_location
  fill_table(__kmp_indirect_get_location, expand);
  (kmp_lock_flags_t(*)(kmp_user_lock_p)) __kmp_get_##l##_lock_flags
  fill_table(__kmp_indirect_get_flags, expand);
void __kmp_cleanup_indirect_user_locks() {
  for (k = 0; k < KMP_NUM_I_LOCKS; ++k) {
    kmp_indirect_lock_t *l = __kmp_indirect_lock_pool[k];
      kmp_indirect_lock_t *ll = l;
      l = (kmp_indirect_lock_t *)l->lock->pool.next;
      KA_TRACE(20, ("__kmp_cleanup_indirect_user_locks: freeing %p from pool\n",
    __kmp_indirect_lock_pool[k] = NULL;
  kmp_indirect_lock_table_t *ptr = &__kmp_i_lock_table;
    for (kmp_uint32 row = 0; row < ptr->nrow_ptrs; ++row) {
      if (!ptr->table[row])
      for (kmp_uint32 col = 0; col < KMP_I_LOCK_CHUNK; ++col) {
        kmp_indirect_lock_t *l = &ptr->table[row][col];
          KMP_I_LOCK_FUNC(l, destroy)(l->lock);
          KA_TRACE(20, ("__kmp_cleanup_indirect_user_locks: destroy/freeing %p "
    kmp_indirect_lock_table_t *next_table = ptr->next_table;
    if (ptr != &__kmp_i_lock_table)
static void __kmp_init_futex_lock_with_checks(kmp_futex_lock_t *lck) {
  __kmp_init_futex_lock(lck);
static void __kmp_init_nested_futex_lock_with_checks(kmp_futex_lock_t *lck) {
  __kmp_init_nested_futex_lock(lck);
  return lck == lck->lk.self;
  return lck == lck->lk.initialized;
#if KMP_USE_ADAPTIVE_LOCKS
static void __kmp_init_adaptive_lock_with_checks(kmp_adaptive_lock_t *lck) {
  __kmp_init_adaptive_lock(lck);
  return lck == lck->lk.initialized;
  switch (user_lock_kind) {
#if KMP_USE_ADAPTIVE_LOCKS
  static int last_index = 0;
  new_block->locks = (void *)buffer;
  lck->pool.index = index;
  if (user_lock == NULL) {
#define IS_CRITICAL(lck) \
  ((__kmp_get_user_lock_flags_ != NULL) && \
   ((*__kmp_get_user_lock_flags_)(lck)&kmp_lf_critical_section))
      ("__kmp_cleanup_user_locks: free critical section lock %p (%p)\n",
  KA_TRACE(20, ("__kmp_cleanup_user_locks: free lock %p (%p)\n", lck,
  while (table_ptr != NULL) {
  while (block_ptr != NULL) {
This class safely opens and closes a C-style FILE* object using RAII semantics.
void set_stdout()
Set the FILE* object to stdout and output there No open call should happen before this call.
void open(const char *filename, const char *mode, const char *env_var=nullptr)
Open filename using mode.
void const char const char int ITT_FORMAT __itt_group_sync s
void const char const char int ITT_FORMAT __itt_group_sync x void const char ITT_FORMAT __itt_group_sync s void ITT_FORMAT __itt_group_sync p void ITT_FORMAT p void ITT_FORMAT p no args __itt_suppress_mode_t unsigned int mask
void const char const char int ITT_FORMAT __itt_group_sync x void const char ITT_FORMAT __itt_group_sync s void ITT_FORMAT __itt_group_sync p void ITT_FORMAT p void ITT_FORMAT p no args __itt_suppress_mode_t unsigned int void size_t ITT_FORMAT d void ITT_FORMAT p void ITT_FORMAT p __itt_model_site __itt_model_site_instance ITT_FORMAT p __itt_model_task __itt_model_task_instance ITT_FORMAT p void ITT_FORMAT p void ITT_FORMAT p void size_t ITT_FORMAT d void ITT_FORMAT p const wchar_t ITT_FORMAT s const char ITT_FORMAT s const char ITT_FORMAT s const char ITT_FORMAT s no args void ITT_FORMAT p size_t count
void const char const char int ITT_FORMAT __itt_group_sync x void const char ITT_FORMAT __itt_group_sync s void ITT_FORMAT __itt_group_sync p void ITT_FORMAT p void ITT_FORMAT p no args __itt_suppress_mode_t unsigned int void size_t size
void const char const char int ITT_FORMAT __itt_group_sync p
void const char const char int ITT_FORMAT __itt_group_sync x void const char ITT_FORMAT __itt_group_sync s void ITT_FORMAT __itt_group_sync p void ITT_FORMAT p void ITT_FORMAT p no args __itt_suppress_mode_t unsigned int void size_t ITT_FORMAT d void ITT_FORMAT p void ITT_FORMAT p __itt_model_site __itt_model_site_instance ITT_FORMAT p __itt_model_task __itt_model_task_instance ITT_FORMAT p void ITT_FORMAT p void ITT_FORMAT p void size_t ITT_FORMAT d void ITT_FORMAT p const wchar_t ITT_FORMAT s const char ITT_FORMAT s const char ITT_FORMAT s const char ITT_FORMAT s no args void ITT_FORMAT p size_t ITT_FORMAT d no args const wchar_t const wchar_t ITT_FORMAT s __itt_heap_function void size_t int ITT_FORMAT d __itt_heap_function void ITT_FORMAT p __itt_heap_function void void size_t int ITT_FORMAT d no args no args unsigned int ITT_FORMAT u const __itt_domain __itt_id ITT_FORMAT lu const __itt_domain __itt_id __itt_id __itt_string_handle ITT_FORMAT p const __itt_domain __itt_id ITT_FORMAT p const __itt_domain __itt_id __itt_timestamp __itt_timestamp ITT_FORMAT lu const __itt_domain __itt_id __itt_id __itt_string_handle ITT_FORMAT p const __itt_domain ITT_FORMAT p const __itt_domain __itt_string_handle unsigned long long ITT_FORMAT lu const __itt_domain __itt_string_handle unsigned long long ITT_FORMAT lu const __itt_domain __itt_id __itt_string_handle __itt_metadata_type size_t void ITT_FORMAT p const __itt_domain __itt_id __itt_string_handle const wchar_t size_t ITT_FORMAT lu const __itt_domain __itt_id head
void const char const char int ITT_FORMAT __itt_group_sync x void const char ITT_FORMAT __itt_group_sync s void ITT_FORMAT __itt_group_sync p void ITT_FORMAT p void ITT_FORMAT p no args __itt_suppress_mode_t unsigned int void size_t ITT_FORMAT d void ITT_FORMAT p void ITT_FORMAT p __itt_model_site __itt_model_site_instance ITT_FORMAT p __itt_model_task __itt_model_task_instance ITT_FORMAT p void ITT_FORMAT p void ITT_FORMAT p void size_t ITT_FORMAT d void ITT_FORMAT p const wchar_t ITT_FORMAT s const char ITT_FORMAT s const char ITT_FORMAT s const char ITT_FORMAT s no args void ITT_FORMAT p size_t ITT_FORMAT d no args const wchar_t const wchar_t ITT_FORMAT s __itt_heap_function void size_t int ITT_FORMAT d __itt_heap_function void ITT_FORMAT p __itt_heap_function void void size_t int ITT_FORMAT d no args no args unsigned int ITT_FORMAT u const __itt_domain __itt_id ITT_FORMAT lu const __itt_domain __itt_id __itt_id __itt_string_handle ITT_FORMAT p const __itt_domain __itt_id ITT_FORMAT p const __itt_domain __itt_id __itt_timestamp __itt_timestamp ITT_FORMAT lu const __itt_domain __itt_id __itt_id __itt_string_handle ITT_FORMAT p const __itt_domain ITT_FORMAT p const __itt_domain __itt_string_handle unsigned long long ITT_FORMAT lu const __itt_domain __itt_string_handle unsigned long long ITT_FORMAT lu const __itt_domain __itt_id __itt_string_handle __itt_metadata_type size_t void ITT_FORMAT p const __itt_domain __itt_id __itt_string_handle const wchar_t size_t ITT_FORMAT lu const __itt_domain __itt_id __itt_relation __itt_id ITT_FORMAT p const wchar_t int ITT_FORMAT __itt_group_mark d int
void const char const char int ITT_FORMAT __itt_group_sync x void const char ITT_FORMAT __itt_group_sync s void ITT_FORMAT __itt_group_sync p void ITT_FORMAT p void ITT_FORMAT p no args __itt_suppress_mode_t unsigned int void size_t ITT_FORMAT d void ITT_FORMAT p void ITT_FORMAT p __itt_model_site __itt_model_site_instance ITT_FORMAT p __itt_model_task __itt_model_task_instance ITT_FORMAT p void ITT_FORMAT p void ITT_FORMAT p void size_t ITT_FORMAT d void ITT_FORMAT p const wchar_t ITT_FORMAT s const char ITT_FORMAT s const char ITT_FORMAT s const char ITT_FORMAT s no args void ITT_FORMAT p size_t ITT_FORMAT d no args const wchar_t const wchar_t ITT_FORMAT s __itt_heap_function void size_t int ITT_FORMAT d __itt_heap_function void ITT_FORMAT p __itt_heap_function void void size_t int ITT_FORMAT d no args no args unsigned int ITT_FORMAT u const __itt_domain __itt_id ITT_FORMAT lu const __itt_domain __itt_id __itt_id __itt_string_handle ITT_FORMAT p const __itt_domain __itt_id ITT_FORMAT p const __itt_domain __itt_id __itt_timestamp __itt_timestamp ITT_FORMAT lu const __itt_domain __itt_id __itt_id __itt_string_handle ITT_FORMAT p const __itt_domain ITT_FORMAT p const __itt_domain __itt_string_handle unsigned long long ITT_FORMAT lu const __itt_domain __itt_string_handle unsigned long long ITT_FORMAT lu const __itt_domain __itt_id __itt_string_handle __itt_metadata_type size_t void ITT_FORMAT p const __itt_domain __itt_id __itt_string_handle const wchar_t size_t ITT_FORMAT lu const __itt_domain __itt_id __itt_relation __itt_id tail
volatile int __kmp_init_user_locks
#define KMP_PACK_64(HIGH_32, LOW_32)
#define KMP_YIELD_OVERSUB_ELSE_SPIN(count, time)
kmp_lock_t __kmp_global_lock
#define __kmp_entry_gtid()
kmp_info_t ** __kmp_threads
#define KMP_YIELD_OVERSUB()
#define KMP_INIT_YIELD(count)
#define KMP_INIT_BACKOFF(time)
#define __kmp_allocate(size)
int __kmp_env_consistency_check
static kmp_info_t * __kmp_thread_from_gtid(int gtid)
union KMP_ALIGN_CACHE kmp_info kmp_info_t
KMP_ARCH_X86 KMP_ARCH_X86 KMP_ARCH_X86 KMP_ARCH_X86 KMP_ARCH_X86 KMP_ARCH_X86 KMP_ARCH_X86 KMP_ARCH_X86 KMP_ARCH_X86<<, 2i, 1, KMP_ARCH_X86) ATOMIC_CMPXCHG(fixed2, shr, kmp_int16, 16, > KMP_ARCH_X86 KMP_ARCH_X86 kmp_uint32
#define KMP_DEBUG_ASSERT(cond)
#define KMP_ASSERT2(cond, msg)
unsigned long long kmp_uint64
static volatile kmp_i18n_cat_status_t status
static kmp_bootstrap_lock_t lock
void __kmp_printf(char const *format,...)
void __kmp_printf_no_lock(char const *format,...)
#define KMP_FSYNC_PREPARE(obj)
#define KMP_FSYNC_RELEASING(obj)
#define KMP_FSYNC_ACQUIRED(obj)
kmp_backoff_t __kmp_spin_backoff_params
size_t __kmp_base_user_lock_size
static void __kmp_destroy_nested_tas_lock_with_checks(kmp_tas_lock_t *lck)
static int __kmp_test_nested_drdpa_lock_with_checks(kmp_drdpa_lock_t *lck, kmp_int32 gtid)
void __kmp_init_queuing_lock(kmp_queuing_lock_t *lck)
static void __kmp_init_nested_drdpa_lock_with_checks(kmp_drdpa_lock_t *lck)
static bool __kmp_is_tas_lock_nestable(kmp_tas_lock_t *lck)
enum kmp_lock_kind __kmp_user_lock_kind
int __kmp_acquire_tas_lock(kmp_tas_lock_t *lck, kmp_int32 gtid)
kmp_user_lock_p __kmp_user_lock_allocate(void **user_lock, kmp_int32 gtid, kmp_lock_flags_t flags)
static int __kmp_is_queuing_lock_initialized(kmp_queuing_lock_t *lck)
size_t __kmp_user_lock_size
int __kmp_acquire_nested_tas_lock(kmp_tas_lock_t *lck, kmp_int32 gtid)
int(* __kmp_acquire_nested_user_lock_with_checks_)(kmp_user_lock_p lck, kmp_int32 gtid)
static const ident_t * __kmp_get_drdpa_lock_location(kmp_drdpa_lock_t *lck)
static void __kmp_set_drdpa_lock_location(kmp_drdpa_lock_t *lck, const ident_t *loc)
void __kmp_destroy_ticket_lock(kmp_ticket_lock_t *lck)
static __forceinline int __kmp_acquire_queuing_lock_timed_template(kmp_queuing_lock_t *lck, kmp_int32 gtid)
static void __kmp_set_queuing_lock_flags(kmp_queuing_lock_t *lck, kmp_lock_flags_t flags)
static void __kmp_init_nested_queuing_lock_with_checks(kmp_queuing_lock_t *lck)
void __kmp_init_nested_tas_lock(kmp_tas_lock_t *lck)
int __kmp_acquire_drdpa_lock(kmp_drdpa_lock_t *lck, kmp_int32 gtid)
void(* __kmp_set_user_lock_location_)(kmp_user_lock_p lck, const ident_t *loc)
static void __kmp_init_nested_ticket_lock_with_checks(kmp_ticket_lock_t *lck)
int(* __kmp_release_nested_user_lock_with_checks_)(kmp_user_lock_p lck, kmp_int32 gtid)
int __kmp_release_nested_tas_lock(kmp_tas_lock_t *lck, kmp_int32 gtid)
static int __kmp_is_ticket_lock_initialized(kmp_ticket_lock_t *lck)
int __kmp_release_nested_queuing_lock(kmp_queuing_lock_t *lck, kmp_int32 gtid)
int __kmp_test_drdpa_lock(kmp_drdpa_lock_t *lck, kmp_int32 gtid)
void __kmp_init_nested_drdpa_lock(kmp_drdpa_lock_t *lck)
int(* __kmp_test_nested_user_lock_with_checks_)(kmp_user_lock_p lck, kmp_int32 gtid)
void __kmp_destroy_nested_drdpa_lock(kmp_drdpa_lock_t *lck)
void __kmp_set_user_lock_vptrs(kmp_lock_kind_t user_lock_kind)
kmp_int32(* __kmp_get_user_lock_owner_)(kmp_user_lock_p lck)
static kmp_int32 __kmp_get_drdpa_lock_owner(kmp_drdpa_lock_t *lck)
static kmp_int32 __kmp_get_ticket_lock_owner(kmp_ticket_lock_t *lck)
kmp_block_of_locks * __kmp_lock_blocks
static int __kmp_test_nested_queuing_lock_with_checks(kmp_queuing_lock_t *lck, kmp_int32 gtid)
int __kmp_release_queuing_lock(kmp_queuing_lock_t *lck, kmp_int32 gtid)
void __kmp_destroy_tas_lock(kmp_tas_lock_t *lck)
kmp_uint64 __kmp_now_nsec()
static kmp_lock_flags_t __kmp_get_drdpa_lock_flags(kmp_drdpa_lock_t *lck)
void __kmp_init_nested_ticket_lock(kmp_ticket_lock_t *lck)
static bool __kmp_is_queuing_lock_nestable(kmp_queuing_lock_t *lck)
static int __kmp_acquire_drdpa_lock_with_checks(kmp_drdpa_lock_t *lck, kmp_int32 gtid)
static void __kmp_init_ticket_lock_with_checks(kmp_ticket_lock_t *lck)
static void __kmp_destroy_nested_drdpa_lock_with_checks(kmp_drdpa_lock_t *lck)
int __kmp_acquire_nested_drdpa_lock(kmp_drdpa_lock_t *lck, kmp_int32 gtid)
static int __kmp_release_queuing_lock_with_checks(kmp_queuing_lock_t *lck, kmp_int32 gtid)
int(* __kmp_is_user_lock_initialized_)(kmp_user_lock_p lck)
int __kmp_test_nested_ticket_lock(kmp_ticket_lock_t *lck, kmp_int32 gtid)
static void __kmp_init_queuing_lock_with_checks(kmp_queuing_lock_t *lck)
static kmp_int32 __kmp_get_queuing_lock_owner(kmp_queuing_lock_t *lck)
void(* __kmp_destroy_user_lock_with_checks_)(kmp_user_lock_p lck)
static int __kmp_acquire_nested_ticket_lock_with_checks(kmp_ticket_lock_t *lck, kmp_int32 gtid)
static const ident_t * __kmp_get_ticket_lock_location(kmp_ticket_lock_t *lck)
void __kmp_init_tas_lock(kmp_tas_lock_t *lck)
static void __kmp_set_queuing_lock_location(kmp_queuing_lock_t *lck, const ident_t *loc)
static int __kmp_acquire_nested_queuing_lock_with_checks(kmp_queuing_lock_t *lck, kmp_int32 gtid)
int __kmp_release_tas_lock(kmp_tas_lock_t *lck, kmp_int32 gtid)
int(* __kmp_acquire_user_lock_with_checks_)(kmp_user_lock_p lck, kmp_int32 gtid)
kmp_lock_table_t __kmp_user_lock_table
static void __kmp_init_drdpa_lock_with_checks(kmp_drdpa_lock_t *lck)
void(* __kmp_init_nested_user_lock_with_checks_)(kmp_user_lock_p lck)
void(* __kmp_init_user_lock_with_checks_)(kmp_user_lock_p lck)
int __kmp_test_nested_queuing_lock(kmp_queuing_lock_t *lck, kmp_int32 gtid)
static void __kmp_destroy_nested_ticket_lock_with_checks(kmp_ticket_lock_t *lck)
static int __kmp_test_nested_ticket_lock_with_checks(kmp_ticket_lock_t *lck, kmp_int32 gtid)
static int __kmp_release_nested_drdpa_lock_with_checks(kmp_drdpa_lock_t *lck, kmp_int32 gtid)
void __kmp_init_ticket_lock(kmp_ticket_lock_t *lck)
int __kmp_num_locks_in_block
static const ident_t * __kmp_get_queuing_lock_location(kmp_queuing_lock_t *lck)
static kmp_uint32 __kmp_bakery_check(void *now_serving, kmp_uint32 my_ticket)
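The bakery-check helper hints at the ticket ("bakery") scheme behind `kmp_ticket_lock_t`: each acquirer draws a monotonically increasing ticket and spins until the serving counter reaches it. A minimal, self-contained sketch of that technique follows; the member names `next_ticket`/`now_serving` and the yield-based spin are assumptions about the general approach, not a copy of the runtime's layout.

```cpp
// Minimal ticket-lock sketch (FIFO/bakery style), for illustration only.
#include <atomic>
#include <cstdint>
#include <thread>

struct ticket_lock {
  std::atomic<uint32_t> next_ticket{0}; // ticket dispenser
  std::atomic<uint32_t> now_serving{0}; // ticket currently allowed to run

  void acquire() {
    uint32_t my_ticket = next_ticket.fetch_add(1, std::memory_order_relaxed);
    // Spin until it is our turn; FIFO ordering falls out of ticket numbering.
    while (now_serving.load(std::memory_order_acquire) != my_ticket)
      std::this_thread::yield();
  }

  void release() {
    // Hand the lock to the next ticket holder.
    now_serving.fetch_add(1, std::memory_order_release);
  }
};
```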
int __kmp_release_drdpa_lock(kmp_drdpa_lock_t *lck, kmp_int32 gtid)
int __kmp_test_queuing_lock(kmp_queuing_lock_t *lck, kmp_int32 gtid)
void __kmp_user_lock_free(void **user_lock, kmp_int32 gtid, kmp_user_lock_p lck)
static __forceinline int __kmp_acquire_drdpa_lock_timed_template(kmp_drdpa_lock_t *lck, kmp_int32 gtid)
static void __kmp_destroy_queuing_lock_with_checks(kmp_queuing_lock_t *lck)
static int __kmp_test_tas_lock_with_checks(kmp_tas_lock_t *lck, kmp_int32 gtid)
static void __kmp_destroy_drdpa_lock_with_checks(kmp_drdpa_lock_t *lck)
static __forceinline int __kmp_acquire_tas_lock_timed_template(kmp_tas_lock_t *lck, kmp_int32 gtid)
static int __kmp_acquire_ticket_lock_with_checks(kmp_ticket_lock_t *lck, kmp_int32 gtid)
void __kmp_init_nested_queuing_lock(kmp_queuing_lock_t *lck)
int(* __kmp_test_user_lock_with_checks_)(kmp_user_lock_p lck, kmp_int32 gtid)
static void __kmp_init_tas_lock_with_checks(kmp_tas_lock_t *lck)
const ident_t *(* __kmp_get_user_lock_location_)(kmp_user_lock_p lck)
static void __kmp_init_nested_tas_lock_with_checks(kmp_tas_lock_t *lck)
int __kmp_release_nested_drdpa_lock(kmp_drdpa_lock_t *lck, kmp_int32 gtid)
static bool __kmp_is_ticket_lock_nestable(kmp_ticket_lock_t *lck)
static kmp_user_lock_p __kmp_lock_block_allocate()
static void __kmp_set_drdpa_lock_flags(kmp_drdpa_lock_t *lck, kmp_lock_flags_t flags)
int __kmp_acquire_nested_queuing_lock(kmp_queuing_lock_t *lck, kmp_int32 gtid)
kmp_user_lock_p __kmp_lookup_user_lock(void **user_lock, char const *func)
int __kmp_test_ticket_lock(kmp_ticket_lock_t *lck, kmp_int32 gtid)
static void __kmp_set_ticket_lock_flags(kmp_ticket_lock_t *lck, kmp_lock_flags_t flags)
static bool before(kmp_uint64 a, kmp_uint64 b)
void(* __kmp_destroy_nested_user_lock_with_checks_)(kmp_user_lock_p lck)
kmp_user_lock_p __kmp_lock_pool
int __kmp_acquire_ticket_lock(kmp_ticket_lock_t *lck, kmp_int32 gtid)
static int __kmp_release_nested_ticket_lock_with_checks(kmp_ticket_lock_t *lck, kmp_int32 gtid)
int(* __kmp_release_user_lock_with_checks_)(kmp_user_lock_p lck, kmp_int32 gtid)
static void __kmp_destroy_ticket_lock_with_checks(kmp_ticket_lock_t *lck)
static void __kmp_destroy_tas_lock_with_checks(kmp_tas_lock_t *lck)
void __kmp_destroy_drdpa_lock(kmp_drdpa_lock_t *lck)
void __kmp_destroy_nested_tas_lock(kmp_tas_lock_t *lck)
void __kmp_cleanup_user_locks(void)
void(* __kmp_destroy_user_lock_)(kmp_user_lock_p lck)
static int __kmp_test_nested_tas_lock_with_checks(kmp_tas_lock_t *lck, kmp_int32 gtid)
void __kmp_destroy_nested_queuing_lock(kmp_queuing_lock_t *lck)
static int __kmp_test_ticket_lock_with_checks(kmp_ticket_lock_t *lck, kmp_int32 gtid)
static int __kmp_test_drdpa_lock_with_checks(kmp_drdpa_lock_t *lck, kmp_int32 gtid)
int __kmp_test_nested_drdpa_lock(kmp_drdpa_lock_t *lck, kmp_int32 gtid)
void __kmp_init_drdpa_lock(kmp_drdpa_lock_t *lck)
void __kmp_spin_backoff(kmp_backoff_t *boff)
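`__kmp_spin_backoff` together with the tunable `__kmp_spin_backoff_params` points at a truncated exponential backoff applied inside contended spin loops. The sketch below shows the general shape under assumed field names (`step`, `max_backoff`, `min_tick`) and an assumed pause primitive; treat it as an illustration of the technique, not the runtime's exact policy.

```cpp
// Truncated exponential backoff sketch. Field names and the pause primitive
// are assumptions used for illustration.
#include <cstdint>
#if defined(__x86_64__) || defined(__i386__)
#include <immintrin.h> // _mm_pause
#endif

struct backoff_t {
  uint32_t step;        // current backoff step, doubled on each call
  uint32_t max_backoff; // cap so a waiter's delay stays bounded
  uint32_t min_tick;    // pause iterations per step unit
};

static inline void cpu_pause() {
#if defined(__x86_64__) || defined(__i386__)
  _mm_pause();
#endif
}

void spin_backoff(backoff_t *boff) {
  // Busy-wait proportional to the current step, then grow the step
  // geometrically up to the cap.
  for (uint32_t i = 0; i < boff->step * boff->min_tick; ++i)
    cpu_pause();
  if (boff->step < boff->max_backoff)
    boff->step <<= 1;
}
```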
void __kmp_destroy_queuing_lock(kmp_queuing_lock_t *lck)
void __kmp_validate_locks(void)
int __kmp_acquire_queuing_lock(kmp_queuing_lock_t *lck, kmp_int32 gtid)
static int __kmp_acquire_nested_tas_lock_with_checks(kmp_tas_lock_t *lck, kmp_int32 gtid)
int __kmp_release_nested_ticket_lock(kmp_ticket_lock_t *lck, kmp_int32 gtid)
int __kmp_acquire_nested_ticket_lock(kmp_ticket_lock_t *lck, kmp_int32 gtid)
static int __kmp_release_ticket_lock_with_checks(kmp_ticket_lock_t *lck, kmp_int32 gtid)
static int __kmp_acquire_tas_lock_with_checks(kmp_tas_lock_t *lck, kmp_int32 gtid)
static int __kmp_acquire_queuing_lock_with_checks(kmp_queuing_lock_t *lck, kmp_int32 gtid)
static int __kmp_release_drdpa_lock_with_checks(kmp_drdpa_lock_t *lck, kmp_int32 gtid)
int __kmp_test_nested_tas_lock(kmp_tas_lock_t *lck, kmp_int32 gtid)
kmp_lock_flags_t(* __kmp_get_user_lock_flags_)(kmp_user_lock_p lck)
static bool __kmp_is_drdpa_lock_nestable(kmp_drdpa_lock_t *lck)
static kmp_lock_index_t __kmp_lock_table_insert(kmp_user_lock_p lck)
static void __kmp_set_ticket_lock_location(kmp_ticket_lock_t *lck, const ident_t *loc)
static int __kmp_release_nested_tas_lock_with_checks(kmp_tas_lock_t *lck, kmp_int32 gtid)
static kmp_lock_flags_t __kmp_get_queuing_lock_flags(kmp_queuing_lock_t *lck)
static int __kmp_release_tas_lock_with_checks(kmp_tas_lock_t *lck, kmp_int32 gtid)
static __forceinline int __kmp_acquire_ticket_lock_timed_template(kmp_ticket_lock_t *lck, kmp_int32 gtid)
static int __kmp_release_nested_queuing_lock_with_checks(kmp_queuing_lock_t *lck, kmp_int32 gtid)
static int __kmp_acquire_nested_drdpa_lock_with_checks(kmp_drdpa_lock_t *lck, kmp_int32 gtid)
static void __kmp_destroy_nested_queuing_lock_with_checks(kmp_queuing_lock_t *lck)
static kmp_lock_flags_t __kmp_get_ticket_lock_flags(kmp_ticket_lock_t *lck)
static kmp_int32 __kmp_get_tas_lock_owner(kmp_tas_lock_t *lck)
int __kmp_test_tas_lock(kmp_tas_lock_t *lck, kmp_int32 gtid)
int __kmp_release_ticket_lock(kmp_ticket_lock_t *lck, kmp_int32 gtid)
static int __kmp_is_drdpa_lock_initialized(kmp_drdpa_lock_t *lck)
static int __kmp_test_queuing_lock_with_checks(kmp_queuing_lock_t *lck, kmp_int32 gtid)
void(* __kmp_set_user_lock_flags_)(kmp_user_lock_p lck, kmp_lock_flags_t flags)
void __kmp_destroy_nested_ticket_lock(kmp_ticket_lock_t *lck)
static void __kmp_release_bootstrap_lock(kmp_bootstrap_lock_t *lck)
struct kmp_base_tas_lock kmp_base_tas_lock_t
#define KMP_BIND_NESTED_USER_LOCK(kind)
static int __kmp_acquire_lock(kmp_lock_t *lck, kmp_int32 gtid)
enum kmp_lock_kind kmp_lock_kind_t
union kmp_user_lock * kmp_user_lock_p
union kmp_ticket_lock kmp_ticket_lock_t
static void __kmp_destroy_user_lock(kmp_user_lock_p lck)
#define KMP_LOCK_RELEASED
static int __kmp_acquire_bootstrap_lock(kmp_bootstrap_lock_t *lck)
#define KMP_LOCK_FREE(type)
#define KMP_LOCK_STRIP(v)
struct kmp_base_drdpa_lock kmp_base_drdpa_lock_t
#define KMP_BIND_NESTED_USER_LOCK_WITH_CHECKS(kind)
#define KMP_LOCK_ACQUIRED_NEXT
#define KMP_BIND_USER_LOCK_WITH_CHECKS(kind)
kmp_uint32 kmp_lock_flags_t
union kmp_drdpa_lock kmp_drdpa_lock_t
static void __kmp_release_lock(kmp_lock_t *lck, kmp_int32 gtid)
kmp_uint32 kmp_lock_index_t
#define KMP_LOCK_ACQUIRED_FIRST
struct kmp_base_queuing_lock kmp_base_queuing_lock_t
#define KMP_BIND_USER_LOCK(kind)
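The KMP_BIND_USER_LOCK* macros and `__kmp_set_user_lock_vptrs` describe a table of generic function pointers (such as `__kmp_acquire_user_lock_with_checks_`) that is rebound to the per-kind implementations once the lock kind is chosen. The sketch below illustrates that binding pattern with simplified stand-in types and only one table entry; the real table binds the full set of acquire/test/release/init/destroy entry points, including nested variants.

```cpp
// Sketch of rebinding a generic lock entry point to a per-kind implementation,
// in the spirit of __kmp_set_user_lock_vptrs. Names and types are stand-ins.
#include <cstdint>

struct tas_lock { int32_t poll; };
struct ticket_lock { uint32_t next_ticket, now_serving; };
union user_lock { tas_lock tas; ticket_lock ticket; };
enum lock_kind { lk_tas, lk_ticket };

static int acquire_tas(user_lock *lck, int32_t /*gtid*/)    { (void)lck; /* spin on lck->tas */    return 1; }
static int acquire_ticket(user_lock *lck, int32_t /*gtid*/) { (void)lck; /* spin on lck->ticket */ return 1; }

// Generic entry point rebound at runtime, analogous to
// __kmp_acquire_user_lock_with_checks_.
static int (*acquire_user_lock_)(user_lock *, int32_t) = nullptr;

void set_user_lock_vptrs(lock_kind kind) {
  // One entry shown; a bind macro would expand to the same assignment for
  // every operation of the chosen kind.
  acquire_user_lock_ = (kind == lk_tas) ? acquire_tas : acquire_ticket;
}
```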
static kmp_int32 __kmp_get_user_lock_owner(kmp_user_lock_p lck)
static void __kmp_init_bootstrap_lock(kmp_bootstrap_lock_t *lck)
union kmp_tas_lock kmp_tas_lock_t
static void __kmp_set_user_lock_flags(kmp_user_lock_p lck, kmp_lock_flags_t flags)
#define KMP_LOCK_STILL_HELD
#define KMP_LOCK_BUSY(v, type)
#define KMP_BOOTSTRAP_LOCK_INITIALIZER(lock)
struct kmp_base_ticket_lock kmp_base_ticket_lock_t
union kmp_queuing_lock kmp_queuing_lock_t
static const ident_t * __kmp_get_user_lock_location(kmp_user_lock_p lck)
#define KMP_XCHG_FIXED32(p, v)
#define KMP_COMPARE_AND_STORE_REL64(p, cv, sv)
#define KMP_COMPARE_AND_STORE_REL32(p, cv, sv)
#define KMP_ATOMIC_ST_REL(p, v)
#define KMP_COMPARE_AND_STORE_ACQ64(p, cv, sv)
#define KMP_COMPARE_AND_STORE_RET32(p, cv, sv)
bool __kmp_atomic_compare_store_acq(std::atomic< T > *p, T expected, T desired)
#define KMP_ATOMIC_LD_RLX(p)
#define KMP_COMPARE_AND_STORE_ACQ32(p, cv, sv)
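The KMP_COMPARE_AND_STORE_* / KMP_ATOMIC_* macros and the `__kmp_atomic_compare_store_acq` template all wrap the same primitive: a compare-and-swap with explicit memory ordering. A plausible shape for the template wrapper is shown below, assuming it forwards to `std::atomic::compare_exchange_strong` with acquire semantics on success; the exact orderings used by the runtime are an assumption here.

```cpp
// Hedged sketch: a thin acquire-CAS wrapper over std::atomic, in the spirit of
// __kmp_atomic_compare_store_acq. The chosen orderings are an assumption.
#include <atomic>

template <typename T>
bool atomic_compare_store_acq(std::atomic<T> *p, T expected, T desired) {
  // Installs `desired` and returns true only if *p still equals `expected`;
  // a successful exchange gives the caller acquire ordering.
  return p->compare_exchange_strong(expected, desired,
                                    std::memory_order_acquire,
                                    std::memory_order_relaxed);
}

// Usage example: trying to take ownership of a lock word.
// std::atomic<int> poll{0};
// bool got_it = atomic_compare_store_acq(&poll, 0, /*gtid + 1 =*/7);
```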
#define KMP_ATTRIBUTE_TARGET_RTM
#define KMP_ATOMIC_INC(p)
kmp_str_loc_t __kmp_str_loc_init(char const *psource, bool init_fname)
void __kmp_str_loc_free(kmp_str_loc_t *loc)
int test(unsigned upper_bound)
ompt_callbacks_active_t ompt_enabled
struct ident ident_t: the ident structure that describes a source location.
struct kmp_block_of_locks * next_block
union kmp_user_lock * next
kmp_lock_index_t allocated
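The trailing members (`next_block`, `next`, `allocated`), together with `__kmp_lock_block_allocate`, `__kmp_lock_pool`, and `__kmp_user_lock_allocate`, point to a block allocator backed by a free list: user locks are carved out of larger blocks and recycled through a pool rather than freed one at a time. The sketch below illustrates that scheme with simplified types; the block size, field layout, and lack of locking are assumptions made for brevity (the runtime serializes such allocation under a global lock).

```cpp
// Illustrative block allocator with a free pool, in the spirit of
// __kmp_lock_block_allocate / __kmp_user_lock_allocate. Not thread-safe.
#include <cstdlib>

struct user_lock {
  user_lock *next; // free-list link, only meaningful while pooled
  // ... lock payload lives here in the real union ...
};

struct block_of_locks {
  block_of_locks *next_block; // chain of allocated blocks, kept for cleanup
  void *locks;                // storage backing this block's locks
};

static user_lock *lock_pool = nullptr;        // recycled locks
static block_of_locks *lock_blocks = nullptr; // every block ever allocated
static const int num_locks_in_block = 1024;   // assumed block size

static user_lock *lock_block_allocate() {
  // Carve a fresh block; return the first lock and thread the rest onto the pool.
  user_lock *storage = static_cast<user_lock *>(
      std::malloc(sizeof(user_lock) * num_locks_in_block));
  block_of_locks *block =
      static_cast<block_of_locks *>(std::malloc(sizeof(block_of_locks)));
  block->locks = storage;
  block->next_block = lock_blocks;
  lock_blocks = block;
  for (int i = 1; i < num_locks_in_block - 1; ++i)
    storage[i].next = &storage[i + 1];
  storage[num_locks_in_block - 1].next = lock_pool;
  lock_pool = &storage[1];
  return &storage[0];
}

user_lock *user_lock_allocate() {
  // Prefer a recycled lock from the pool; otherwise carve a new block.
  if (lock_pool != nullptr) {
    user_lock *lck = lock_pool;
    lock_pool = lock_pool->next;
    return lck;
  }
  return lock_block_allocate();
}
```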