#define USE_NGO_STORES 1

#if KMP_MIC && USE_NGO_STORES
#define ngo_load(src) __m512d Vt = _mm512_load_pd((void *)(src))
#define ngo_store_icvs(dst, src) _mm512_storenrngo_pd((void *)(dst), Vt)
#define ngo_store_go(dst, src) _mm512_storenrngo_pd((void *)(dst), Vt)
#define ngo_sync() __asm__ volatile("lock; addl $0,0(%%rsp)" ::: "memory")
#else
#define ngo_load(src) ((void)0)
#define ngo_store_icvs(dst, src) copy_icvs((dst), (src))
#define ngo_store_go(dst, src) KMP_MEMCPY((dst), (src), CACHE_LINE)
#define ngo_sync() ((void)0)
#endif /* KMP_MIC && USE_NGO_STORES */
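// Usage sketch (illustrative, not part of the original file): the ngo_* macros
// are meant to be used as a pair within one scope, since ngo_load declares the
// local __m512d temporary Vt that both store macros consume, and ngo_sync()
// publishes the non-globally-ordered stores to other threads:
//
//   ngo_load(&src_icvs);                  // fill Vt from one cache line
//   ngo_store_icvs(&dst_icvs, &src_icvs); // streaming (NGO) store of the ICVs
//   ngo_sync();                           // fence so the stores become visible
//
// On non-MIC builds they degrade to plain copy_icvs()/KMP_MEMCPY and a no-op.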
void distributedBarrier::computeVarsForN(size_t n) {
  // ...
  int ncores_per_socket = /* ... */;
  if (ncores_per_socket <= 0)
    ncores_per_socket = 1;
  // ...
  if (nsockets == 1 || num_gos == 1)
    // ...
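// A minimal sketch of the partitioning this routine performs (an assumption
// reconstructed from the fields it uses, not the elided body): the n threads
// are split into "go" flags covering threads_per_go threads each, and the gos
// are batched into groups of gos_per_group:
//
//   num_gos = n / threads_per_go + (n % threads_per_go ? 1 : 0);
//   num_groups = num_gos / gos_per_group + (num_gos % gos_per_group ? 1 : 0);
//   threads_per_group = threads_per_go * gos_per_group;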
void distributedBarrier::computeGo(size_t n) {
  // ...
}

void distributedBarrier::resize(size_t nthr) {
  // ...
  go[j].go.store(next_go);
  // ...
}

void distributedBarrier::init(size_t nthr) {
  // ...
  computeVarsForN(nthr);
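// Note (summarized from the class declaration): distributedBarrier has a
// deleted default constructor and a static deallocate() helper, and it keeps
// MAX_ITERS sets of per-thread flags (flags_s *flags[MAX_ITERS]) so that
// consecutive barrier iterations operate on disjoint flag arrays.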
void __kmp_dist_barrier_wakeup(enum barrier_type bt, kmp_team_t *team,
                               size_t start, size_t stop, size_t inc,
                               size_t tid) {
  // ...
  for (size_t thr = start; thr < stop; thr += inc) {
    int gtid = other_threads[thr]->th.th_info.ds.ds_gtid;
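    // The elided body wakes this worker; a plausible sketch using the resume
    // helper this file references (the exact arguments are an assumption):
    //   __kmp_atomic_resume_64(gtid, (kmp_atomic_flag_64<> *)NULL);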
  team = this_thr->th.th_team;
  nproc = this_thr->th.th_team_nproc;
  other_threads = team->t.t_threads;
  // ...
  my_current_iter = b->iter[tid].iter;
  // ...
  group_leader = ((tid % b->threads_per_group) == 0);
  KA_TRACE(20,
           ("__kmp_dist_barrier_gather: T#%d(%d:%d) enter; barrier type %d\n",
            gtid, team->t.t_id, tid, bt));

#if USE_ITT_BUILD && USE_ITT_NOTIFY
  if (__kmp_forkjoin_frames_mode == 3 || __kmp_forkjoin_frames_mode == 2) {
    this_thr->th.th_bar_arrive_time = this_thr->th.th_bar_min_time =
        __itt_get_timestamp();
  }
#endif
    size_t group_start = tid + 1;
    size_t group_end = tid + b->threads_per_group;
    size_t threads_pending = 0;
    if (group_end > nproc)
      group_end = nproc;
    do {
      // ...
      for (size_t thr = group_start; thr < group_end; thr++) {
        threads_pending += b->flags[my_current_iter][thr].stillNeed;
      }
      // ...
      if (task_team != NULL) {
        // ...
        int tasks_completed = FALSE;
        // ...
      }
    } while (threads_pending > 0);
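    // While spinning in the loop above, a thread with an active task team
    // drops into the task scheduler instead of busy-waiting; a hedged sketch
    // of the elided call, modeled on the execute-tasks entry point this file
    // references (the argument choices are assumptions):
    //   __kmp_atomic_execute_tasks_64(this_thr, gtid, /*flag=*/nullptr,
    //                                 /*final_spin=*/FALSE, &tasks_completed,
    //                                 /*is_constrained=*/0);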
    // ...
    for (size_t thr = group_start; thr < group_end; thr++) {
      (*reduce)(this_thr->th.th_local.reduce_data,
                other_threads[thr]->th.th_local.reduce_data);
    }
    // ...
    b->flags[my_next_iter][tid].stillNeed = 1;
    // ...
    b->flags[my_current_iter][tid].stillNeed = 0;
    // ...
      do {
        // ...
        for (size_t thr = 0; thr < nproc; thr += b->threads_per_group) {
          threads_pending += b->flags[my_current_iter][thr].stillNeed;
        }
        // ...
        if (task_team != NULL) {
          // ...
          int tasks_completed = FALSE;
          // ...
        }
      } while (threads_pending > 0);
      // ...
      for (size_t thr = b->threads_per_group; thr < nproc;
           thr += b->threads_per_group) {
        (*reduce)(this_thr->th.th_local.reduce_data,
                  other_threads[thr]->th.th_local.reduce_data);
      }
      // ...
    b->flags[my_next_iter][tid].stillNeed = 1;
    // ...
    b->flags[my_current_iter][tid].stillNeed = 0;
  }
  KA_TRACE(20,
           ("__kmp_dist_barrier_gather: T#%d(%d:%d) exit for barrier type %d\n",
            gtid, team->t.t_id, tid, bt));
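// Note on the flag order above: the gather publishes stillNeed = 1 for the
// *next* iteration before clearing the flag for the current one, so a leader
// polling the current iteration's flags can never momentarily observe this
// thread as finished with both iterations at once.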
  KA_TRACE(20,
           ("__kmp_dist_barrier_release: T#%d(%d) enter; barrier type %d\n",
            gtid, tid, bt));
  // ...
  thr_bar = &this_thr->th.th_bar[bt].bb;
  // ...
  if (this_thr->th.th_used_in_team.load() != 1 &&
      this_thr->th.th_used_in_team.load() != 3) {
    // ...
    if (/* ... */ ||
        this_thr->th.th_used_in_team.load() == 0) {
      // ...
#if USE_ITT_BUILD && USE_ITT_NOTIFY
      if ((__itt_sync_create_ptr && itt_sync_obj == NULL) || KMP_ITT_DEBUG) {
        // ...
        __kmp_itt_task_starting(itt_sync_obj);
      }
      // ...
      if (itt_sync_obj != NULL)
        // ...
        __kmp_itt_task_finished(itt_sync_obj);
#endif
      // ...
    }
    if (this_thr->th.th_used_in_team.load() != 1 &&
        this_thr->th.th_used_in_team.load() != 3)
      // ...
  }
  // ...
  team = this_thr->th.th_team;
  // ...
  my_current_iter = b->iter[tid].iter;
  // ...
  my_go_index = tid / b->threads_per_go;
  if (this_thr->th.th_used_in_team.load() == 3) {
    // ...
  }
  if (b->go[my_go_index].go.load() != next_go) {
    // ...
    kmp_atomic_flag_64<false, true> my_flag(
        &(b->go[my_go_index].go), next_go, &(b->sleep[tid].sleep));
    // ...
    KMP_DEBUG_ASSERT(my_current_iter == b->iter[tid].iter ||
                     b->iter[tid].iter == 0);
  if (this_thr->th.th_used_in_team.load() == 1)
    // ...
  group_leader = ((tid % b->threads_per_group) == 0);
  // ...
  for (size_t go_idx = my_go_index + 1;
       go_idx < my_go_index + b->gos_per_group; go_idx++) {
    b->go[go_idx].go.store(next_go);
  }
  // ...
#if KMP_BARRIER_ICV_PUSH
  if (propagate_icvs) {
    // ...
    copy_icvs(&team->t.t_implicit_task_taskdata[tid].td_icvs, /* ... */);
    // ...
    copy_icvs(/* ... */, &team->t.t_implicit_task_taskdata[tid].td_icvs);
  }
#endif
  // ...
    size_t nproc = this_thr->th.th_team_nproc;
    size_t group_end = tid + b->threads_per_group;
    if (nproc < group_end)
      group_end = nproc;
    // ...
    team = this_thr->th.th_team;
    // ...
    my_current_iter = b->iter[tid].iter;
    // ...
#if KMP_BARRIER_ICV_PUSH
    if (propagate_icvs) {
      // ...
      copy_icvs(/* ... */,
                &team->t.t_implicit_task_taskdata[tid].td_icvs);
    }
#endif
    // ...
    for (size_t go_idx = 0; go_idx < b->num_gos; go_idx += b->gos_per_group) {
      b->go[go_idx].go.store(next_go);
    }
    // ...
    size_t nproc = this_thr->th.th_team_nproc;
    // ...
    __kmp_dist_barrier_wakeup(bt, team, /* ... */,
                              b->threads_per_group, tid);
    // ...
    for (size_t go_idx = 1; go_idx < b->gos_per_group; go_idx++) {
      b->go[go_idx].go.store(next_go);
    }
    // ...
    size_t nproc = this_thr->th.th_team_nproc;
    size_t group_end = tid + b->threads_per_group;
    if (nproc < group_end)
      group_end = nproc;
  // ...
  KA_TRACE(
      20, ("__kmp_dist_barrier_release: T#%d(%d:%d) exit for barrier type %d\n",
           gtid, team->t.t_id, tid, bt));
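// Summary of the release fan-out visible above: the team leader stores next_go
// into the first go flag of every group (stride gos_per_group), each group
// leader then stores next_go into the remaining go flags of its group, and
// every other thread waits on (or reads) the go flag for its own go index.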
template <bool cancellable = false>
static bool __kmp_linear_barrier_gather_template(
    enum barrier_type bt, kmp_info_t *this_thr, int gtid, int tid,
    void (*reduce)(void *, void *) USE_ITT_BUILD_ARG(void *itt_sync_obj)) {
  // ...
  KA_TRACE(
      20,
      ("__kmp_linear_barrier_gather: T#%d(%d:%d) enter for barrier type %d\n",
       gtid, team->t.t_id, tid, bt));
  KMP_DEBUG_ASSERT(this_thr == other_threads[this_thr->th.th_info.ds.ds_tid]);

#if USE_ITT_BUILD && USE_ITT_NOTIFY
  // ...
  if (__kmp_forkjoin_frames_mode == 3 || __kmp_forkjoin_frames_mode == 2) {
    this_thr->th.th_bar_arrive_time = this_thr->th.th_bar_min_time =
        __itt_get_timestamp();
  }
#endif
  // ...
    KA_TRACE(20,
             ("__kmp_linear_barrier_gather: T#%d(%d:%d) releasing T#%d(%d:%d) "
              "arrived(%p): %llu => %llu\n",
              /* ... */
              team->t.t_id, 0, &thr_bar->b_arrived, thr_bar->b_arrived,
              /* ... */));
    // ...
    int nproc = this_thr->th.th_team_nproc;
    // ...
    for (i = 1; i < nproc; ++i) {
      // ...
      KA_TRACE(20,
               ("__kmp_linear_barrier_gather: T#%d(%d:%d) wait T#%d(%d:%d) "
                "arrived(%p) == %llu\n",
                /* ... */
                &other_threads[i]->th.th_bar[bt].bb.b_arrived, new_state));
      // ...
      kmp_flag_64<> flag(
          &other_threads[i]->th.th_bar[bt].bb.b_arrived, new_state);
      // ...
#if USE_ITT_BUILD && USE_ITT_NOTIFY
      // ...
      if (__kmp_forkjoin_frames_mode == 2) {
        this_thr->th.th_bar_min_time = KMP_MIN(
            this_thr->th.th_bar_min_time, other_threads[i]->th.th_bar_min_time);
      }
#endif
      if (reduce) {
        KA_TRACE(100,
                 ("__kmp_linear_barrier_gather: T#%d(%d:%d) += T#%d(%d:%d)\n",
                  /* ... */));
        // ...
        (*reduce)(this_thr->th.th_local.reduce_data,
                  other_threads[i]->th.th_local.reduce_data);
      }
    }
    // ...
    KA_TRACE(20, ("__kmp_linear_barrier_gather: T#%d(%d:%d) set team %d "
                  "arrived(%p) = %llu\n",
                  gtid, team->t.t_id, tid, team->t.t_id, &team_bar->b_arrived,
                  /* ... */));
  // ...
  KA_TRACE(
      20,
      ("__kmp_linear_barrier_gather: T#%d(%d:%d) exit for barrier type %d\n",
       gtid, team->t.t_id, tid, bt));
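// The linear gather protocol in brief: each worker advances its own b_arrived
// by KMP_BARRIER_STATE_BUMP, while the primary spins on every worker's
// b_arrived until it reaches new_state, folds reduce_data in when a reduction
// is active, and finally publishes the same value to the team's b_arrived.
// A hedged two-line sketch of the pairing:
//
//   worker:  thr_bar->b_arrived += KMP_BARRIER_STATE_BUMP;
//   primary: for each i, wait until other_threads[i]'s b_arrived == new_state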
template <bool cancellable = false>
static bool __kmp_linear_barrier_release_template(
    enum barrier_type bt, kmp_info_t *this_thr, int gtid, int tid,
    int propagate_icvs USE_ITT_BUILD_ARG(void *itt_sync_obj)) {
  // ...
    kmp_uint32 nproc = this_thr->th.th_team_nproc;
    // ...
    other_threads = team->t.t_threads;
    // ...
    KA_TRACE(20, ("__kmp_linear_barrier_release: T#%d(%d:%d) primary enter for "
                  "barrier type %d\n",
                  gtid, team->t.t_id, tid, bt));
    // ...
#if KMP_BARRIER_ICV_PUSH
      // ...
      if (propagate_icvs) {
        ngo_load(&team->t.t_implicit_task_taskdata[0].td_icvs);
        for (i = 1; i < nproc; ++i) {
          // ...
          ngo_store_icvs(&team->t.t_implicit_task_taskdata[i].td_icvs,
                         &team->t.t_implicit_task_taskdata[0].td_icvs);
        }
        ngo_sync();
      }
#endif
      // ...
      for (i = 1; i < nproc; ++i) {
        // ...
        KA_TRACE(
            20,
            ("__kmp_linear_barrier_release: T#%d(%d:%d) releasing T#%d(%d:%d) "
             "go(%p): %u => %u\n",
             gtid, team->t.t_id, tid, other_threads[i]->th.th_info.ds.ds_gtid,
             team->t.t_id, i, &other_threads[i]->th.th_bar[bt].bb.b_go,
             other_threads[i]->th.th_bar[bt].bb.b_go,
             /* ... */));
        // ...
      }
    // ...
    KA_TRACE(20, ("__kmp_linear_barrier_release: T#%d wait go(%p) == %u\n",
                  gtid, &thr_bar->b_go, KMP_BARRIER_STATE_BUMP));
    // ...
#if USE_ITT_BUILD && USE_ITT_NOTIFY
    if ((__itt_sync_create_ptr && itt_sync_obj == NULL) || KMP_ITT_DEBUG) {
      // ...
      __kmp_itt_task_starting(itt_sync_obj);
    }
    // ...
    if (itt_sync_obj != NULL)
      // ...
      __kmp_itt_task_finished(itt_sync_obj);
#endif
  // ...
  KA_TRACE(20,
           ("__kmp_linear_barrier_release: T#%d(%d:%d) set go(%p) = %u\n",
            /* ... */));
  // ...
  KA_TRACE(
      20,
      ("__kmp_linear_barrier_release: T#%d(%d:%d) exit for barrier type %d\n",
       gtid, team->t.t_id, tid, bt));
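// Release mirrors gather: after optionally pushing ICVs with the ngo stores,
// the primary bumps each worker's b_go by KMP_BARRIER_STATE_BUMP, and each
// worker spins on its own b_go until it observes the new value, then resets it
// for the next barrier.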
  kmp_uint32 nproc = this_thr->th.th_team_nproc;
  // ...
  KA_TRACE(
      20, ("__kmp_tree_barrier_gather: T#%d(%d:%d) enter for barrier type %d\n",
           gtid, team->t.t_id, tid, bt));
  KMP_DEBUG_ASSERT(this_thr == other_threads[this_thr->th.th_info.ds.ds_tid]);

#if USE_ITT_BUILD && USE_ITT_NOTIFY
  // ...
  if (__kmp_forkjoin_frames_mode == 3 || __kmp_forkjoin_frames_mode == 2) {
    this_thr->th.th_bar_arrive_time = this_thr->th.th_bar_min_time =
        __itt_get_timestamp();
  }
#endif
  // ...
  child_tid = (tid << branch_bits) + 1;
  if (child_tid < nproc) {
    // ...
    do {
      kmp_info_t *child_thr = other_threads[child_tid];
      kmp_bstate_t *child_bar = &child_thr->th.th_bar[bt].bb;
      // ...
      if (child + 1 <= branch_factor && child_tid + 1 < nproc)
        KMP_CACHE_PREFETCH(
            &other_threads[child_tid + 1]->th.th_bar[bt].bb.b_arrived);
      KA_TRACE(20,
               ("__kmp_tree_barrier_gather: T#%d(%d:%d) wait T#%d(%d:%u) "
                "arrived(%p) == %llu\n",
                /* ... */
                team->t.t_id, child_tid, &child_bar->b_arrived, new_state));
      // ...
#if USE_ITT_BUILD && USE_ITT_NOTIFY
      // ...
      if (__kmp_forkjoin_frames_mode == 2) {
        this_thr->th.th_bar_min_time = KMP_MIN(this_thr->th.th_bar_min_time,
                                               child_thr->th.th_bar_min_time);
      }
#endif
      if (reduce) {
        KA_TRACE(100,
                 ("__kmp_tree_barrier_gather: T#%d(%d:%d) += T#%d(%d:%u)\n",
                  /* ... */
                  team->t.t_id, child_tid));
        // ...
        (*reduce)(this_thr->th.th_local.reduce_data,
                  child_thr->th.th_local.reduce_data);
      }
      // ...
    } while (child <= branch_factor && child_tid < nproc);
  }
  // ...
    kmp_int32 parent_tid = (tid - 1) >> branch_bits;
    KA_TRACE(20,
             ("__kmp_tree_barrier_gather: T#%d(%d:%d) releasing T#%d(%d:%d) "
              "arrived(%p): %llu => %llu\n",
              /* ... */
              team->t.t_id, parent_tid, &thr_bar->b_arrived, thr_bar->b_arrived,
              /* ... */));
    // ...
    team->t.t_bar[bt].b_arrived = new_state;
    // ...
    KA_TRACE(20, ("__kmp_tree_barrier_gather: T#%d(%d:%d) set team %d "
                  "arrived(%p) = %llu\n",
                  gtid, team->t.t_id, tid, team->t.t_id,
                  &team->t.t_bar[bt].b_arrived, team->t.t_bar[bt].b_arrived));
  // ...
  KA_TRACE(
      20, ("__kmp_tree_barrier_gather: T#%d(%d:%d) exit for barrier type %d\n",
           gtid, team->t.t_id, tid, bt));
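// To make the tid arithmetic above concrete (illustrative example, not from
// the original file): with branch_bits = 2, i.e. a 4-ary tree,
//
//   kmp_uint32 child_tid = (tid << branch_bits) + 1; // first child
//   kmp_int32 parent_tid = (tid - 1) >> branch_bits; // parent, for tid != 0
//
// thread 0 owns children 1..4, thread 1 owns 5..8, and thread 5's parent is
// (5 - 1) >> 2 == 1.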
    KA_TRACE(20, ("__kmp_tree_barrier_release: T#%d wait go(%p) == %u\n", gtid,
                  &thr_bar->b_go, KMP_BARRIER_STATE_BUMP));
    // ...
#if USE_ITT_BUILD && USE_ITT_NOTIFY
    if ((__itt_sync_create_ptr && itt_sync_obj == NULL) || KMP_ITT_DEBUG) {
      // ...
      __kmp_itt_task_starting(itt_sync_obj);
    }
    // ...
    if (itt_sync_obj != NULL)
      // ...
      __kmp_itt_task_finished(itt_sync_obj);
#endif
    // ...
    KA_TRACE(20,
             ("__kmp_tree_barrier_release: T#%d(%d:%d) set go(%p) = %u\n", gtid,
              /* ... */));
    // ...
    KA_TRACE(20, ("__kmp_tree_barrier_release: T#%d(%d:%d) primary enter for "
                  "barrier type %d\n",
                  gtid, team->t.t_id, tid, bt));
  // ...
  nproc = this_thr->th.th_team_nproc;
  child_tid = (tid << branch_bits) + 1;
  if (child_tid < nproc) {
    // ...
    do {
      kmp_info_t *child_thr = other_threads[child_tid];
      kmp_bstate_t *child_bar = &child_thr->th.th_bar[bt].bb;
      // ...
      if (child + 1 <= branch_factor && child_tid + 1 < nproc)
        KMP_CACHE_PREFETCH(
            &other_threads[child_tid + 1]->th.th_bar[bt].bb.b_go);
      // ...
#if KMP_BARRIER_ICV_PUSH
      // ...
      if (propagate_icvs) {
        __kmp_init_implicit_task(team->t.t_ident,
                                 team->t.t_threads[child_tid], team,
                                 /* ... */);
        copy_icvs(&team->t.t_implicit_task_taskdata[child_tid].td_icvs,
                  &team->t.t_implicit_task_taskdata[0].td_icvs);
      }
#endif
      KA_TRACE(20,
               ("__kmp_tree_barrier_release: T#%d(%d:%d) releasing T#%d(%d:%u) "
                "go(%p): %u => %u\n",
                /* ... */
                team->t.t_id, child_tid, &child_bar->b_go, child_bar->b_go,
                /* ... */));
      // ...
    } while (child <= branch_factor && child_tid < nproc);
  }
  // ...
  KA_TRACE(
      20, ("__kmp_tree_barrier_release: T#%d(%d:%d) exit for barrier type %d\n",
           gtid, team->t.t_id, tid, bt));
  kmp_uint32 num_threads = this_thr->th.th_team_nproc;
  // ...
  KA_TRACE(
      20,
      ("__kmp_hyper_barrier_gather: T#%d(%d:%d) enter for barrier type %d\n",
       gtid, team->t.t_id, tid, bt));
  KMP_DEBUG_ASSERT(this_thr == other_threads[this_thr->th.th_info.ds.ds_tid]);

#if USE_ITT_BUILD && USE_ITT_NOTIFY
  // ...
  if (__kmp_forkjoin_frames_mode == 3 || __kmp_forkjoin_frames_mode == 2) {
    this_thr->th.th_bar_arrive_time = this_thr->th.th_bar_min_time =
        __itt_get_timestamp();
  }
#endif
  // ...
  for (level = 0, offset = 1; offset < num_threads;
       level += branch_bits, offset <<= branch_bits) {
    // ...
    if (((tid >> level) & (branch_factor - 1)) != 0) {
      kmp_int32 parent_tid = tid & ~((1 << (level + branch_bits)) - 1);
      // ...
      KA_TRACE(20,
               ("__kmp_hyper_barrier_gather: T#%d(%d:%d) releasing T#%d(%d:%d) "
                "arrived(%p): %llu => %llu\n",
                /* ... */
                team->t.t_id, parent_tid, &thr_bar->b_arrived,
                /* ... */));
      // ...
      p_flag.set_waiter(other_threads[parent_tid]);
      // ...
    }
    // ...
    for (child = 1, child_tid = tid + (1 << level);
         child < branch_factor && child_tid < num_threads;
         child++, child_tid += (1 << level)) {
      kmp_info_t *child_thr = other_threads[child_tid];
      kmp_bstate_t *child_bar = &child_thr->th.th_bar[bt].bb;
      // ...
      if (child + 1 < branch_factor && next_child_tid < num_threads)
        KMP_CACHE_PREFETCH(
            &other_threads[next_child_tid]->th.th_bar[bt].bb.b_arrived);
      KA_TRACE(20,
               ("__kmp_hyper_barrier_gather: T#%d(%d:%d) wait T#%d(%d:%u) "
                "arrived(%p) == %llu\n",
                /* ... */
                team->t.t_id, child_tid, &child_bar->b_arrived, new_state));
      // ...
#if USE_ITT_BUILD && USE_ITT_NOTIFY
      // ...
      if (__kmp_forkjoin_frames_mode == 2) {
        this_thr->th.th_bar_min_time = KMP_MIN(this_thr->th.th_bar_min_time,
                                               child_thr->th.th_bar_min_time);
      }
#endif
      if (reduce) {
        KA_TRACE(100,
                 ("__kmp_hyper_barrier_gather: T#%d(%d:%d) += T#%d(%d:%u)\n",
                  /* ... */
                  team->t.t_id, child_tid));
        // ...
        (*reduce)(this_thr->th.th_local.reduce_data,
                  child_thr->th.th_local.reduce_data);
      }
    }
  }
  // ...
    team->t.t_bar[bt].b_arrived = new_state;
    KA_TRACE(20, ("__kmp_hyper_barrier_gather: T#%d(%d:%d) set team %d "
                  "arrived(%p) = %llu\n",
                  gtid, team->t.t_id, tid, team->t.t_id,
                  &team->t.t_bar[bt].b_arrived, team->t.t_bar[bt].b_arrived));
  // ...
  KA_TRACE(
      20, ("__kmp_hyper_barrier_gather: T#%d(%d:%d) exit for barrier type %d\n",
           gtid, team->t.t_id, tid, bt));
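// Concrete example of the hypercube pairing above (illustrative): with
// branch_bits = 1 (pairwise exchanges), the parent at a given level is
// tid & ~((1 << (level + branch_bits)) - 1). At level 0 a thread with the low
// bit set reports to its even neighbor (tid & ~1); at level 1 thread 2 reports
// to 2 & ~3 == 0; so all threads are folded together in roughly
// log2(num_threads) rounds.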
#define KMP_REVERSE_HYPER_BAR
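// When KMP_REVERSE_HYPER_BAR is defined, the release phase walks the same
// hypercube as the gather but in the opposite order (highest level and highest
// child first); this is why the release code below first counts up to the
// parent's level and then iterates back down.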
    KA_TRACE(20, ("__kmp_hyper_barrier_release: T#%d(%d:%d) primary enter for "
                  "barrier type %d\n",
                  gtid, team->t.t_id, tid, bt));
#if KMP_BARRIER_ICV_PUSH
    if (propagate_icvs) {
      // ...
      copy_icvs(&thr_bar->th_fixed_icvs,
                &team->t.t_implicit_task_taskdata[tid].td_icvs);
    }
#endif
  // ...
    KA_TRACE(20, ("__kmp_hyper_barrier_release: T#%d wait go(%p) == %u\n", gtid,
                  &thr_bar->b_go, KMP_BARRIER_STATE_BUMP));
    // ...
#if USE_ITT_BUILD && USE_ITT_NOTIFY
    if ((__itt_sync_create_ptr && itt_sync_obj == NULL) || KMP_ITT_DEBUG) {
      // ...
      __kmp_itt_task_starting(itt_sync_obj);
    }
    // ...
    if (itt_sync_obj != NULL)
      // ...
      __kmp_itt_task_finished(itt_sync_obj);
#endif
    // ...
    KA_TRACE(20,
             ("__kmp_hyper_barrier_release: T#%d(%d:%d) set go(%p) = %u\n",
              /* ... */));
  // ...
  num_threads = this_thr->th.th_team_nproc;
  other_threads = team->t.t_threads;

#ifdef KMP_REVERSE_HYPER_BAR
  // Count up to the correct level for the parent
  for (level = 0, offset = 1;
       offset < num_threads && (((tid >> level) & (branch_factor - 1)) == 0);
       level += branch_bits, offset <<= branch_bits)
    ;
  // Now go down from there
  for (level -= branch_bits, offset >>= branch_bits; offset != 0;
       level -= branch_bits, offset >>= branch_bits)
#else
  // Go down the tree, level by level
  for (level = 0, offset = 1; offset < num_threads;
       level += branch_bits, offset <<= branch_bits)
#endif // KMP_REVERSE_HYPER_BAR
  {
#ifdef KMP_REVERSE_HYPER_BAR
    // ...
    for (child = (child < branch_factor - 1) ? child : branch_factor - 1,
        child_tid = tid + (child << level);
         child >= 1; child--, child_tid -= (1 << level))
#else
    if (((tid >> level) & (branch_factor - 1)) != 0)
      // ...
      break;
    // ...
    for (child = 1, child_tid = tid + (1 << level);
         child < branch_factor && child_tid < num_threads;
         child++, child_tid += (1 << level))
#endif // KMP_REVERSE_HYPER_BAR
    {
      if (child_tid >= num_threads)
        continue;
      // ...
      kmp_info_t *child_thr = other_threads[child_tid];
      kmp_bstate_t *child_bar = &child_thr->th.th_bar[bt].bb;
      // ...
#ifdef KMP_REVERSE_HYPER_BAR
      if (child - 1 >= 1 && next_child_tid < num_threads)
#else
      if (child + 1 < branch_factor && next_child_tid < num_threads)
#endif // KMP_REVERSE_HYPER_BAR
        KMP_CACHE_PREFETCH(
            &other_threads[next_child_tid]->th.th_bar[bt].bb.b_go);
      // ...
#if KMP_BARRIER_ICV_PUSH
      if (propagate_icvs)
        copy_icvs(&child_bar->th_fixed_icvs, &thr_bar->th_fixed_icvs);
#endif
      // ...
      KA_TRACE(
          20,
          ("__kmp_hyper_barrier_release: T#%d(%d:%d) releasing T#%d(%d:%u) "
           "go(%p): %u => %u\n",
           /* ... */
           team->t.t_id, child_tid, &child_bar->b_go, child_bar->b_go,
           /* ... */));
      // ...
    }
  }
#if KMP_BARRIER_ICV_PUSH
  if (propagate_icvs &&
      /* ... */) {
    // ...
    copy_icvs(&team->t.t_implicit_task_taskdata[tid].td_icvs,
              &thr_bar->th_fixed_icvs);
  }
#endif
  // ...
  KA_TRACE(
      20,
      ("__kmp_hyper_barrier_release: T#%d(%d:%d) exit for barrier type %d\n",
       gtid, team->t.t_id, tid, bt));
  bool uninitialized = thr_bar->team == NULL;
  bool team_changed = team != thr_bar->team;
  bool team_sz_changed = nproc != thr_bar->nproc;
  bool tid_changed = tid != thr_bar->old_tid;
  bool retval = false;
  // ...
  if (uninitialized || team_sz_changed) {
    // ...
  }
  if (uninitialized || team_sz_changed || tid_changed) {
    thr_bar->my_level = thr_bar->depth - 1;
    thr_bar->parent_tid = -1;
    // ...
      if (d == thr_bar->depth - 2) {
        thr_bar->parent_tid = 0;
        thr_bar->my_level = d;
        // ...
      } else if ((rem = tid % thr_bar->skip_per_level[d + 1]) != 0) {
        // ...
        thr_bar->parent_tid = tid - rem;
        thr_bar->my_level = d;
        // ...
      }
    // ...
    __kmp_type_convert(/* ... */
                           (thr_bar->skip_per_level[thr_bar->my_level]),
                       &(thr_bar->offset));
    thr_bar->old_tid = tid;
    // ...
    thr_bar->team = team;
    thr_bar->parent_bar =
        &team->t.t_threads[thr_bar->parent_tid]->th.th_bar[bt].bb;
  }
  if (uninitialized || team_changed || tid_changed) {
    thr_bar->team = team;
    thr_bar->parent_bar =
        &team->t.t_threads[thr_bar->parent_tid]->th.th_bar[bt].bb;
    // ...
  }
  if (uninitialized || team_sz_changed || tid_changed) {
    thr_bar->nproc = nproc;
    thr_bar->leaf_kids = thr_bar->base_leaf_kids;
    if (thr_bar->my_level == 0)
      thr_bar->leaf_kids = 0;
    if (thr_bar->leaf_kids && (kmp_uint32)tid + thr_bar->leaf_kids + 1 > nproc)
      // ...
    thr_bar->leaf_state = 0;
    for (int i = 0; i < thr_bar->leaf_kids; ++i)
      ((char *)&(thr_bar->leaf_state))[7 - i] = 1;
  }
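// The byte trick above builds a "leaf arrived" mask inside the 8-byte
// leaf_state word: leaf i owns byte (7 - i). On a little-endian layout, three
// leaf kids give leaf_state == 0x0101010000000000, one nonzero byte per leaf,
// so a parent can observe all of its leaves with a single 64-bit compare while
// each leaf checks in by setting only its own byte of the parent's flag.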
  kmp_uint32 nproc = this_thr->th.th_team_nproc;
  // ...
  int level = team->t.t_level;
  if (other_threads[0]
          ->th.th_teams_microtask)
    if (this_thr->th.th_teams_size.nteams > 1)
      ++level;
  if (level == 1)
    thr_bar->use_oncore_barrier = 1;
  else
    thr_bar->use_oncore_barrier = 0;
  // ...
  KA_TRACE(20, ("__kmp_hierarchical_barrier_gather: T#%d(%d:%d) enter for "
                "barrier type %d\n",
                gtid, team->t.t_id, tid, bt));
  KMP_DEBUG_ASSERT(this_thr == other_threads[this_thr->th.th_info.ds.ds_tid]);

#if USE_ITT_BUILD && USE_ITT_NOTIFY
  // ...
  if (__kmp_forkjoin_frames_mode == 3 || __kmp_forkjoin_frames_mode == 2) {
    this_thr->th.th_bar_arrive_time = __itt_get_timestamp();
  }
#endif

  if (thr_bar->my_level) { // not a leaf
    // ...
    if (__kmp_dflt_blocktime == KMP_MAX_BLOCKTIME &&
        thr_bar->use_oncore_barrier) {
      if (thr_bar->leaf_kids) {
        // ...
        kmp_uint64 leaf_state =
            KMP_MASTER_TID(tid)
                ? thr_bar->b_arrived | thr_bar->leaf_state
                : team->t.t_bar[bt].b_arrived | thr_bar->leaf_state;
        KA_TRACE(20, ("__kmp_hierarchical_barrier_gather: T#%d(%d:%d) waiting "
                      /* ... */
                      gtid, team->t.t_id, tid));
        // ...
        for (child_tid = tid + 1; child_tid <= tid + thr_bar->leaf_kids;
             child_tid++) {
          KA_TRACE(100, ("__kmp_hierarchical_barrier_gather: T#%d(%d:%d) += "
                         /* ... */
                         gtid, team->t.t_id, tid,
                         /* ... */));
          // ...
          (*reduce)(this_thr->th.th_local.reduce_data,
                    other_threads[child_tid]->th.th_local.reduce_data);
        }
        // ...
      }
      // ...
        kmp_uint32 last = tid + thr_bar->skip_per_level[d + 1],
                   skip = thr_bar->skip_per_level[d];
        // ...
        for (child_tid = tid + skip; child_tid < (int)last; child_tid += skip) {
          kmp_info_t *child_thr = other_threads[child_tid];
          kmp_bstate_t *child_bar = &child_thr->th.th_bar[bt].bb;
          KA_TRACE(20, ("__kmp_hierarchical_barrier_gather: T#%d(%d:%d) wait "
                        /* ... */
                        "arrived(%p) == %llu\n",
                        gtid, team->t.t_id, tid,
                        /* ... */
                        child_tid, &child_bar->b_arrived, new_state));
          // ...
          KA_TRACE(100, ("__kmp_hierarchical_barrier_gather: T#%d(%d:%d) += "
                         /* ... */
                         gtid, team->t.t_id, tid,
                         /* ... */));
          // ...
          (*reduce)(this_thr->th.th_local.reduce_data,
                    child_thr->th.th_local.reduce_data);
        }
    } else {
      // ...
        kmp_uint32 last = tid + thr_bar->skip_per_level[d + 1],
                   skip = thr_bar->skip_per_level[d];
        // ...
        for (child_tid = tid + skip; child_tid < (int)last; child_tid += skip) {
          kmp_info_t *child_thr = other_threads[child_tid];
          kmp_bstate_t *child_bar = &child_thr->th.th_bar[bt].bb;
          KA_TRACE(20, ("__kmp_hierarchical_barrier_gather: T#%d(%d:%d) wait "
                        /* ... */
                        "arrived(%p) == %llu\n",
                        gtid, team->t.t_id, tid,
                        /* ... */
                        child_tid, &child_bar->b_arrived, new_state));
          // ...
          KA_TRACE(100, ("__kmp_hierarchical_barrier_gather: T#%d(%d:%d) += "
                         /* ... */
                         gtid, team->t.t_id, tid,
                         /* ... */));
          // ...
          (*reduce)(this_thr->th.th_local.reduce_data,
                    child_thr->th.th_local.reduce_data);
        }
    }
  }
  // ...
    KA_TRACE(20, ("__kmp_hierarchical_barrier_gather: T#%d(%d:%d) releasing"
                  " T#%d(%d:%d) arrived(%p): %llu => %llu\n",
                  gtid, team->t.t_id, tid,
                  /* ... */
                  thr_bar->parent_tid, &thr_bar->b_arrived, thr_bar->b_arrived,
                  /* ... */));
    // ...
    if (/* ... */ ||
        !thr_bar->use_oncore_barrier) {
      // ...
      kmp_flag_64<> flag(&thr_bar->b_arrived,
                         other_threads[thr_bar->parent_tid]);
      // ...
    } else {
      // ...
      kmp_flag_oncore flag(&thr_bar->parent_bar->b_arrived,
                           thr_bar->offset + 1);
      flag.set_waiter(other_threads[thr_bar->parent_tid]);
      // ...
    }
  // ...
    team->t.t_bar[bt].b_arrived = new_state;
    KA_TRACE(20, ("__kmp_hierarchical_barrier_gather: T#%d(%d:%d) set team %d "
                  "arrived(%p) = %llu\n",
                  gtid, team->t.t_id, tid, team->t.t_id,
                  &team->t.t_bar[bt].b_arrived, team->t.t_bar[bt].b_arrived));
  // ...
  KA_TRACE(20, ("__kmp_hierarchical_barrier_gather: T#%d(%d:%d) exit for "
                "barrier type %d\n",
                gtid, team->t.t_id, tid, bt));
  bool team_change = false;
  // ...
    KA_TRACE(20, ("__kmp_hierarchical_barrier_release: T#%d(%d:%d) primary "
                  "entered barrier type %d\n",
                  gtid, team->t.t_id, tid, bt));
  // ...
    if (!thr_bar->use_oncore_barrier ||
        /* ... */
        thr_bar->team == NULL) {
      // ...
      TCW_8(thr_bar->b_go,
            KMP_INIT_BARRIER_STATE);
    } else {
      // ...
      kmp_flag_oncore flag(&thr_bar->parent_bar->b_go, KMP_BARRIER_STATE_BUMP,
                           thr_bar->offset + 1, bt,
                           this_thr USE_ITT_BUILD_ARG(itt_sync_obj));
      // ...
      if (thr_bar->wait_flag ==
          KMP_BARRIER_SWITCH_TO_OWN_FLAG) {
        TCW_8(thr_bar->b_go,
              KMP_INIT_BARRIER_STATE);
      } else {
        (RCAST(volatile char *,
               &(thr_bar->parent_bar->b_go)))[thr_bar->offset + 1] = 0;
      }
    }
  // ...
  KA_TRACE(20,
           ("__kmp_hierarchical_barrier_release: T#%d(%d:%d) set go(%p) = %u\n",
            /* ... */));
  // ...
  nproc = this_thr->th.th_team_nproc;
  int level = team->t.t_level;
  if (team->t.t_threads[0]
          ->th.th_teams_microtask) {
    if (team->t.t_pkfn != (microtask_t)__kmp_teams_master &&
        this_thr->th.th_teams_level == level)
      ++level;
    if (this_thr->th.th_teams_size.nteams > 1)
      ++level;
  }
  if (level == 1)
    thr_bar->use_oncore_barrier = 1;
  else
    thr_bar->use_oncore_barrier = 0;
  // ...
  unsigned short int old_leaf_kids = thr_bar->leaf_kids;
  kmp_uint64 old_leaf_state = thr_bar->leaf_state;
  // ...
#if KMP_BARRIER_ICV_PUSH
  if (propagate_icvs) {
    // ...
    if (KMP_MASTER_TID(tid)) {
      copy_icvs(&thr_bar->th_fixed_icvs,
                &team->t.t_implicit_task_taskdata[tid].td_icvs);
    } else if (__kmp_dflt_blocktime == KMP_MAX_BLOCKTIME &&
               thr_bar->use_oncore_barrier) {
      if (!thr_bar->my_level)
        // ...
        copy_icvs(&team->t.t_implicit_task_taskdata[tid].td_icvs,
                  &thr_bar->parent_bar->th_fixed_icvs);
    } else {
      if (thr_bar->my_level)
        // ...
        copy_icvs(&thr_bar->th_fixed_icvs, &thr_bar->parent_bar->th_fixed_icvs);
      // ...
      copy_icvs(&team->t.t_implicit_task_taskdata[tid].td_icvs,
                &thr_bar->parent_bar->th_fixed_icvs);
    }
  }
#endif
  // ...
  if (thr_bar->my_level) {
    // ...
    if (/* ... */ &&
        thr_bar->use_oncore_barrier) {
      // ...
      for (child_tid = thr_bar->skip_per_level[1]; child_tid < (int)nproc;
           child_tid += thr_bar->skip_per_level[1]) {
        kmp_bstate_t *child_bar =
            &team->t.t_threads[child_tid]->th.th_bar[bt].bb;
        KA_TRACE(20, ("__kmp_hierarchical_barrier_release: T#%d(%d:%d) "
                      "releasing T#%d(%d:%d)"
                      " go(%p): %u => %u\n",
                      gtid, team->t.t_id, tid,
                      /* ... */
                      child_tid, &child_bar->b_go, child_bar->b_go,
                      /* ... */));
        ngo_store_go(&child_bar->th_fixed_icvs, &thr_bar->th_fixed_icvs);
      }
      // ...
      TCW_8(thr_bar->b_go,
            KMP_INIT_BARRIER_STATE);
      // ...
      if (thr_bar->leaf_kids) {
        // ...
        if (/* ... */
            old_leaf_kids < thr_bar->leaf_kids) {
          if (old_leaf_kids) {
            thr_bar->b_go |= old_leaf_state;
          }
          // ...
          last = tid + thr_bar->skip_per_level[1];
          // ...
          for (child_tid = tid + 1 + old_leaf_kids; child_tid < (int)last;
               child_tid++) {
            kmp_info_t *child_thr = team->t.t_threads[child_tid];
            kmp_bstate_t *child_bar = &child_thr->th.th_bar[bt].bb;
            // ...
            KA_TRACE(
                20,
                ("__kmp_hierarchical_barrier_release: T#%d(%d:%d) releasing"
                 " T#%d(%d:%d) go(%p): %u => %u\n",
                 /* ... */
                 team->t.t_id, child_tid, &child_bar->b_go, child_bar->b_go,
                 /* ... */));
            // ...
          }
        } else {
          // ...
          thr_bar->b_go |= thr_bar->leaf_state;
        }
      }
    } else {
      // ...
      for (int d = thr_bar->my_level - 1; d >= 0;
           d--) {
        last = tid + thr_bar->skip_per_level[d + 1];
        kmp_uint32 skip = thr_bar->skip_per_level[d];
        // ...
        for (child_tid = tid + skip; child_tid < (int)last; child_tid += skip) {
          kmp_info_t *child_thr = team->t.t_threads[child_tid];
          kmp_bstate_t *child_bar = &child_thr->th.th_bar[bt].bb;
          KA_TRACE(20, ("__kmp_hierarchical_barrier_release: T#%d(%d:%d) "
                        "releasing T#%d(%d:%d) go(%p): %u => %u\n",
                        gtid, team->t.t_id, tid,
                        /* ... */
                        child_tid, &child_bar->b_go, child_bar->b_go,
                        /* ... */));
          // ...
        }
      }
    }
  }
#if KMP_BARRIER_ICV_PUSH
  // ...
  copy_icvs(&team->t.t_implicit_task_taskdata[tid].td_icvs,
            &thr_bar->th_fixed_icvs);
#endif
  // ...
  KA_TRACE(20, ("__kmp_hierarchical_barrier_release: T#%d(%d:%d) exit for "
                "barrier type %d\n",
                gtid, team->t.t_id, tid, bt));
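// Taken together: with infinite blocktime and an un-nested team, the
// hierarchical barrier lets leaves check in on designated bytes ("offset"
// bits) of their parent's 64-bit flag, lets a parent release a whole leaf
// group with a single OR into its own b_go, and piggybacks ICV propagation on
// the release via ngo stores; otherwise it falls back to ordinary per-thread
// b_arrived/b_go flags, level by level.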
  constexpr operator bool() const { return false; }
  // ...

template <bool cancellable = false>
static int __kmp_barrier_template(enum barrier_type bt, int gtid, int is_split,
                                  size_t reduce_size, void *reduce_data,
                                  void (*reduce)(void *, void *)) {
  // ...
#if OMPT_SUPPORT && OMPT_OPTIONAL
  ompt_data_t *my_task_data;
  ompt_data_t *my_parallel_data;
  void *return_address;
  ompt_sync_region_t barrier_kind;
#endif
  KA_TRACE(15, ("__kmp_barrier: T#%d(%d:%d) has arrived\n", gtid,
                /* ... */));
  // ...
    my_task_data = OMPT_CUR_TASK_DATA(this_thr);
    my_parallel_data = OMPT_CUR_TEAM_DATA(this_thr);
    return_address = OMPT_LOAD_RETURN_ADDRESS(gtid);
    // ...
          barrier_kind, ompt_scope_begin, my_parallel_data, my_task_data,
          return_address);
    // ...
          barrier_kind, ompt_scope_begin, my_parallel_data, my_task_data,
          return_address);
    // ...
    auto *ompt_thr_info = &this_thr->th.ompt_thread_info;
    switch (barrier_kind) {
    case ompt_sync_region_barrier_explicit:
      ompt_thr_info->state = ompt_state_wait_barrier_explicit;
      break;
    case ompt_sync_region_barrier_implicit_workshare:
      ompt_thr_info->state = ompt_state_wait_barrier_implicit_workshare;
      break;
    case ompt_sync_region_barrier_implicit_parallel:
      ompt_thr_info->state = ompt_state_wait_barrier_implicit_parallel;
      break;
    case ompt_sync_region_barrier_teams:
      ompt_thr_info->state = ompt_state_wait_barrier_teams;
      break;
    case ompt_sync_region_barrier_implementation:
      // ...
      ompt_thr_info->state = ompt_state_wait_barrier_implementation;
      // ...
    }
#if ENABLE_LIBOMPTARGET
  // ...
  if (UNLIKELY(kmp_target_sync_cb != NULL))
    (*kmp_target_sync_cb)(/* ... */);
#endif
  // ...
  if (!team->t.t_serialized) {
    // ...
    void *itt_sync_obj = NULL;
    // ...
    if (__itt_sync_create_ptr || KMP_ITT_DEBUG)
      itt_sync_obj = __kmp_itt_barrier_object(gtid, bt, 1);
    // ...
    KA_TRACE(15, ("__kmp_barrier: T#%d(%d:%d) past tasking barrier\n", gtid,
                  /* ... */));
    // ...
      this_thr->th.th_team_bt_intervals =
          team->t.t_implicit_task_taskdata[tid].td_icvs.bt_intervals;
      this_thr->th.th_team_bt_set =
          team->t.t_implicit_task_taskdata[tid].td_icvs.bt_set;
    // ...
    if (__itt_sync_create_ptr || KMP_ITT_DEBUG)
      __kmp_itt_barrier_starting(gtid, itt_sync_obj);
    // ...
      team->t.t_bar[bt].b_master_arrived += 1;
    // ...
      this_thr->th.th_bar[bt].bb.b_worker_arrived += 1;
    // ...
    if (reduce != NULL) {
      // ...
      this_thr->th.th_local.reduce_data = reduce_data;
    }
    // ...
      team->t.t_bar[bt].b_team_arrived += 1;
    // ...
      if (__itt_sync_create_ptr || KMP_ITT_DEBUG)
        __kmp_itt_barrier_middle(gtid, itt_sync_obj);
      // ...
#if USE_ITT_BUILD && USE_ITT_NOTIFY
      if ((__itt_frame_submit_v3_ptr || KMP_ITT_DEBUG) &&
          __kmp_forkjoin_frames_mode &&
          (this_thr->th.th_teams_microtask == NULL ||
           this_thr->th.th_teams_size.nteams == 1) &&
          team->t.t_active_level == 1) {
        // ...
        int nproc = this_thr->th.th_team_nproc;
        // ...
        switch (__kmp_forkjoin_frames_mode) {
        case 1:
          __kmp_itt_frame_submit(gtid, this_thr->th.th_frame_time, cur_time, 0,
                                 /* ... */);
          this_thr->th.th_frame_time = cur_time;
          break;
        case 2:
          __kmp_itt_frame_submit(gtid, this_thr->th.th_bar_min_time, cur_time,
                                 /* ... */);
          break;
        case 3:
          if (__itt_metadata_add_ptr) {
            // ...
            kmp_uint64 delta = cur_time - this_thr->th.th_bar_arrive_time;
            // ...
            this_thr->th.th_bar_arrive_time = 0;
            for (i = 1; i < nproc; ++i) {
              delta += (cur_time - other_threads[i]->th.th_bar_arrive_time);
              other_threads[i]->th.th_bar_arrive_time = 0;
            }
            __kmp_itt_metadata_imbalance(gtid, this_thr->th.th_frame_time,
                                         /* ... */);
          }
          __kmp_itt_frame_submit(gtid, this_thr->th.th_frame_time, cur_time, 0,
                                 /* ... */);
          this_thr->th.th_frame_time = cur_time;
          break;
        }
      }
#endif
    // ...
      if (__itt_sync_create_ptr || KMP_ITT_DEBUG)
        __kmp_itt_barrier_middle(gtid, itt_sync_obj);
    // ...
    if ((status == 1 || !is_split) && !cancelled) {
      // ...
        if (__itt_sync_create_ptr || KMP_ITT_DEBUG)
          __kmp_itt_barrier_finished(gtid, itt_sync_obj);
      // ...
    }
    // ...
    if (this_thr->th.th_task_team != NULL) {
      // ...
      void *itt_sync_obj = NULL;
      if (__itt_sync_create_ptr || KMP_ITT_DEBUG) {
        itt_sync_obj = __kmp_itt_barrier_object(gtid, bt, 1);
        __kmp_itt_barrier_starting(gtid, itt_sync_obj);
      }
      // ...
      KMP_DEBUG_ASSERT(
          this_thr->th.th_task_team->tt.tt_found_proxy_tasks == TRUE ||
          this_thr->th.th_task_team->tt.tt_hidden_helper_task_encountered ==
              TRUE);
      // ...
      if (__itt_sync_create_ptr || KMP_ITT_DEBUG)
        __kmp_itt_barrier_finished(gtid, itt_sync_obj);
    }
  // ...
  KA_TRACE(15, ("__kmp_barrier: T#%d(%d:%d) is leaving with return value %d\n",
                /* ... */));
  // ...
        barrier_kind, ompt_scope_end, my_parallel_data, my_task_data,
        return_address);
  // ...
        barrier_kind, ompt_scope_end, my_parallel_data, my_task_data,
        return_address);
  // ...
    this_thr->th.ompt_thread_info.state = ompt_state_work_parallel;
  // ...
  return (int)cancelled;
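// Note on the cancellable plumbing: is_cancellable (whose constexpr
// operator bool above returns false in the non-cancellable specialization)
// lets this template compile the cancellation checks away when
// cancellable == false, so the plain __kmp_barrier entry point below pays
// nothing for OpenMP cancellation support.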
int __kmp_barrier(enum barrier_type bt, int gtid, int is_split,
                  size_t reduce_size, void *reduce_data,
                  void (*reduce)(void *, void *)) {
  // ...
#if defined(KMP_GOMP_COMPAT)
  // ...
#endif
  // ...
  if (!team->t.t_serialized) {
  void *itt_sync_obj = NULL;
  // ...
  if (__itt_sync_create_ptr || KMP_ITT_DEBUG)
    // ...
#if ((USE_ITT_BUILD && USE_ITT_NOTIFY) || defined KMP_DEBUG)
  int nproc = this_thr->th.th_team_nproc;
#endif
  // ...
  team = this_thr->th.th_team;
  // ...
  team_id = team->t.t_id;
  kmp_info_t *master_thread = this_thr->th.th_team_master;
  if (master_thread != team->t.t_threads[0]) {
    // ...
  }
  // ...
  KA_TRACE(10, ("__kmp_join_barrier: T#%d(%d:%d) arrived at join barrier\n",
                gtid, team_id, tid));
  // ...
  ompt_data_t *my_task_data;
  ompt_data_t *my_parallel_data;
  void *codeptr = NULL;
  int ds_tid = this_thr->th.th_info.ds.ds_tid;
  // ...
    codeptr = team->t.ompt_team_info.master_return_address;
  my_task_data = OMPT_CUR_TASK_DATA(this_thr);
  my_parallel_data = OMPT_CUR_TEAM_DATA(this_thr);
  ompt_sync_region_t sync_kind = ompt_sync_region_barrier_implicit_parallel;
  ompt_state_t ompt_state = ompt_state_wait_barrier_implicit_parallel;
  if (this_thr->th.ompt_thread_info.parallel_flags & ompt_parallel_league) {
    sync_kind = ompt_sync_region_barrier_teams;
    ompt_state = ompt_state_wait_barrier_teams;
  }
  // ...
        sync_kind, ompt_scope_begin, my_parallel_data, my_task_data, codeptr);
  // ...
        sync_kind, ompt_scope_begin, my_parallel_data, my_task_data, codeptr);
  // ...
    this_thr->th.ompt_thread_info.task_data = *OMPT_CUR_TASK_DATA(this_thr);
  // ...
  this_thr->th.ompt_thread_info.state = ompt_state;
  // ...
    KA_TRACE(10, ("__kmp_join_barrier: T#%d(%d:%d) past tasking barrier\n",
                  gtid, team_id, tid));
  // ...
  KA_TRACE(20, ("__kmp_join_barrier: T#%d, old team = %d, old task_team = "
                "%p, th_task_team = %p\n",
                /* ... */
                team->t.t_task_team[this_thr->th.th_task_state],
                this_thr->th.th_task_team));
  // ...
    this_thr->th.th_team_bt_intervals =
        team->t.t_implicit_task_taskdata[tid].td_icvs.bt_intervals;
    this_thr->th.th_team_bt_set =
        team->t.t_implicit_task_taskdata[tid].td_icvs.bt_set;
  // ...
  if (__itt_sync_create_ptr || KMP_ITT_DEBUG)
    __kmp_itt_barrier_starting(gtid, itt_sync_obj);
  // ...
#if KMP_STATS_ENABLED
  // ...
  for (int i = 0; i < team->t.t_nproc; ++i) {
    kmp_info_t *team_thread = team->t.t_threads[i];
    if (team_thread == this_thr)
      continue;
    team_thread->th.th_stats->setIdleFlag();
    if (__kmp_dflt_blocktime != KMP_MAX_BLOCKTIME &&
        team_thread->th.th_sleep_loc != NULL)
      __kmp_null_resume_wrapper(team_thread);
  }
#endif
  // ...
  if (__itt_sync_create_ptr || KMP_ITT_DEBUG)
    __kmp_itt_barrier_middle(gtid, itt_sync_obj);
  // ...
#if USE_ITT_BUILD && USE_ITT_NOTIFY
  if ((__itt_frame_submit_v3_ptr || KMP_ITT_DEBUG) &&
      __kmp_forkjoin_frames_mode &&
      (this_thr->th.th_teams_microtask == NULL ||
       this_thr->th.th_teams_size.nteams == 1) &&
      team->t.t_active_level == 1) {
    // ...
    switch (__kmp_forkjoin_frames_mode) {
    case 1:
      __kmp_itt_frame_submit(gtid, this_thr->th.th_frame_time, cur_time, 0,
                             /* ... */);
      break;
    case 2:
      __kmp_itt_frame_submit(gtid, this_thr->th.th_bar_min_time, cur_time, 1,
                             /* ... */);
      break;
    case 3:
      if (__itt_metadata_add_ptr) {
        // ...
        kmp_uint64 delta = cur_time - this_thr->th.th_bar_arrive_time;
        // ...
        this_thr->th.th_bar_arrive_time = 0;
        for (int i = 1; i < nproc; ++i) {
          delta += (cur_time - other_threads[i]->th.th_bar_arrive_time);
          other_threads[i]->th.th_bar_arrive_time = 0;
        }
        __kmp_itt_metadata_imbalance(gtid, this_thr->th.th_frame_time,
                                     cur_time, delta, 0);
      }
      __kmp_itt_frame_submit(gtid, this_thr->th.th_frame_time, cur_time, 0,
                             /* ... */);
      this_thr->th.th_frame_time = cur_time;
      break;
    }
  }
#endif
  // ...
  if (__itt_sync_create_ptr || KMP_ITT_DEBUG)
    __kmp_itt_barrier_middle(gtid, itt_sync_obj);
  // ...
  KA_TRACE(10,
           ("__kmp_join_barrier: T#%d(%d:%d) says all %d team threads arrived\n",
            gtid, team_id, tid, nproc));
  // ...
  KA_TRACE(10, ("__kmp_join_barrier: T#%d(%d:%d) leaving\n", gtid, team_id,
                tid));
  kmp_team_t *team = (tid == 0) ? this_thr->th.th_team : NULL;
  // ...
  void *itt_sync_obj = NULL;
  // ...
  KA_TRACE(10, ("__kmp_fork_barrier: T#%d(%d:%d) has arrived\n", gtid,
                (team != NULL) ? team->t.t_id : -1, tid));
  // ...
#if USE_ITT_BUILD && USE_ITT_NOTIFY
    if (__itt_sync_create_ptr || KMP_ITT_DEBUG) {
      // ...
      __kmp_itt_barrier_middle(gtid, itt_sync_obj);
    }
#endif
    // ...
    for (i = 1; i < team->t.t_nproc; ++i) {
      KA_TRACE(500,
               ("__kmp_fork_barrier: T#%d(%d:0) checking T#%d(%d:%d) fork go "
                /* ... */
                gtid, team->t.t_id, other_threads[i]->th.th_info.ds.ds_gtid,
                team->t.t_id, other_threads[i]->th.th_info.ds.ds_tid,
                /* ... */));
      // ...
    }
    // ...
      this_thr->th.th_team_bt_intervals =
          team->t.t_implicit_task_taskdata[tid].td_icvs.bt_intervals;
      this_thr->th.th_team_bt_set =
          team->t.t_implicit_task_taskdata[tid].td_icvs.bt_set;
  // ...
  ompt_state_t ompt_state = this_thr->th.ompt_thread_info.state;
  if (/* ... */
      (ompt_state == ompt_state_wait_barrier_teams ||
       ompt_state == ompt_state_wait_barrier_implicit_parallel)) {
    int ds_tid = this_thr->th.th_info.ds.ds_tid;
    ompt_data_t *task_data = (team)
                                 ? OMPT_CUR_TASK_DATA(this_thr)
                                 : &(this_thr->th.ompt_thread_info.task_data);
    this_thr->th.ompt_thread_info.state = ompt_state_overhead;
    // ...
    void *codeptr = NULL;
    // ...
    codeptr = team ? team->t.ompt_team_info.master_return_address : NULL;
    ompt_sync_region_t sync_kind = ompt_sync_region_barrier_implicit_parallel;
    if (this_thr->th.ompt_thread_info.parallel_flags & ompt_parallel_league)
      sync_kind = ompt_sync_region_barrier_teams;
    // ...
        sync_kind, ompt_scope_end, NULL, task_data, codeptr);
    // ...
        sync_kind, ompt_scope_end, NULL, task_data, codeptr);
    // ...
        ompt_scope_end, NULL, task_data, 0, ds_tid, ompt_task_implicit);
  }
  // ...
    this_thr->th.th_task_team = NULL;
    // ...
#if USE_ITT_BUILD && USE_ITT_NOTIFY
    if (__itt_sync_create_ptr || KMP_ITT_DEBUG) {
      // ...
      __kmp_itt_barrier_finished(gtid, itt_sync_obj);
    }
#endif
    // ...
    KA_TRACE(10, ("__kmp_fork_barrier: T#%d is leaving early\n", gtid));
  // ...
#if KMP_BARRIER_ICV_PULL
  // ...
  KA_TRACE(10, ("__kmp_fork_barrier: T#%d(%d) is PULLing ICVs\n", gtid, tid));
  // ...
  copy_icvs(&team->t.t_implicit_task_taskdata[tid].td_icvs,
            &team->t.t_threads[0]
                 /* ... */);
#endif
  // ...
#if KMP_AFFINITY_SUPPORTED
  // ...
  if (__kmp_affinity.type == affinity_balanced && team->t.t_size_changed) {
    __kmp_balanced_affinity(this_thr, team->t.t_nproc);
  }
  // ...
  if (this_thr->th.th_new_place == this_thr->th.th_current_place) {
    KA_TRACE(100, ("__kmp_fork_barrier: T#%d already in correct place %d\n",
                   /* ... */
                   this_thr->th.th_current_place));
  } else {
    __kmp_affinity_bind_place(gtid);
  }
#endif
  // ...
  if (team->t.t_display_affinity
#if KMP_AFFINITY_SUPPORTED
      || (__kmp_affinity.type == affinity_balanced && team->t.t_size_changed)
#endif
  ) {
    // ...
    this_thr->th.th_prev_num_threads = team->t.t_nproc;
    this_thr->th.th_prev_level = team->t.t_level;
  }
  // ...
#if USE_ITT_BUILD && USE_ITT_NOTIFY
  if (__itt_sync_create_ptr || KMP_ITT_DEBUG) {
    // ...
    __kmp_itt_barrier_finished(gtid, itt_sync_obj);
  }
#endif
  // ...
  KA_TRACE(10, ("__kmp_fork_barrier: T#%d(%d:%d) is leaving\n", gtid,
                team->t.t_id, tid));
#if KMP_BARRIER_ICV_PULL
  // ...
  KF_TRACE(10, ("__kmp_setup_icv_copy: PULL: T#%d this_thread=%p team=%p\n", 0,
                team->t.t_threads[0], team));
#elif KMP_BARRIER_ICV_PUSH
  // ...
  KF_TRACE(10, ("__kmp_setup_icv_copy: PUSH: T#%d this_thread=%p team=%p\n", 0,
                team->t.t_threads[0], team));
#else
  // ...
  for (int f = 1; f < new_nproc; ++f) {
    // ...
    KF_TRACE(10, ("__kmp_setup_icv_copy: LINEAR: T#%d this_thread=%p team=%p\n",
                  f, team->t.t_threads[f], team));
    // ...
    KF_TRACE(10, ("__kmp_setup_icv_copy: LINEAR: T#%d this_thread=%p team=%p\n",
                  f, team->t.t_threads[f], team));
  }
#endif
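// The three KF_TRACE strings above correspond to the three ICV propagation
// strategies __kmp_setup_icv_copy can be compiled with: PULL (each worker
// copies ICVs from thread 0's barrier struct during the fork barrier), PUSH
// (the primary pushes ICVs to workers during barrier release), and the LINEAR
// fallback that copies ICVs to each of the new_nproc threads in a plain loop.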