#define USE_NGO_STORES 1

#if KMP_MIC && USE_NGO_STORES
// ICV copying on KNC uses non-globally-ordered (cache-line) stores
#define ngo_load(src) __m512d Vt = _mm512_load_pd((void *)(src))
#define ngo_store_icvs(dst, src) _mm512_storenrngo_pd((void *)(dst), Vt)
#define ngo_store_go(dst, src) _mm512_storenrngo_pd((void *)(dst), Vt)
#define ngo_sync() __asm__ volatile("lock; addl $0,0(%%rsp)" ::: "memory")
#else
#define ngo_load(src) ((void)0)
#define ngo_store_icvs(dst, src) copy_icvs((dst), (src))
#define ngo_store_go(dst, src) KMP_MEMCPY((dst), (src), CACHE_LINE)
#define ngo_sync() ((void)0)
#endif /* KMP_MIC && USE_NGO_STORES */
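// --- Editor's sketch (not part of the runtime): on non-MIC builds the ngo_*
// macros above reduce to plain copies. demo_icvs_t, DEMO_CACHE_LINE, and the
// demo_store_* helpers below are hypothetical stand-ins for
// kmp_internal_control_t, CACHE_LINE, and copy_icvs()/KMP_MEMCPY().
#include <cstddef>
#include <cstring>

struct demo_icvs_t {
  long data[8]; // stands in for one cache line of internal control variables
};
constexpr std::size_t DEMO_CACHE_LINE = 64;

static inline void demo_store_icvs(demo_icvs_t *dst, const demo_icvs_t *src) {
  *dst = *src; // the ngo_store_icvs() fallback is a structure copy
}
static inline void demo_store_go(void *dst, const void *src) {
  std::memcpy(dst, src, DEMO_CACHE_LINE); // the ngo_store_go() fallback
}
// --- end sketch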
void distributedBarrier::computeVarsForN(size_t n) {
  // ...
  int ncores_per_socket =
      __kmp_topology->calculate_ratio(core_level, socket_level);
  // ...
  if (ncores_per_socket <= 0)
    ncores_per_socket = 1;
  // ...
  if (nsockets == 1 || num_gos == 1)
    num_groups = 1;
  // ...
}
void distributedBarrier::computeGo(size_t n) {
  // ...
}
void distributedBarrier::resize(size_t nthr) {
  // ...
  go[j].go.store(next_go);
  // ...
}
void distributedBarrier::init(size_t nthr) {
  // ...
  computeVarsForN(nthr);
  // ...
}
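// --- Editor's sketch: the kind of geometry computeVarsForN()/init() derive.
// The policy below (fixed threads_per_go and gos_per_group) is an assumption
// for illustration, not the runtime's actual heuristic.
#include <cstddef>

struct demo_geometry {
  std::size_t threads_per_go, num_gos, gos_per_group, num_groups,
      threads_per_group;
};

static demo_geometry demo_compute_vars_for_n(std::size_t n) {
  demo_geometry g;
  g.threads_per_go = 4; // assumed
  g.num_gos = (n + g.threads_per_go - 1) / g.threads_per_go;
  g.gos_per_group = 2; // assumed
  g.num_groups = (g.num_gos + g.gos_per_group - 1) / g.gos_per_group;
  g.threads_per_group = g.threads_per_go * g.gos_per_group;
  return g;
}
// Example: n = 23 gives 6 gos of 4 threads and 3 groups of 2 gos each.
// --- end sketch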
void __kmp_dist_barrier_wakeup(enum barrier_type bt, kmp_team_t *team,
                               size_t start, size_t stop, size_t inc,
                               size_t tid) {
  // ...
  for (size_t thr = start; thr < stop; thr += inc) {
    int gtid = other_threads[thr]->th.th_info.ds.ds_gtid;
    // ...
  }
}
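// --- Editor's sketch: the wakeup loop above strides over a range of worker
// tids; inc == threads_per_group, for example, visits only group leaders.
// demo_resume is a hypothetical stand-in for the runtime's resume call.
#include <cstdio>

static void demo_resume(std::size_t thr) { std::printf("resume T#%zu\n", thr); }

static void demo_wakeup(std::size_t start, std::size_t stop, std::size_t inc) {
  for (std::size_t thr = start; thr < stop; thr += inc)
    demo_resume(thr);
}
// demo_wakeup(8, 32, 8) resumes the leaders of groups 1..3 (tids 8, 16, 24).
// --- end sketch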
static void __kmp_dist_barrier_gather(
    enum barrier_type bt, kmp_info_t *this_thr, int gtid, int tid,
    void (*reduce)(void *, void *) USE_ITT_BUILD_ARG(void *itt_sync_obj)) {
  // ...
  team = this_thr->th.th_team;
  nproc = this_thr->th.th_team_nproc;
  other_threads = team->t.t_threads;
  // ...
  my_current_iter = b->iter[tid].iter;
  // ...
  group_leader = ((tid % b->threads_per_group) == 0);

  KA_TRACE(20,
           ("__kmp_dist_barrier_gather: T#%d(%d:%d) enter; barrier type %d\n",
            gtid, team->t.t_id, tid, bt));

#if USE_ITT_BUILD && USE_ITT_NOTIFY
  // Barrier imbalance - save arrive time to the thread
  if (__kmp_forkjoin_frames_mode == 3 || __kmp_forkjoin_frames_mode == 2) {
    this_thr->th.th_bar_arrive_time = this_thr->th.th_bar_min_time =
        __itt_get_timestamp();
  }
#endif

  if (group_leader) {
    // Start from the thread after me in my group and wait for all of them
    size_t group_start = tid + 1;
    size_t group_end = tid + b->threads_per_group;
    size_t threads_pending = 0;

    if (group_end > nproc)
      group_end = nproc;
    do {
      threads_pending = 0;
      // Count the threads in my group that have not arrived yet
      for (size_t thr = group_start; thr < group_end; thr++) {
        threads_pending += b->flags[my_current_iter][thr].stillNeed;
      }
      // While waiting, execute tasks if there is an active task team
      // ...
      if (task_team != NULL) {
        // ...
        int tasks_completed = FALSE;
        // ...
      }
      // ...
    } while (threads_pending > 0);

    if (reduce) { // Perform reduction if needed
      // ...
      for (size_t thr = group_start; thr < group_end; thr++) {
        (*reduce)(this_thr->th.th_local.reduce_data,
                  other_threads[thr]->th.th_local.reduce_data);
      }
      // ...
    }

    // Arm my flag for the next iteration, then clear it for the current one
    b->flags[my_next_iter][tid].stillNeed = 1;
    // ...
    b->flags[my_current_iter][tid].stillNeed = 0;

    if (KMP_MASTER_TID(tid)) {
      // The team leader also waits for all of the other group leaders
      do {
        threads_pending = 0;
        for (size_t thr = 0; thr < nproc; thr += b->threads_per_group) {
          threads_pending += b->flags[my_current_iter][thr].stillNeed;
        }
        // ...
        if (task_team != NULL) {
          // ...
          int tasks_completed = FALSE;
          // ...
        }
        // ...
      } while (threads_pending > 0);

      if (reduce) { // Reduce the other group leaders' contributions
        // ...
        for (size_t thr = b->threads_per_group; thr < nproc;
             thr += b->threads_per_group) {
          (*reduce)(this_thr->th.th_local.reduce_data,
                    other_threads[thr]->th.th_local.reduce_data);
        }
        // ...
      }
    }
  } else {
    // Non-leaders only flip their own flags
    b->flags[my_next_iter][tid].stillNeed = 1;
    // ...
    b->flags[my_current_iter][tid].stillNeed = 0;
  }

  KA_TRACE(20,
           ("__kmp_dist_barrier_gather: T#%d(%d:%d) exit for barrier type %d\n",
            gtid, team->t.t_id, tid, bt));
}
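// --- Editor's sketch: the two-level gather above, reduced to its essentials.
// Everything here (names, plain spin loops, memory orders) is an illustrative
// assumption; the runtime additionally executes tasks and can sleep while
// waiting.
#include <algorithm>
#include <atomic>
#include <vector>

struct demo_dist_gather {
  std::vector<std::atomic<int>> still_need;
  std::size_t threads_per_group;

  demo_dist_gather(std::size_t n, std::size_t tpg)
      : still_need(n), threads_per_group(tpg) {
    for (auto &f : still_need)
      f.store(1, std::memory_order_relaxed);
  }
  // Non-leaders: just clear my flag.
  void arrive(std::size_t tid) {
    still_need[tid].store(0, std::memory_order_release);
  }
  // Group leaders: wait for my group, then clear my flag.
  void leader_arrive(std::size_t tid) {
    std::size_t end = std::min(tid + threads_per_group, still_need.size());
    for (std::size_t thr = tid + 1; thr < end; ++thr)
      while (still_need[thr].load(std::memory_order_acquire))
        ; // spin
    arrive(tid);
  }
  // Team leader (tid 0): after leader_arrive(0), wait for the other leaders.
  void team_leader_wait() {
    for (std::size_t thr = threads_per_group; thr < still_need.size();
         thr += threads_per_group)
      while (still_need[thr].load(std::memory_order_acquire))
        ;
  }
};
// --- end sketch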
static void __kmp_dist_barrier_release(
    enum barrier_type bt, kmp_info_t *this_thr, int gtid, int tid,
    int propagate_icvs USE_ITT_BUILD_ARG(void *itt_sync_obj)) {
  // ...
  KA_TRACE(20, ("__kmp_dist_barrier_release: T#%d(%d) enter; barrier type %d\n",
                gtid, tid, bt));
  // ...
  thr_bar = &this_thr->th.th_bar[bt].bb;

  if (!KMP_MASTER_TID(tid)) {
    // Workers wait until they are marked as used by a team
    do {
      if (this_thr->th.th_used_in_team.load() != 1 &&
          this_thr->th.th_used_in_team.load() != 3) {
        // ...
        if (/* ... || */
            this_thr->th.th_used_in_team.load() == 0) {
          // ...
        }
#if USE_ITT_BUILD && USE_ITT_NOTIFY
        if ((__itt_sync_create_ptr && itt_sync_obj == NULL) || KMP_ITT_DEBUG) {
          // ...
          __kmp_itt_task_starting(itt_sync_obj);
          // ...
          if (itt_sync_obj != NULL)
            // ...
            __kmp_itt_task_finished(itt_sync_obj);
          // ...
        }
#endif
      }
      if (this_thr->th.th_used_in_team.load() != 1 &&
          this_thr->th.th_used_in_team.load() != 3) // spurious wake-up?
        continue;
      // ...
      team = this_thr->th.th_team;
      // ...
      my_current_iter = b->iter[tid].iter;
      // ...
      my_go_index = tid / b->threads_per_go;
      if (this_thr->th.th_used_in_team.load() == 3) {
        // ...
      }
      // Wait on my go flag unless it has already been released
      if (b->go[my_go_index].go.load() != next_go) {
        // ...
        kmp_atomic_flag_64<false, true> my_flag(
            &(b->go[my_go_index].go), next_go, &(b->sleep[tid].sleep));
        // ...
        KMP_DEBUG_ASSERT(my_current_iter == b->iter[tid].iter ||
                         b->iter[tid].iter == 0);
        // ...
      }
      // ...
      if (this_thr->th.th_used_in_team.load() == 1)
        break; // this thread is in use by the team: proceed through barrier
      // ...
    } while (1);
    // ...
    group_leader = ((tid % b->threads_per_group) == 0);
    if (group_leader) {
      // Tell all the threads in my group they can go!
      for (size_t go_idx = my_go_index + 1;
           go_idx < my_go_index + b->gos_per_group; go_idx++) {
        b->go[go_idx].go.store(next_go);
      }
      // ...
    }

#if KMP_BARRIER_ICV_PUSH
    if (propagate_icvs) { // copy ICVs to my final destination
      // ...
      copy_icvs(&team->t.t_implicit_task_taskdata[tid].td_icvs,
                /* ... */);
      // ...
      copy_icvs(/* ... */,
                &team->t.t_implicit_task_taskdata[tid].td_icvs);
    }
#endif
    if (group_leader && __kmp_dflt_blocktime != KMP_MAX_BLOCKTIME) {
      // Wake-up the threads in my group if they are sleeping
      size_t nproc = this_thr->th.th_team_nproc;
      size_t group_end = tid + b->threads_per_group;
      if (nproc < group_end)
        group_end = nproc;
      __kmp_dist_barrier_wakeup(bt, team, tid + 1, group_end, 1, tid);
    }
  } else { // Primary thread
    team = this_thr->th.th_team;
    // ...
    my_current_iter = b->iter[tid].iter;
    // ...
#if KMP_BARRIER_ICV_PUSH
    if (propagate_icvs) {
      // The primary thread's ICVs are already in place; keep a copy to push
      copy_icvs(&thr_bar->th_fixed_icvs,
                &team->t.t_implicit_task_taskdata[tid].td_icvs);
    }
#endif
    // ...
    // Tell all the group leaders they can go!
    for (size_t go_idx = 0; go_idx < b->num_gos; go_idx += b->gos_per_group) {
      b->go[go_idx].go.store(next_go);
    }
    // ...
    if (__kmp_dflt_blocktime != KMP_MAX_BLOCKTIME) {
      // Wake-up the group leaders if they are sleeping
      size_t nproc = this_thr->th.th_team_nproc;
      __kmp_dist_barrier_wakeup(bt, team, tid + b->threads_per_group, nproc,
                                b->threads_per_group, tid);
    }
    // ...
    // Tell the other gos in my group they can go too
    for (size_t go_idx = 1; go_idx < b->gos_per_group; go_idx++) {
      b->go[go_idx].go.store(next_go);
    }
    // ...
    if (__kmp_dflt_blocktime != KMP_MAX_BLOCKTIME) {
      // Wake-up the other threads in my group if they are sleeping
      size_t nproc = this_thr->th.th_team_nproc;
      size_t group_end = tid + b->threads_per_group;
      if (nproc < group_end)
        group_end = nproc;
      // ...
    }
  }
  // ...
  KA_TRACE(
      20, ("__kmp_dist_barrier_release: T#%d(%d:%d) exit for barrier type %d\n",
           gtid, team->t.t_id, tid, bt));
}
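// --- Editor's sketch: the release side pairs each "go" flag with an iteration
// number, so waiters compare against the value for the *next* iteration and
// the flag never has to be reset between barriers. The wrapper below is an
// assumption-level illustration, not the runtime's flag class.
#include <atomic>
#include <cstdint>

struct demo_go_flag {
  std::atomic<std::uint64_t> go{0};

  void release(std::uint64_t next_go) { // leader side
    go.store(next_go, std::memory_order_release);
  }
  void wait(std::uint64_t next_go) { // waiter side
    while (go.load(std::memory_order_acquire) != next_go)
      ; // the runtime can park on a separate sleep flag instead of spinning
  }
};
// --- end sketch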
template <bool cancellable = false>
static bool __kmp_linear_barrier_gather_template(
    enum barrier_type bt, kmp_info_t *this_thr, int gtid, int tid,
    void (*reduce)(void *, void *) USE_ITT_BUILD_ARG(void *itt_sync_obj)) {
  // ...
  KA_TRACE(
      20,
      ("__kmp_linear_barrier_gather: T#%d(%d:%d) enter for barrier type %d\n",
       gtid, team->t.t_id, tid, bt));
  KMP_DEBUG_ASSERT(this_thr == other_threads[this_thr->th.th_info.ds.ds_tid]);

#if USE_ITT_BUILD && USE_ITT_NOTIFY
  // Barrier imbalance - save arrive time to the thread
  if (__kmp_forkjoin_frames_mode == 3 || __kmp_forkjoin_frames_mode == 2) {
    this_thr->th.th_bar_arrive_time = this_thr->th.th_bar_min_time =
        __itt_get_timestamp();
  }
#endif

  if (!KMP_MASTER_TID(tid)) {
    // Worker thread: mark arrival to the primary thread
    KA_TRACE(20,
             ("__kmp_linear_barrier_gather: T#%d(%d:%d) releasing T#%d(%d:%d)"
              "arrived(%p): %llu => %llu\n",
              gtid, team->t.t_id, tid, __kmp_gtid_from_tid(0, team),
              team->t.t_id, 0, &thr_bar->b_arrived, thr_bar->b_arrived,
              thr_bar->b_arrived + KMP_BARRIER_STATE_BUMP));
    // ...
  } else {
    // Primary thread: wait for all of the worker threads to arrive
    kmp_balign_team_t *team_bar = &team->t.t_bar[bt];
    int nproc = this_thr->th.th_team_nproc;
    int i;
    // ...
    for (i = 1; i < nproc; ++i) {
      // ...
      KA_TRACE(20, ("__kmp_linear_barrier_gather: T#%d(%d:%d) wait T#%d(%d:%d) "
                    "arrived(%p) == %llu\n",
                    gtid, team->t.t_id, tid, __kmp_gtid_from_tid(i, team),
                    team->t.t_id, i,
                    &other_threads[i]->th.th_bar[bt].bb.b_arrived, new_state));
      // Wait for worker thread i to bump its arrived flag to new_state
      kmp_flag_64<> flag(&other_threads[i]->th.th_bar[bt].bb.b_arrived,
                         new_state);
      // ...
#if USE_ITT_BUILD && USE_ITT_NOTIFY
      // Barrier imbalance - the primary thread keeps the earliest arrive time
      if (__kmp_forkjoin_frames_mode == 2) {
        this_thr->th.th_bar_min_time = KMP_MIN(
            this_thr->th.th_bar_min_time, other_threads[i]->th.th_bar_min_time);
      }
#endif
      if (reduce) {
        KA_TRACE(100,
                 ("__kmp_linear_barrier_gather: T#%d(%d:%d) += T#%d(%d:%d)\n",
                  gtid, team->t.t_id, tid, __kmp_gtid_from_tid(i, team),
                  team->t.t_id, i));
        (*reduce)(this_thr->th.th_local.reduce_data,
                  other_threads[i]->th.th_local.reduce_data);
      }
    }
    // Don't have to worry about sleep bit here or atomic since team setting
    KA_TRACE(20, ("__kmp_linear_barrier_gather: T#%d(%d:%d) set team %d "
                  "arrived(%p) = %llu\n",
                  gtid, team->t.t_id, tid, team->t.t_id, &team_bar->b_arrived,
                  new_state));
    team_bar->b_arrived = new_state;
  }
  KA_TRACE(
      20,
      ("__kmp_linear_barrier_gather: T#%d(%d:%d) exit for barrier type %d\n",
       gtid, team->t.t_id, tid, bt));
  return false;
}
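// --- Editor's sketch: the linear gather in miniature. Workers bump their own
// arrived counter by a fixed stride; the primary waits until every counter
// reaches the team's new state. DEMO_STATE_BUMP stands in for
// KMP_BARRIER_STATE_BUMP; the spin loops are illustrative.
#include <atomic>
#include <cstdint>
#include <vector>

constexpr std::uint64_t DEMO_STATE_BUMP = 1 << 2;

struct demo_linear_gather {
  std::vector<std::atomic<std::uint64_t>> b_arrived;
  std::uint64_t team_arrived = 0;

  explicit demo_linear_gather(std::size_t nproc) : b_arrived(nproc) {}

  void worker_arrive(std::size_t tid) { // tid != 0
    b_arrived[tid].fetch_add(DEMO_STATE_BUMP, std::memory_order_release);
  }
  void primary_gather() { // tid == 0; the real code also reduces here
    std::uint64_t new_state = team_arrived + DEMO_STATE_BUMP;
    for (std::size_t i = 1; i < b_arrived.size(); ++i)
      while (b_arrived[i].load(std::memory_order_acquire) != new_state)
        ;
    team_arrived = new_state; // team-wide arrived state, as at the end above
  }
};
// --- end sketch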
template <bool cancellable = false>
static bool __kmp_linear_barrier_release_template(
    enum barrier_type bt, kmp_info_t *this_thr, int gtid, int tid,
    int propagate_icvs USE_ITT_BUILD_ARG(void *itt_sync_obj)) {
  // ...
  if (KMP_MASTER_TID(tid)) {
    // ...
    kmp_uint32 nproc = this_thr->th.th_team_nproc;
    // ...
    other_threads = team->t.t_threads;
    // ...
    KA_TRACE(20, ("__kmp_linear_barrier_release: T#%d(%d:%d) primary enter for "
                  "barrier type %d\n",
                  gtid, team->t.t_id, tid, bt));

#if KMP_BARRIER_ICV_PUSH
    {
      // ...
      if (propagate_icvs) {
        ngo_load(&team->t.t_implicit_task_taskdata[0].td_icvs);
        for (i = 1; i < nproc; ++i) {
          __kmp_init_implicit_task(team->t.t_ident, team->t.t_threads[i], team,
                                   i, FALSE);
          ngo_store_icvs(&team->t.t_implicit_task_taskdata[i].td_icvs,
                         &team->t.t_implicit_task_taskdata[0].td_icvs);
        }
        ngo_sync();
      }
    }
#endif // KMP_BARRIER_ICV_PUSH

    // Now, release all of the worker threads
    for (i = 1; i < nproc; ++i) {
      // ...
      KA_TRACE(
          20,
          ("__kmp_linear_barrier_release: T#%d(%d:%d) releasing T#%d(%d:%d) "
           "go(%p): %u => %u\n",
           gtid, team->t.t_id, tid, other_threads[i]->th.th_info.ds.ds_gtid,
           team->t.t_id, i, &other_threads[i]->th.th_bar[bt].bb.b_go,
           other_threads[i]->th.th_bar[bt].bb.b_go,
           other_threads[i]->th.th_bar[bt].bb.b_go + KMP_BARRIER_STATE_BUMP));
      // ...
    }
  } else { // Wait for the primary thread to release us
    KA_TRACE(20, ("__kmp_linear_barrier_release: T#%d wait go(%p) == %u\n",
                  gtid, &thr_bar->b_go, KMP_BARRIER_STATE_BUMP));
    // ...
#if USE_ITT_BUILD && USE_ITT_NOTIFY
    if ((__itt_sync_create_ptr && itt_sync_obj == NULL) || KMP_ITT_DEBUG) {
      // ...
      __kmp_itt_task_starting(itt_sync_obj);
      // ...
      if (itt_sync_obj != NULL)
        // ...
        __kmp_itt_task_finished(itt_sync_obj);
      // ...
    }
#endif
    // ...
    KA_TRACE(20,
             ("__kmp_linear_barrier_release: T#%d(%d:%d) set go(%p) = %u\n",
              gtid, team->t.t_id, tid, &thr_bar->b_go, KMP_INIT_BARRIER_STATE));
    // ...
  }
  KA_TRACE(
      20,
      ("__kmp_linear_barrier_release: T#%d(%d:%d) exit for barrier type %d\n",
       gtid, team->t.t_id, tid, bt));
  return false;
}
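// --- Editor's sketch: the matching release in miniature, reusing
// DEMO_STATE_BUMP from the previous sketch. The primary bumps every worker's
// b_go flag; each worker waits for its own flag, then re-arms it for the next
// barrier. The spinning is illustrative; the runtime may sleep instead.
#include <atomic>
#include <cstdint>
#include <vector>

struct demo_linear_release {
  std::vector<std::atomic<std::uint64_t>> b_go;
  explicit demo_linear_release(std::size_t nproc) : b_go(nproc) {}

  void primary_release() { // tid == 0
    for (std::size_t i = 1; i < b_go.size(); ++i)
      b_go[i].fetch_add(DEMO_STATE_BUMP, std::memory_order_release);
  }
  void worker_wait(std::size_t tid, std::uint64_t released_state) {
    while (b_go[tid].load(std::memory_order_acquire) != released_state)
      ;
    b_go[tid].store(0, std::memory_order_relaxed); // reset, as the TCW above
  }
};
// --- end sketch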
// The non-template entry points instantiate the plain and cancellable forms:
  __kmp_linear_barrier_gather_template<false>(
      bt, this_thr, gtid, tid, reduce USE_ITT_BUILD_ARG(itt_sync_obj));
// ...
  return __kmp_linear_barrier_gather_template<true>(
      bt, this_thr, gtid, tid, reduce USE_ITT_BUILD_ARG(itt_sync_obj));
// ...
  __kmp_linear_barrier_release_template<false>(
      bt, this_thr, gtid, tid, propagate_icvs USE_ITT_BUILD_ARG(itt_sync_obj));
// ...
  return __kmp_linear_barrier_release_template<true>(
      bt, this_thr, gtid, tid, propagate_icvs USE_ITT_BUILD_ARG(itt_sync_obj));
static void __kmp_tree_barrier_gather(
    enum barrier_type bt, kmp_info_t *this_thr, int gtid, int tid,
    void (*reduce)(void *, void *) USE_ITT_BUILD_ARG(void *itt_sync_obj)) {
  // ...
  kmp_uint32 nproc = this_thr->th.th_team_nproc;
  // ...
  KA_TRACE(
      20, ("__kmp_tree_barrier_gather: T#%d(%d:%d) enter for barrier type %d\n",
           gtid, team->t.t_id, tid, bt));
  KMP_DEBUG_ASSERT(this_thr == other_threads[this_thr->th.th_info.ds.ds_tid]);

#if USE_ITT_BUILD && USE_ITT_NOTIFY
  // Barrier imbalance - save arrive time to the thread
  if (__kmp_forkjoin_frames_mode == 3 || __kmp_forkjoin_frames_mode == 2) {
    this_thr->th.th_bar_arrive_time = this_thr->th.th_bar_min_time =
        __itt_get_timestamp();
  }
#endif
  // Perform tree gather to wait until all threads have arrived; reduce any
  // required data as we go
  child_tid = (tid << branch_bits) + 1;
  if (child_tid < nproc) {
    // Parent threads wait for all their children to arrive
    // ...
    do {
      kmp_info_t *child_thr = other_threads[child_tid];
      kmp_bstate_t *child_bar = &child_thr->th.th_bar[bt].bb;
#if KMP_CACHE_MANAGE
      // Prefetch next thread's arrived count
      if (child + 1 <= branch_factor && child_tid + 1 < nproc)
        KMP_CACHE_PREFETCH(
            &other_threads[child_tid + 1]->th.th_bar[bt].bb.b_arrived);
#endif /* KMP_CACHE_MANAGE */
      KA_TRACE(20,
               ("__kmp_tree_barrier_gather: T#%d(%d:%d) wait T#%d(%d:%u) "
                "arrived(%p) == %llu\n",
                gtid, team->t.t_id, tid, __kmp_gtid_from_tid(child_tid, team),
                team->t.t_id, child_tid, &child_bar->b_arrived, new_state));
      // Wait for the child thread's arrived flag to reach new_state
      // ...
#if USE_ITT_BUILD && USE_ITT_NOTIFY
      // Barrier imbalance - the parent keeps the earliest arrive time
      if (__kmp_forkjoin_frames_mode == 2) {
        this_thr->th.th_bar_min_time = KMP_MIN(this_thr->th.th_bar_min_time,
                                               child_thr->th.th_bar_min_time);
      }
#endif
      if (reduce) {
        KA_TRACE(100,
                 ("__kmp_tree_barrier_gather: T#%d(%d:%d) += T#%d(%d:%u)\n",
                  gtid, team->t.t_id, tid, __kmp_gtid_from_tid(child_tid, team),
                  team->t.t_id, child_tid));
        // ...
        (*reduce)(this_thr->th.th_local.reduce_data,
                  child_thr->th.th_local.reduce_data);
      }
      child++;
      child_tid++;
    } while (child <= branch_factor && child_tid < nproc);
  }

  if (!KMP_MASTER_TID(tid)) { // Worker threads
    kmp_int32 parent_tid = (tid - 1) >> branch_bits;
    // ...
    KA_TRACE(20,
             ("__kmp_tree_barrier_gather: T#%d(%d:%d) releasing T#%d(%d:%d) "
              "arrived(%p): %llu => %llu\n",
              gtid, team->t.t_id, tid, __kmp_gtid_from_tid(parent_tid, team),
              team->t.t_id, parent_tid, &thr_bar->b_arrived, thr_bar->b_arrived,
              thr_bar->b_arrived + KMP_BARRIER_STATE_BUMP));
    // Mark arrival to the parent thread
    // ...
  } else {
    // Primary thread needs to update the team's arrived pointer
    // ...
    team->t.t_bar[bt].b_arrived = new_state;
    // ...
    KA_TRACE(20, ("__kmp_tree_barrier_gather: T#%d(%d:%d) set team %d "
                  "arrived(%p) = %llu\n",
                  gtid, team->t.t_id, tid, team->t.t_id,
                  &team->t.t_bar[bt].b_arrived, team->t.t_bar[bt].b_arrived));
  }
  KA_TRACE(
      20, ("__kmp_tree_barrier_gather: T#%d(%d:%d) exit for barrier type %d\n",
           gtid, team->t.t_id, tid, bt));
}
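// --- Editor's sketch: the index arithmetic the tree barrier uses. With
// branch_bits b, a thread's children start at (tid << b) + 1 and its parent is
// (tid - 1) >> b (both formulas taken from the code above). Printing the
// relations for a small team makes the implicit tree visible.
#include <cstdio>

int main() {
  const int branch_bits = 2; // branch factor 4, as an example
  const int nproc = 9;
  for (int tid = 0; tid < nproc; ++tid) {
    int first_child = (tid << branch_bits) + 1;
    int parent = (tid == 0) ? -1 : (tid - 1) >> branch_bits;
    std::printf("tid=%d parent=%d first_child=%d\n", tid, parent,
                first_child < nproc ? first_child : -1);
  }
  return 0;
}
// --- end sketch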
static void __kmp_tree_barrier_release(
    enum barrier_type bt, kmp_info_t *this_thr, int gtid, int tid,
    int propagate_icvs USE_ITT_BUILD_ARG(void *itt_sync_obj)) {
  // ...
  if (!KMP_MASTER_TID(tid)) {
    // Worker threads wait for the parent thread to release them
    KA_TRACE(20, ("__kmp_tree_barrier_release: T#%d wait go(%p) == %u\n", gtid,
                  &thr_bar->b_go, KMP_BARRIER_STATE_BUMP));
    // ...
#if USE_ITT_BUILD && USE_ITT_NOTIFY
    if ((__itt_sync_create_ptr && itt_sync_obj == NULL) || KMP_ITT_DEBUG) {
      // ...
      __kmp_itt_task_starting(itt_sync_obj);
      // ...
      if (itt_sync_obj != NULL)
        // ...
        __kmp_itt_task_finished(itt_sync_obj);
      // ...
    }
#endif
    // ...
    KA_TRACE(20,
             ("__kmp_tree_barrier_release: T#%d(%d:%d) set go(%p) = %u\n", gtid,
              team->t.t_id, tid, &thr_bar->b_go, KMP_INIT_BARRIER_STATE));
    // ...
  } else {
    // ...
    KA_TRACE(20, ("__kmp_tree_barrier_release: T#%d(%d:%d) primary enter for "
                  "barrier type %d\n",
                  gtid, team->t.t_id, tid, bt));
  }
  nproc = this_thr->th.th_team_nproc;
  child_tid = (tid << branch_bits) + 1;

  if (child_tid < nproc) {
    // Parent threads release all their children
    // ...
    do {
      kmp_info_t *child_thr = other_threads[child_tid];
      kmp_bstate_t *child_bar = &child_thr->th.th_bar[bt].bb;
#if KMP_CACHE_MANAGE
      // Prefetch next thread's go count
      if (child + 1 <= branch_factor && child_tid + 1 < nproc)
        KMP_CACHE_PREFETCH(&other_threads[child_tid + 1]->th.th_bar[bt].bb.b_go);
#endif /* KMP_CACHE_MANAGE */

#if KMP_BARRIER_ICV_PUSH
      {
        // ...
        if (propagate_icvs) {
          __kmp_init_implicit_task(team->t.t_ident,
                                   team->t.t_threads[child_tid], team,
                                   child_tid, FALSE);
          copy_icvs(&team->t.t_implicit_task_taskdata[child_tid].td_icvs,
                    &team->t.t_implicit_task_taskdata[0].td_icvs);
        }
      }
#endif // KMP_BARRIER_ICV_PUSH
      KA_TRACE(20,
               ("__kmp_tree_barrier_release: T#%d(%d:%d) releasing T#%d(%d:%u)"
                "go(%p): %u => %u\n",
                gtid, team->t.t_id, tid, __kmp_gtid_from_tid(child_tid, team),
                team->t.t_id, child_tid, &child_bar->b_go, child_bar->b_go,
                child_bar->b_go + KMP_BARRIER_STATE_BUMP));
      // Release child from barrier
      // ...
      child++;
      child_tid++;
    } while (child <= branch_factor && child_tid < nproc);
  }
  KA_TRACE(
      20, ("__kmp_tree_barrier_release: T#%d(%d:%d) exit for barrier type %d\n",
           gtid, team->t.t_id, tid, bt));
}
static void __kmp_hyper_barrier_gather(
    enum barrier_type bt, kmp_info_t *this_thr, int gtid, int tid,
    void (*reduce)(void *, void *) USE_ITT_BUILD_ARG(void *itt_sync_obj)) {
  // ...
  kmp_uint32 num_threads = this_thr->th.th_team_nproc;
  // ...
  KA_TRACE(
      20,
      ("__kmp_hyper_barrier_gather: T#%d(%d:%d) enter for barrier type %d\n",
       gtid, team->t.t_id, tid, bt));
  KMP_DEBUG_ASSERT(this_thr == other_threads[this_thr->th.th_info.ds.ds_tid]);

#if USE_ITT_BUILD && USE_ITT_NOTIFY
  // Barrier imbalance - save arrive time to the thread
  if (__kmp_forkjoin_frames_mode == 3 || __kmp_forkjoin_frames_mode == 2) {
    this_thr->th.th_bar_arrive_time = this_thr->th.th_bar_min_time =
        __itt_get_timestamp();
  }
#endif
  /* Perform a hypercube-embedded tree gather to wait until all of the threads
     have arrived, and reduce any required data as we go. */
  for (level = 0, offset = 1; offset < num_threads;
       level += branch_bits, offset <<= branch_bits) {
    // ...
    if (((tid >> level) & (branch_factor - 1)) != 0) {
      // This thread is a child: signal its parent and stop ascending
      kmp_int32 parent_tid = tid & ~((1 << (level + branch_bits)) - 1);
      // ...
      KA_TRACE(20,
               ("__kmp_hyper_barrier_gather: T#%d(%d:%d) releasing T#%d(%d:%d) "
                "arrived(%p): %llu => %llu\n",
                gtid, team->t.t_id, tid, __kmp_gtid_from_tid(parent_tid, team),
                team->t.t_id, parent_tid, &thr_bar->b_arrived,
                thr_bar->b_arrived,
                thr_bar->b_arrived + KMP_BARRIER_STATE_BUMP));
      // Mark arrival to the parent thread
      kmp_flag_64<> p_flag(&thr_bar->b_arrived, new_state);
      p_flag.set_waiter(other_threads[parent_tid]);
      p_flag.release();
      break;
    }

    // Parent threads wait for their children to arrive
    // ...
    for (child = 1, child_tid = tid + (1 << level);
         child < branch_factor && child_tid < num_threads;
         child++, child_tid += (1 << level)) {
      kmp_info_t *child_thr = other_threads[child_tid];
      kmp_bstate_t *child_bar = &child_thr->th.th_bar[bt].bb;
#if KMP_CACHE_MANAGE
      kmp_uint32 next_child_tid = child_tid + (1 << level);
      // Prefetch next thread's arrived count
      if (child + 1 < branch_factor && next_child_tid < num_threads)
        KMP_CACHE_PREFETCH(
            &other_threads[next_child_tid]->th.th_bar[bt].bb.b_arrived);
#endif /* KMP_CACHE_MANAGE */
      KA_TRACE(20,
               ("__kmp_hyper_barrier_gather: T#%d(%d:%d) wait T#%d(%d:%u) "
                "arrived(%p) == %llu\n",
                gtid, team->t.t_id, tid, __kmp_gtid_from_tid(child_tid, team),
                team->t.t_id, child_tid, &child_bar->b_arrived, new_state));
      // Wait for the child thread's arrived flag to reach new_state
      // ...
#if USE_ITT_BUILD && USE_ITT_NOTIFY
      // Barrier imbalance - the parent keeps the earliest arrive time
      if (__kmp_forkjoin_frames_mode == 2) {
        this_thr->th.th_bar_min_time = KMP_MIN(this_thr->th.th_bar_min_time,
                                               child_thr->th.th_bar_min_time);
      }
#endif
      if (reduce) {
        KA_TRACE(100,
                 ("__kmp_hyper_barrier_gather: T#%d(%d:%d) += T#%d(%d:%u)\n",
                  gtid, team->t.t_id, tid, __kmp_gtid_from_tid(child_tid, team),
                  team->t.t_id, child_tid));
        // ...
        (*reduce)(this_thr->th.th_local.reduce_data,
                  child_thr->th.th_local.reduce_data);
      }
    }
  }

  if (KMP_MASTER_TID(tid)) {
    // Need to update the team's arrived pointer if we are the primary thread
    // ...
    team->t.t_bar[bt].b_arrived = new_state;
    KA_TRACE(20, ("__kmp_hyper_barrier_gather: T#%d(%d:%d) set team %d "
                  "arrived(%p) = %llu\n",
                  gtid, team->t.t_id, tid, team->t.t_id,
                  &team->t.t_bar[bt].b_arrived, team->t.t_bar[bt].b_arrived));
  }
  KA_TRACE(
      20, ("__kmp_hyper_barrier_gather: T#%d(%d:%d) exit for barrier type %d\n",
           gtid, team->t.t_id, tid, bt));
}
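// --- Editor's sketch: the hypercube pairing from the gather above. A thread
// reports to its parent at the first level where its tid has a nonzero digit
// (base branch_factor); the parent's tid is that digit cleared. Both formulas
// are taken from the code above.
#include <cstdio>

int main() {
  const int branch_bits = 1; // branch factor 2, as an example
  const int branch_factor = 1 << branch_bits;
  const int num_threads = 8;
  for (int tid = 1; tid < num_threads; ++tid) {
    int level = 0;
    while (((tid >> level) & (branch_factor - 1)) == 0)
      level += branch_bits; // at these levels tid gathers its own children
    int parent_tid = tid & ~((1 << (level + branch_bits)) - 1);
    std::printf("T#%d reports at level %d to T#%d\n", tid, level, parent_tid);
  }
  return 0;
}
// --- end sketch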
// The reverse versions seem to beat the forward versions overall
#define KMP_REVERSE_HYPER_BAR
static void __kmp_hyper_barrier_release(
    enum barrier_type bt, kmp_info_t *this_thr, int gtid, int tid,
    int propagate_icvs USE_ITT_BUILD_ARG(void *itt_sync_obj)) {
  // ...
  if (KMP_MASTER_TID(tid)) { // primary
    // ...
    KA_TRACE(20, ("__kmp_hyper_barrier_release: T#%d(%d:%d) primary enter for "
                  "barrier type %d\n",
                  gtid, team->t.t_id, tid, bt));
#if KMP_BARRIER_ICV_PUSH
    if (propagate_icvs) { // primary already has ICVs in final destination; copy
      copy_icvs(&thr_bar->th_fixed_icvs,
                &team->t.t_implicit_task_taskdata[tid].td_icvs);
    }
#endif
  } else { // Worker threads wait for the parent thread to release them
    KA_TRACE(20, ("__kmp_hyper_barrier_release: T#%d wait go(%p) == %u\n", gtid,
                  &thr_bar->b_go, KMP_BARRIER_STATE_BUMP));
    // ...
#if USE_ITT_BUILD && USE_ITT_NOTIFY
    if ((__itt_sync_create_ptr && itt_sync_obj == NULL) || KMP_ITT_DEBUG) {
      // ...
      __kmp_itt_task_starting(itt_sync_obj);
      // ...
      if (itt_sync_obj != NULL)
        // ...
        __kmp_itt_task_finished(itt_sync_obj);
      // ...
    }
#endif
    // ...
    KA_TRACE(20,
             ("__kmp_hyper_barrier_release: T#%d(%d:%d) set go(%p) = %u\n",
              gtid, team->t.t_id, tid, &thr_bar->b_go, KMP_INIT_BARRIER_STATE));
    // ...
  }
  num_threads = this_thr->th.th_team_nproc;
  other_threads = team->t.t_threads;

#ifdef KMP_REVERSE_HYPER_BAR
  // Count up to the correct level for the parent
  for (level = 0, offset = 1;
       offset < num_threads && (((tid >> level) & (branch_factor - 1)) == 0);
       level += branch_bits, offset <<= branch_bits)
    ;

  // Now go down from there, level by level
  for (level -= branch_bits, offset >>= branch_bits; offset != 0;
       level -= branch_bits, offset >>= branch_bits)
#else
  // Go down the tree, level by level
  for (level = 0, offset = 1; offset < num_threads;
       level += branch_bits, offset <<= branch_bits)
#endif // KMP_REVERSE_HYPER_BAR
  {
#ifdef KMP_REVERSE_HYPER_BAR
    // Iterate through children on this level in reverse order
    for (child = (child < branch_factor - 1) ? child : branch_factor - 1,
        child_tid = tid + (child << level);
         child >= 1; child--, child_tid -= (1 << level))
#else
    if (((tid >> level) & (branch_factor - 1)) != 0)
      // No need to go any lower: this is the level where the parent is notified
      break;
    // Iterate through children on this level of the tree
    for (child = 1, child_tid = tid + (1 << level);
         child < branch_factor && child_tid < num_threads;
         child++, child_tid += (1 << level))
#endif // KMP_REVERSE_HYPER_BAR
    {
      if (child_tid >= num_threads)
        continue; // Child doesn't exist, so keep going
      else {
        kmp_info_t *child_thr = other_threads[child_tid];
        kmp_bstate_t *child_bar = &child_thr->th.th_bar[bt].bb;
#if KMP_CACHE_MANAGE
        // ...
#ifdef KMP_REVERSE_HYPER_BAR
        if (child - 1 >= 1 && next_child_tid < num_threads)
#else
        if (child + 1 < branch_factor && next_child_tid < num_threads)
#endif // KMP_REVERSE_HYPER_BAR
          KMP_CACHE_PREFETCH(
              &other_threads[next_child_tid]->th.th_bar[bt].bb.b_go);
#endif /* KMP_CACHE_MANAGE */

#if KMP_BARRIER_ICV_PUSH
        if (propagate_icvs) // push my fixed ICVs to my child
          copy_icvs(&child_bar->th_fixed_icvs, &thr_bar->th_fixed_icvs);
#endif // KMP_BARRIER_ICV_PUSH

        KA_TRACE(
            20,
            ("__kmp_hyper_barrier_release: T#%d(%d:%d) releasing T#%d(%d:%u)"
             "go(%p): %u => %u\n",
             gtid, team->t.t_id, tid, __kmp_gtid_from_tid(child_tid, team),
             team->t.t_id, child_tid, &child_bar->b_go, child_bar->b_go,
             child_bar->b_go + KMP_BARRIER_STATE_BUMP));
        // Release child from barrier
        // ...
      }
    }
  }
#if KMP_BARRIER_ICV_PUSH
  if (propagate_icvs &&
      !KMP_MASTER_TID(tid)) { // copy ICVs locally to final destination
    __kmp_init_implicit_task(team->t.t_ident, team->t.t_threads[tid], team,
                             tid, FALSE);
    copy_icvs(&team->t.t_implicit_task_taskdata[tid].td_icvs,
              &thr_bar->th_fixed_icvs);
  }
#endif
  KA_TRACE(
      20,
      ("__kmp_hyper_barrier_release: T#%d(%d:%d) exit for barrier type %d\n",
       gtid, team->t.t_id, tid, bt));
}
static bool __kmp_init_hierarchical_barrier_thread(enum barrier_type bt,
                                                   kmp_bstate_t *thr_bar,
                                                   kmp_uint32 nproc, int gtid,
                                                   int tid, kmp_team_t *team) {
  // Checks to determine if (re-)initialization is needed
  bool uninitialized = thr_bar->team == NULL;
  bool team_changed = team != thr_bar->team;
  bool team_sz_changed = nproc != thr_bar->nproc;
  bool tid_changed = tid != thr_bar->old_tid;
  bool retval = false;

  if (uninitialized || team_sz_changed) {
    __kmp_get_hierarchy(nproc, thr_bar);
  }

  if (uninitialized || team_sz_changed || tid_changed) {
    thr_bar->my_level = thr_bar->depth - 1; // default for the primary thread
    thr_bar->parent_tid = -1; // default for the primary thread
    if (!KMP_MASTER_TID(tid)) {
      // If not the primary thread, find the parent thread in the hierarchy
      kmp_uint32 d = 0;
      while (d < thr_bar->depth) {
        kmp_uint32 rem;
        if (d == thr_bar->depth - 2) { // reached level right below the primary
          thr_bar->parent_tid = 0;
          thr_bar->my_level = d;
          break;
        } else if ((rem = tid % thr_bar->skip_per_level[d + 1]) != 0) {
          // thread is not a subtree root at the next level, so this is max
          thr_bar->parent_tid = tid - rem;
          thr_bar->my_level = d;
          break;
        }
        ++d;
      }
    }
    __kmp_type_convert(7 - ((tid - thr_bar->parent_tid) /
                            (thr_bar->skip_per_level[thr_bar->my_level])),
                       &(thr_bar->offset));
    thr_bar->old_tid = tid;
    // ...
    thr_bar->team = team;
    thr_bar->parent_bar =
        &team->t.t_threads[thr_bar->parent_tid]->th.th_bar[bt].bb;
  }
  if (uninitialized || team_changed || tid_changed) {
    thr_bar->team = team;
    thr_bar->parent_bar =
        &team->t.t_threads[thr_bar->parent_tid]->th.th_bar[bt].bb;
    retval = true;
  }
  if (uninitialized || team_sz_changed || tid_changed) {
    thr_bar->nproc = nproc;
    thr_bar->leaf_kids = thr_bar->base_leaf_kids;
    if (thr_bar->my_level == 0)
      thr_bar->leaf_kids = 0;
    if (thr_bar->leaf_kids && (kmp_uint32)tid + thr_bar->leaf_kids + 1 > nproc)
      __kmp_type_convert(nproc - tid - 1, &(thr_bar->leaf_kids));
    thr_bar->leaf_state = 0;
    for (int i = 0; i < thr_bar->leaf_kids; ++i)
      ((char *)&(thr_bar->leaf_state))[7 - i] = 1;
  }
  return retval;
}
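// --- Editor's sketch: the leaf_state trick from the last loop above. One byte
// per leaf child is set inside a single 64-bit word, filled from the high end
// ([7 - i]), so a parent can wait for all of its leaf children with one 64-bit
// comparison and clear them with one atomic AND.
#include <cstdint>
#include <cstdio>

int main() {
  std::uint64_t leaf_state = 0;
  const int leaf_kids = 3;
  for (int i = 0; i < leaf_kids; ++i)
    ((char *)&leaf_state)[7 - i] = 1; // same byte layout as the code above
  std::printf("leaf_state mask = 0x%016llx\n", (unsigned long long)leaf_state);
  return 0;
}
// --- end sketch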
static void __kmp_hierarchical_barrier_gather(
    enum barrier_type bt, kmp_info_t *this_thr, int gtid, int tid,
    void (*reduce)(void *, void *) USE_ITT_BUILD_ARG(void *itt_sync_obj)) {
  // ...
  kmp_uint32 nproc = this_thr->th.th_team_nproc;
  // ...
  int level = team->t.t_level;
  if (other_threads[0]
          ->th.th_teams_microtask) // are we inside the teams construct?
    if (this_thr->th.th_teams_size.nteams > 1)
      ++level; // level was not increased in teams construct for team_of_masters
  if (level == 1)
    thr_bar->use_oncore_barrier = 1;
  else
    thr_bar->use_oncore_barrier = 0; // Do not use oncore barrier when nested

  KA_TRACE(20, ("__kmp_hierarchical_barrier_gather: T#%d(%d:%d) enter for "
                "barrier type %d\n",
                gtid, team->t.t_id, tid, bt));
  KMP_DEBUG_ASSERT(this_thr == other_threads[this_thr->th.th_info.ds.ds_tid]);

#if USE_ITT_BUILD && USE_ITT_NOTIFY
  // Barrier imbalance - save arrive time to the thread
  if (__kmp_forkjoin_frames_mode == 3 || __kmp_forkjoin_frames_mode == 2) {
    this_thr->th.th_bar_arrive_time = __itt_get_timestamp();
  }
#endif
  // ...
  if (thr_bar->my_level) { // not a leaf (my_level == 0 means leaf)
    // ...
    if (__kmp_dflt_blocktime == KMP_MAX_BLOCKTIME &&
        thr_bar->use_oncore_barrier) {
      if (thr_bar->leaf_kids) {
        // First, wait for the leaf children to check in on my b_arrived flag
        kmp_uint64 leaf_state =
            KMP_MASTER_TID(tid)
                ? thr_bar->b_arrived | thr_bar->leaf_state
                : team->t.t_bar[bt].b_arrived | thr_bar->leaf_state;
        KA_TRACE(20, ("__kmp_hierarchical_barrier_gather: T#%d(%d:%d) waiting "
                      "for leaf kids\n",
                      gtid, team->t.t_id, tid));
        // ...
        if (reduce) {
          // ...
          for (child_tid = tid + 1; child_tid <= tid + thr_bar->leaf_kids;
               ++child_tid) {
            KA_TRACE(100, ("__kmp_hierarchical_barrier_gather: T#%d(%d:%d) += "
                           "T#%d(%d:%d)\n",
                           gtid, team->t.t_id, tid,
                           // ...
                           ));
            (*reduce)(this_thr->th.th_local.reduce_data,
                      other_threads[child_tid]->th.th_local.reduce_data);
          }
          // ...
        }
        // Clear the leaf_state bits
        // ...
      }
      // Next, wait for higher-level children on each child's b_arrived flag
      for (kmp_uint32 d = 1; d < thr_bar->my_level; ++d) {
        kmp_uint32 last = tid + thr_bar->skip_per_level[d + 1],
                   skip = thr_bar->skip_per_level[d];
        if (last > nproc)
          last = nproc;
        for (child_tid = tid + skip; child_tid < (int)last; child_tid += skip) {
          kmp_info_t *child_thr = other_threads[child_tid];
          kmp_bstate_t *child_bar = &child_thr->th.th_bar[bt].bb;
          KA_TRACE(20, ("__kmp_hierarchical_barrier_gather: T#%d(%d:%d) wait "
                        "T#%d(%d:%d) "
                        "arrived(%p) == %llu\n",
                        gtid, team->t.t_id, tid,
                        // ...
                        child_tid, &child_bar->b_arrived, new_state));
          // ...
          if (reduce) {
            KA_TRACE(100, ("__kmp_hierarchical_barrier_gather: T#%d(%d:%d) += "
                           "T#%d(%d:%d)\n",
                           gtid, team->t.t_id, tid,
                           // ...
                           ));
            (*reduce)(this_thr->th.th_local.reduce_data,
                      child_thr->th.th_local.reduce_data);
          }
        }
      }
    } else { // Blocktime is not infinite
      for (kmp_uint32 d = 0; d < thr_bar->my_level; ++d) {
        kmp_uint32 last = tid + thr_bar->skip_per_level[d + 1],
                   skip = thr_bar->skip_per_level[d];
        if (last > nproc)
          last = nproc;
        for (child_tid = tid + skip; child_tid < (int)last; child_tid += skip) {
          kmp_info_t *child_thr = other_threads[child_tid];
          kmp_bstate_t *child_bar = &child_thr->th.th_bar[bt].bb;
          KA_TRACE(20, ("__kmp_hierarchical_barrier_gather: T#%d(%d:%d) wait "
                        "T#%d(%d:%d) "
                        "arrived(%p) == %llu\n",
                        gtid, team->t.t_id, tid,
                        // ...
                        child_tid, &child_bar->b_arrived, new_state));
          // ...
          if (reduce) {
            KA_TRACE(100, ("__kmp_hierarchical_barrier_gather: T#%d(%d:%d) += "
                           "T#%d(%d:%d)\n",
                           gtid, team->t.t_id, tid,
                           // ...
                           ));
            (*reduce)(this_thr->th.th_local.reduce_data,
                      child_thr->th.th_local.reduce_data);
          }
        }
      }
    }
  }
  // All subordinates are gathered; now release the parent if not primary
  if (!KMP_MASTER_TID(tid)) {
    KA_TRACE(20, ("__kmp_hierarchical_barrier_gather: T#%d(%d:%d) releasing"
                  " T#%d(%d:%d) arrived(%p): %llu => %llu\n",
                  gtid, team->t.t_id, tid,
                  // ...
                  thr_bar->parent_tid, &thr_bar->b_arrived, thr_bar->b_arrived,
                  thr_bar->b_arrived + KMP_BARRIER_STATE_BUMP));
    // Mark arrival to the parent
    if (thr_bar->my_level || __kmp_dflt_blocktime != KMP_MAX_BLOCKTIME ||
        !thr_bar->use_oncore_barrier) {
      // The parent is waiting on my b_arrived flag; release it
      kmp_flag_64<> flag(&thr_bar->b_arrived,
                         other_threads[thr_bar->parent_tid]);
      flag.release();
    } else {
      // Leaf: special release on the "offset" bits of the parent's b_arrived
      // ...
      kmp_flag_oncore flag(&thr_bar->parent_bar->b_arrived,
                           thr_bar->offset + 1);
      flag.set_waiter(other_threads[thr_bar->parent_tid]);
      flag.release();
    }
  } else { // The primary thread updates the team's b_arrived value
    team->t.t_bar[bt].b_arrived = new_state;
    KA_TRACE(20, ("__kmp_hierarchical_barrier_gather: T#%d(%d:%d) set team %d "
                  "arrived(%p) = %llu\n",
                  gtid, team->t.t_id, tid, team->t.t_id,
                  &team->t.t_bar[bt].b_arrived, team->t.t_bar[bt].b_arrived));
  }
  KA_TRACE(20, ("__kmp_hierarchical_barrier_gather: T#%d(%d:%d) exit for "
                "barrier type %d\n",
                gtid, team->t.t_id, tid, bt));
}
static void __kmp_hierarchical_barrier_release(
    enum barrier_type bt, kmp_info_t *this_thr, int gtid, int tid,
    int propagate_icvs USE_ITT_BUILD_ARG(void *itt_sync_obj)) {
  // ...
  bool team_change = false; // indicates the on-core barrier shouldn't be used
  // ...
  if (KMP_MASTER_TID(tid)) {
    // ...
    KA_TRACE(20, ("__kmp_hierarchical_barrier_release: T#%d(%d:%d) primary "
                  "entered barrier type %d\n",
                  gtid, team->t.t_id, tid, bt));
  } else { // Worker threads wait for the parent thread to release them
    if (!thr_bar->use_oncore_barrier ||
        __kmp_dflt_blocktime != KMP_MAX_BLOCKTIME || thr_bar->my_level != 0 ||
        thr_bar->team == NULL) {
      // Use the traditional method of waiting on my own b_go flag
      thr_bar->wait_flag = KMP_BARRIER_OWN_FLAG;
      // ...
      TCW_8(thr_bar->b_go,
            KMP_INIT_BARRIER_STATE); // Reset my b_go flag for the next time
    } else {
      // Thread barrier data is initialized, this is a leaf, and blocktime is
      // infinite: wait on my "offset" bits of the parent's b_go flag
      thr_bar->wait_flag = KMP_BARRIER_PARENT_FLAG;
      kmp_flag_oncore flag(&thr_bar->parent_bar->b_go, KMP_BARRIER_STATE_BUMP,
                           thr_bar->offset + 1, bt,
                           this_thr USE_ITT_BUILD_ARG(itt_sync_obj));
      // ...
      if (thr_bar->wait_flag ==
          KMP_BARRIER_SWITCHING) { // I was switched to my own b_go flag
        TCW_8(thr_bar->b_go,
              KMP_INIT_BARRIER_STATE); // Reset my b_go flag for the next time
      } else { // Reset my bits on the parent's b_go flag
        (RCAST(volatile char *,
               &(thr_bar->parent_bar->b_go)))[thr_bar->offset + 1] = 0;
      }
    }
    // ...
    KA_TRACE(
        20,
        ("__kmp_hierarchical_barrier_release: T#%d(%d:%d) set go(%p) = %u\n",
         gtid, team->t.t_id, tid, &thr_bar->b_go, KMP_INIT_BARRIER_STATE));
    // ...
  }
  nproc = this_thr->th.th_team_nproc;
  int level = team->t.t_level;
  if (team->t.t_threads[0]
          ->th.th_teams_microtask) { // are we inside the teams construct?
    if (team->t.t_pkfn != (microtask_t)__kmp_teams_master &&
        this_thr->th.th_teams_level == level)
      ++level; // level was not increased in teams construct for team_of_workers
    if (this_thr->th.th_teams_size.nteams > 1)
      ++level; // level was not increased in teams construct for team_of_masters
  }
  if (level == 1)
    thr_bar->use_oncore_barrier = 1;
  else
    thr_bar->use_oncore_barrier = 0; // Do not use oncore barrier when nested

  // If the team size has increased, we still communicate with old leaves via
  // the oncore barrier
  unsigned short int old_leaf_kids = thr_bar->leaf_kids;
  kmp_uint64 old_leaf_state = thr_bar->leaf_state;
  team_change = __kmp_init_hierarchical_barrier_thread(bt, thr_bar, nproc,
                                                       gtid, tid, team);
  // But if the entire team changes, we won't use the oncore barrier at all
  if (team_change)
    old_leaf_kids = 0;

#if KMP_BARRIER_ICV_PUSH
  if (propagate_icvs) {
    // ...
    if (KMP_MASTER_TID(tid)) {
      // primary already has a copy in the final destination; stash it
      copy_icvs(&thr_bar->th_fixed_icvs,
                &team->t.t_implicit_task_taskdata[tid].td_icvs);
    } else if (__kmp_dflt_blocktime == KMP_MAX_BLOCKTIME &&
               thr_bar->use_oncore_barrier) { // optimization for inf blocktime
      if (!thr_bar->my_level) // leaf: pull parent's fixed ICVs directly
        copy_icvs(&team->t.t_implicit_task_taskdata[tid].td_icvs,
                  &thr_bar->parent_bar->th_fixed_icvs);
      // non-leaves get ICVs piggybacked with b_go via the NGO store
    } else { // blocktime is not infinite: pull ICVs from parent's fixed ICVs
      if (thr_bar->my_level) // not a leaf: copy ICVs where children can access
        copy_icvs(&thr_bar->th_fixed_icvs, &thr_bar->parent_bar->th_fixed_icvs);
      // ...
      copy_icvs(&team->t.t_implicit_task_taskdata[tid].td_icvs,
                &thr_bar->parent_bar->th_fixed_icvs);
    }
  }
#endif // KMP_BARRIER_ICV_PUSH

  // Now, release my children
  if (thr_bar->my_level) { // not a leaf
    // ...
    if (__kmp_dflt_blocktime == KMP_MAX_BLOCKTIME &&
        thr_bar->use_oncore_barrier) {
      if (KMP_MASTER_TID(tid)) { // do a flat release
        // b_go piggybacks in the last 8 bytes of the th_fixed_icvs cache line,
        // so one NGO store both stores the ICVs and releases the child
        // ...
        for (child_tid = thr_bar->skip_per_level[1]; child_tid < (int)nproc;
             child_tid += thr_bar->skip_per_level[1]) {
          kmp_bstate_t *child_bar =
              &team->t.t_threads[child_tid]->th.th_bar[bt].bb;
          KA_TRACE(20, ("__kmp_hierarchical_barrier_release: T#%d(%d:%d) "
                        "releasing T#%d(%d:%d)"
                        " go(%p): %u => %u\n",
                        gtid, team->t.t_id, tid,
                        // ...
                        child_tid, &child_bar->b_go, child_bar->b_go,
                        child_bar->b_go + KMP_BARRIER_STATE_BUMP));
          ngo_store_go(&child_bar->th_fixed_icvs, &thr_bar->th_fixed_icvs);
        }
        // ...
      }
      TCW_8(thr_bar->b_go,
            KMP_INIT_BARRIER_STATE); // Reset my b_go flag for the next time
      // Now, release my leaf children
      if (thr_bar->leaf_kids) {
        // ...
        if (team_change ||
            old_leaf_kids < thr_bar->leaf_kids) { // some old, some new
          if (old_leaf_kids) { // release old leaf kids
            thr_bar->b_go |= old_leaf_state;
          }
          // Release new leaf kids one by one
          last = tid + thr_bar->skip_per_level[1];
          if (last > nproc)
            last = nproc;
          for (child_tid = tid + 1 + old_leaf_kids; child_tid < (int)last;
               ++child_tid) {
            kmp_info_t *child_thr = team->t.t_threads[child_tid];
            kmp_bstate_t *child_bar = &child_thr->th.th_bar[bt].bb;
            KA_TRACE(
                20,
                ("__kmp_hierarchical_barrier_release: T#%d(%d:%d) releasing"
                 " T#%d(%d:%d) go(%p): %u => %u\n",
                 gtid, team->t.t_id, tid,
                 // ...
                 team->t.t_id, child_tid, &child_bar->b_go, child_bar->b_go,
                 child_bar->b_go + KMP_BARRIER_STATE_BUMP));
            // ...
          }
        } else { // Release all leaf children at once with one leaf_state store
          thr_bar->b_go |= thr_bar->leaf_state;
        }
      }
    } else { // Blocktime is not infinite; do a simple hierarchical release
      for (int d = thr_bar->my_level - 1; d >= 0;
           --d) { // Release the highest-level threads first
        last = tid + thr_bar->skip_per_level[d + 1];
        kmp_uint32 skip = thr_bar->skip_per_level[d];
        if (last > nproc)
          last = nproc;
        for (child_tid = tid + skip; child_tid < (int)last; child_tid += skip) {
          kmp_info_t *child_thr = team->t.t_threads[child_tid];
          kmp_bstate_t *child_bar = &child_thr->th.th_bar[bt].bb;
          KA_TRACE(20, ("__kmp_hierarchical_barrier_release: T#%d(%d:%d) "
                        "releasing T#%d(%d:%d) go(%p): %u => %u\n",
                        gtid, team->t.t_id, tid,
                        // ...
                        child_tid, &child_bar->b_go, child_bar->b_go,
                        child_bar->b_go + KMP_BARRIER_STATE_BUMP));
          // ...
        }
      }
    }
#if KMP_BARRIER_ICV_PUSH
    if (propagate_icvs && !KMP_MASTER_TID(tid))
      // non-leaves copy ICVs from fixed ICVs to the local destination
      copy_icvs(&team->t.t_implicit_task_taskdata[tid].td_icvs,
                &thr_bar->th_fixed_icvs);
#endif // KMP_BARRIER_ICV_PUSH
  }
  KA_TRACE(20, ("__kmp_hierarchical_barrier_release: T#%d(%d:%d) exit for "
                "barrier type %d\n",
                gtid, team->t.t_id, tid, bt));
}
template <bool cancellable> struct is_cancellable {};
template <> struct is_cancellable<true> {
  bool value;
  // ...
  is_cancellable &operator=(bool b) {
    value = b;
    return *this;
  }
  operator bool() const { return value; }
};
template <> struct is_cancellable<false> {
  is_cancellable &operator=(bool) { return *this; }
  constexpr operator bool() const { return false; }
};
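// --- Editor's sketch: the pattern behind is_cancellable. A template selects
// either a real flag or a compile-time-false stand-in, so one barrier body
// serves both instantiations. demo_cancel_result is a hypothetical rename.
template <bool cancellable> struct demo_cancel_result {
  bool value = false;
  demo_cancel_result &operator=(bool b) {
    value = b;
    return *this;
  }
  operator bool() const { return value; }
};
template <> struct demo_cancel_result<false> {
  demo_cancel_result &operator=(bool) { return *this; }
  constexpr operator bool() const { return false; }
};
// With demo_cancel_result<false>, any `if (cancelled)` is a constant branch
// the compiler can remove entirely.
// --- end sketch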
template <bool cancellable = false>
static int __kmp_barrier_template(enum barrier_type bt, int gtid, int is_split,
                                  size_t reduce_size, void *reduce_data,
                                  void (*reduce)(void *, void *)) {
  // ...
#if OMPT_SUPPORT && OMPT_OPTIONAL
  ompt_data_t *my_task_data;
  ompt_data_t *my_parallel_data;
  void *return_address;
  ompt_sync_region_t barrier_kind;
#endif

  KA_TRACE(15, ("__kmp_barrier: T#%d(%d:%d) has arrived\n", gtid,
                // ...
                ));
  // ...
#if OMPT_SUPPORT
  if (ompt_enabled.enabled) {
#if OMPT_OPTIONAL
    // ...
    my_task_data = OMPT_CUR_TASK_DATA(this_thr);
    my_parallel_data = OMPT_CUR_TEAM_DATA(this_thr);
    return_address = OMPT_LOAD_RETURN_ADDRESS(gtid);
    if (ompt_enabled.ompt_callback_sync_region) {
      ompt_callbacks.ompt_callback(ompt_callback_sync_region)(
          barrier_kind, ompt_scope_begin, my_parallel_data, my_task_data,
          return_address);
    }
    if (ompt_enabled.ompt_callback_sync_region_wait) {
      ompt_callbacks.ompt_callback(ompt_callback_sync_region_wait)(
          barrier_kind, ompt_scope_begin, my_parallel_data, my_task_data,
          return_address);
    }
#endif
    // ...
    auto *ompt_thr_info = &this_thr->th.ompt_thread_info;
    switch (barrier_kind) {
    case ompt_sync_region_barrier_explicit:
      ompt_thr_info->state = ompt_state_wait_barrier_explicit;
      break;
    case ompt_sync_region_barrier_implicit_workshare:
      ompt_thr_info->state = ompt_state_wait_barrier_implicit_workshare;
      break;
    case ompt_sync_region_barrier_implicit_parallel:
      ompt_thr_info->state = ompt_state_wait_barrier_implicit_parallel;
      break;
    case ompt_sync_region_barrier_teams:
      ompt_thr_info->state = ompt_state_wait_barrier_teams;
      break;
    case ompt_sync_region_barrier_implementation:
      // ...
    default:
      ompt_thr_info->state = ompt_state_wait_barrier_implementation;
    }
  }
#endif

  if (!team->t.t_serialized) {
#if USE_ITT_BUILD
    // This value will be used in itt notify events below
    void *itt_sync_obj = NULL;
#if USE_ITT_NOTIFY
    if (__itt_sync_create_ptr || KMP_ITT_DEBUG)
      itt_sync_obj = __kmp_itt_barrier_object(gtid, bt, 1);
#endif
#endif /* USE_ITT_BUILD */
    // ...
    KA_TRACE(15, ("__kmp_barrier: T#%d(%d:%d) past tasking barrier\n", gtid,
                  // ...
                  ));
    // ...
    this_thr->th.th_team_bt_intervals =
        team->t.t_implicit_task_taskdata[tid].td_icvs.bt_intervals;
    this_thr->th.th_team_bt_set =
        team->t.t_implicit_task_taskdata[tid].td_icvs.bt_set;
    // ...
#if USE_ITT_BUILD
    if (__itt_sync_create_ptr || KMP_ITT_DEBUG)
      __kmp_itt_barrier_starting(gtid, itt_sync_obj);
#endif /* USE_ITT_BUILD */
#if USE_DEBUGGER
    // Let the debugger know: the thread arrived at the barrier and is waiting
    if (KMP_MASTER_TID(tid)) { // primary counter is stored in team structure
      team->t.t_bar[bt].b_master_arrived += 1;
    } else {
      this_thr->th.th_bar[bt].bb.b_worker_arrived += 1;
    }
#endif /* USE_DEBUGGER */
    if (reduce != NULL) {
      // ...
      this_thr->th.th_local.reduce_data = reduce_data;
    }
    // ...
    // (gather through the pattern selected by __kmp_barrier_gather_pattern)
    // ...
    if (KMP_MASTER_TID(tid)) {
      // ...
      team->t.t_bar[bt].b_team_arrived += 1;
      // ...
#if USE_ITT_BUILD
      if (__itt_sync_create_ptr || KMP_ITT_DEBUG)
        __kmp_itt_barrier_middle(gtid, itt_sync_obj);
#endif /* USE_ITT_BUILD */
#if USE_ITT_BUILD && USE_ITT_NOTIFY
      // Barrier - report frame end (only if active_level == 1)
      if ((__itt_frame_submit_v3_ptr || KMP_ITT_DEBUG) &&
          __kmp_forkjoin_frames_mode &&
          (this_thr->th.th_teams_microtask == NULL || // either not in teams
           this_thr->th.th_teams_size.nteams == 1) && // or inside single team
          team->t.t_active_level == 1) {
        kmp_uint64 cur_time = __itt_get_timestamp();
        // ...
        int nproc = this_thr->th.th_team_nproc;
        int i;
        switch (__kmp_forkjoin_frames_mode) {
        case 1:
          __kmp_itt_frame_submit(gtid, this_thr->th.th_frame_time, cur_time, 0,
                                 loc, nproc);
          this_thr->th.th_frame_time = cur_time;
          break;
        case 2:
          __kmp_itt_frame_submit(gtid, this_thr->th.th_bar_min_time, cur_time,
                                 1, loc, nproc);
          break;
        case 3:
          if (__itt_metadata_add_ptr) {
            // Initialize with the primary thread's wait time
            kmp_uint64 delta = cur_time - this_thr->th.th_bar_arrive_time;
            // ...
            this_thr->th.th_bar_arrive_time = 0;
            for (i = 1; i < nproc; ++i) {
              delta += (cur_time - other_threads[i]->th.th_bar_arrive_time);
              other_threads[i]->th.th_bar_arrive_time = 0;
            }
            __kmp_itt_metadata_imbalance(gtid, this_thr->th.th_frame_time,
                                         cur_time, delta,
                                         (kmp_uint64)(reduce != NULL));
          }
          __kmp_itt_frame_submit(gtid, this_thr->th.th_frame_time, cur_time, 0,
                                 loc, nproc);
          this_thr->th.th_frame_time = cur_time;
          break;
        }
      }
#endif /* USE_ITT_BUILD */
    } else {
      // ...
#if USE_ITT_BUILD
      if (__itt_sync_create_ptr || KMP_ITT_DEBUG)
        __kmp_itt_barrier_middle(gtid, itt_sync_obj);
#endif /* USE_ITT_BUILD */
    }
    if ((status == 1 || !is_split) && !cancelled) {
      // ...
#if USE_ITT_BUILD
      if (__itt_sync_create_ptr || KMP_ITT_DEBUG)
        __kmp_itt_barrier_finished(gtid, itt_sync_obj);
#endif /* USE_ITT_BUILD */
      // ...
    }
  } else { // Team is serialized
    // ...
    if (this_thr->th.th_task_team != NULL) {
#if USE_ITT_NOTIFY
      void *itt_sync_obj = NULL;
      if (__itt_sync_create_ptr || KMP_ITT_DEBUG) {
        itt_sync_obj = __kmp_itt_barrier_object(gtid, bt, 1);
        __kmp_itt_barrier_starting(gtid, itt_sync_obj);
      }
#endif
      KMP_DEBUG_ASSERT(
          this_thr->th.th_task_team->tt.tt_found_proxy_tasks == TRUE ||
          this_thr->th.th_task_team->tt.tt_hidden_helper_task_encountered ==
              TRUE);
      // ...
#if USE_ITT_BUILD
      if (__itt_sync_create_ptr || KMP_ITT_DEBUG)
        __kmp_itt_barrier_finished(gtid, itt_sync_obj);
#endif /* USE_ITT_BUILD */
    }
  }
  KA_TRACE(15, ("__kmp_barrier: T#%d(%d:%d) is leaving with return value %d\n",
                // ...
                ));

#if OMPT_SUPPORT
  if (ompt_enabled.enabled) {
#if OMPT_OPTIONAL
    if (ompt_enabled.ompt_callback_sync_region_wait) {
      ompt_callbacks.ompt_callback(ompt_callback_sync_region_wait)(
          barrier_kind, ompt_scope_end, my_parallel_data, my_task_data,
          return_address);
    }
    if (ompt_enabled.ompt_callback_sync_region) {
      ompt_callbacks.ompt_callback(ompt_callback_sync_region)(
          barrier_kind, ompt_scope_end, my_parallel_data, my_task_data,
          return_address);
    }
#endif
    this_thr->th.ompt_thread_info.state = ompt_state_work_parallel;
  }
#endif

  if (cancellable)
    return (int)cancelled;
  return status;
}
int __kmp_barrier(enum barrier_type bt, int gtid, int is_split,
                  size_t reduce_size, void *reduce_data,
                  void (*reduce)(void *, void *)) {
  return __kmp_barrier_template<>(bt, gtid, is_split, reduce_size, reduce_data,
                                  reduce);
}
#if defined(KMP_GOMP_COMPAT)
int __kmp_barrier_gomp_cancel(int gtid) {
  // ...
}
#endif

void __kmp_end_split_barrier(enum barrier_type bt, int gtid) {
  // ...
  if (!team->t.t_serialized) {
    // ...
  }
}
void __kmp_join_barrier(int gtid) {
  // ...
#if USE_ITT_BUILD
  void *itt_sync_obj = NULL;
#if USE_ITT_NOTIFY
  // Get the object created at fork_barrier
  if (__itt_sync_create_ptr || KMP_ITT_DEBUG)
    itt_sync_obj = __kmp_itt_barrier_object(gtid, bs_forkjoin_barrier);
#endif
#endif /* USE_ITT_BUILD */
#if ((USE_ITT_BUILD && USE_ITT_NOTIFY) || defined KMP_DEBUG)
  int nproc = this_thr->th.th_team_nproc;
#endif
  // ...
  team = this_thr->th.th_team;
  // ...
  team_id = team->t.t_id;
  // ...
  kmp_info_t *master_thread = this_thr->th.th_team_master;
  if (master_thread != team->t.t_threads[0]) {
    // ...
  }
  // ...
  KA_TRACE(10, ("__kmp_join_barrier: T#%d(%d:%d) arrived at join barrier\n",
                gtid, team_id, tid));

#if OMPT_SUPPORT
  if (ompt_enabled.enabled) {
    // ...
    ompt_data_t *my_task_data;
    ompt_data_t *my_parallel_data;
    void *codeptr = NULL;
    int ds_tid = this_thr->th.th_info.ds.ds_tid;
    // ...
    codeptr = team->t.ompt_team_info.master_return_address;
    my_task_data = OMPT_CUR_TASK_DATA(this_thr);
    my_parallel_data = OMPT_CUR_TEAM_DATA(this_thr);
    ompt_sync_region_t sync_kind = ompt_sync_region_barrier_implicit_parallel;
    ompt_state_t ompt_state = ompt_state_wait_barrier_implicit_parallel;
    if (this_thr->th.ompt_thread_info.parallel_flags & ompt_parallel_league) {
      sync_kind = ompt_sync_region_barrier_teams;
      ompt_state = ompt_state_wait_barrier_teams;
    }
    if (ompt_enabled.ompt_callback_sync_region) {
      ompt_callbacks.ompt_callback(ompt_callback_sync_region)(
          sync_kind, ompt_scope_begin, my_parallel_data, my_task_data, codeptr);
    }
    if (ompt_enabled.ompt_callback_sync_region_wait) {
      ompt_callbacks.ompt_callback(ompt_callback_sync_region_wait)(
          sync_kind, ompt_scope_begin, my_parallel_data, my_task_data, codeptr);
    }
    // ...
    this_thr->th.ompt_thread_info.task_data = *OMPT_CUR_TASK_DATA(this_thr);
    // ...
    this_thr->th.ompt_thread_info.state = ompt_state;
  }
#endif
  // ...
  KA_TRACE(10, ("__kmp_join_barrier: T#%d(%d:%d) past tasking barrier\n",
                gtid, team_id, tid));
  // ...
  KA_TRACE(20, ("__kmp_join_barrier: T#%d, old team = %d, old task_team = "
                "%p, th_task_team = %p\n",
                // ...
                team->t.t_task_team[this_thr->th.th_task_state],
                this_thr->th.th_task_team));
  // ...
  this_thr->th.th_team_bt_intervals =
      team->t.t_implicit_task_taskdata[tid].td_icvs.bt_intervals;
  this_thr->th.th_team_bt_set =
      team->t.t_implicit_task_taskdata[tid].td_icvs.bt_set;
  // ...
#if USE_ITT_BUILD
  if (__itt_sync_create_ptr || KMP_ITT_DEBUG)
    __kmp_itt_barrier_starting(gtid, itt_sync_obj);
#endif /* USE_ITT_BUILD */
  // ...
  // (gather; the primary thread continues below after all threads arrive)
  // ...
#if KMP_STATS_ENABLED
  // Have the primary thread flag the workers to indicate they are now waiting
  // for the next parallel region, and wake them so they switch to idle timers
  for (int i = 0; i < team->t.t_nproc; ++i) {
    kmp_info_t *team_thread = team->t.t_threads[i];
    if (team_thread == this_thr)
      continue;
    team_thread->th.th_stats->setIdleFlag();
    if (__kmp_dflt_blocktime != KMP_MAX_BLOCKTIME &&
        team_thread->th.th_sleep_loc != NULL)
      __kmp_null_resume_wrapper(team_thread);
  }
#endif
#if USE_ITT_BUILD
  if (__itt_sync_create_ptr || KMP_ITT_DEBUG)
    __kmp_itt_barrier_middle(gtid, itt_sync_obj);
#endif /* USE_ITT_BUILD */

#if USE_ITT_BUILD && USE_ITT_NOTIFY
  // Join barrier - report frame end
  if ((__itt_frame_submit_v3_ptr || KMP_ITT_DEBUG) &&
      __kmp_forkjoin_frames_mode &&
      (this_thr->th.th_teams_microtask == NULL || // either not in teams
       this_thr->th.th_teams_size.nteams == 1) && // or inside single team
      team->t.t_active_level == 1) {
    kmp_uint64 cur_time = __itt_get_timestamp();
    // ...
    switch (__kmp_forkjoin_frames_mode) {
    case 1:
      __kmp_itt_frame_submit(gtid, this_thr->th.th_frame_time, cur_time, 0,
                             loc, nproc);
      break;
    case 2:
      __kmp_itt_frame_submit(gtid, this_thr->th.th_bar_min_time, cur_time, 1,
                             loc, nproc);
      break;
    case 3:
      if (__itt_metadata_add_ptr) {
        // Initialize with the primary thread's wait time
        kmp_uint64 delta = cur_time - this_thr->th.th_bar_arrive_time;
        // ...
        this_thr->th.th_bar_arrive_time = 0;
        for (int i = 1; i < nproc; ++i) {
          delta += (cur_time - other_threads[i]->th.th_bar_arrive_time);
          other_threads[i]->th.th_bar_arrive_time = 0;
        }
        __kmp_itt_metadata_imbalance(gtid, this_thr->th.th_frame_time,
                                     cur_time, delta, 0);
      }
      __kmp_itt_frame_submit(gtid, this_thr->th.th_frame_time, cur_time, 0,
                             loc, nproc);
      this_thr->th.th_frame_time = cur_time;
      break;
    }
  }
#endif /* USE_ITT_BUILD */
  // ...
#if USE_ITT_BUILD
  if (__itt_sync_create_ptr || KMP_ITT_DEBUG)
    __kmp_itt_barrier_middle(gtid, itt_sync_obj);
#endif /* USE_ITT_BUILD */
  // ...
  KA_TRACE(
      10,
      ("__kmp_join_barrier: T#%d(%d:%d) says all %d team threads arrived\n",
       gtid, team_id, tid, nproc));
  // ...
  KA_TRACE(10,
           ("__kmp_join_barrier: T#%d(%d:%d) leaving\n", gtid, team_id, tid));
}
void __kmp_fork_barrier(int gtid, int tid) {
  // ...
  kmp_team_t *team = (tid == 0) ? this_thr->th.th_team : NULL;
#if USE_ITT_BUILD
  void *itt_sync_obj = NULL;
#endif /* USE_ITT_BUILD */
  // ...
  KA_TRACE(10, ("__kmp_fork_barrier: T#%d(%d:%d) has arrived\n", gtid,
                (team != NULL) ? team->t.t_id : -1, tid));

  // th_team pointer is only valid for the primary thread here
  if (KMP_MASTER_GTID(gtid)) {
#if USE_ITT_BUILD && USE_ITT_NOTIFY
    if (__itt_sync_create_ptr || KMP_ITT_DEBUG) {
      // ...
      __kmp_itt_barrier_middle(gtid, itt_sync_obj);
    }
#endif /* USE_ITT_BUILD && USE_ITT_NOTIFY */

#ifdef KMP_DEBUG
    // Verify state
    // ...
    for (i = 1; i < team->t.t_nproc; ++i) {
      KA_TRACE(500,
               ("__kmp_fork_barrier: T#%d(%d:0) checking T#%d(%d:%d) fork go "
                "== %u.\n",
                gtid, team->t.t_id, other_threads[i]->th.th_info.ds.ds_gtid,
                team->t.t_id, other_threads[i]->th.th_info.ds.ds_tid,
                // ...
                ));
      // ...
    }
#endif
    // ...
  }
  // ...
  this_thr->th.th_team_bt_intervals =
      team->t.t_implicit_task_taskdata[tid].td_icvs.bt_intervals;
  this_thr->th.th_team_bt_set =
      team->t.t_implicit_task_taskdata[tid].td_icvs.bt_set;
  // ...
  // (workers wait on the fork barrier release here)
  // ...
#if OMPT_SUPPORT
  ompt_state_t ompt_state = this_thr->th.ompt_thread_info.state;
  if (ompt_enabled.enabled &&
      (ompt_state == ompt_state_wait_barrier_teams ||
       ompt_state == ompt_state_wait_barrier_implicit_parallel)) {
    int ds_tid = this_thr->th.th_info.ds.ds_tid;
    ompt_data_t *task_data = (team)
                                 ? OMPT_CUR_TASK_DATA(this_thr)
                                 : &(this_thr->th.ompt_thread_info.task_data);
    this_thr->th.ompt_thread_info.state = ompt_state_overhead;
#if OMPT_OPTIONAL
    void *codeptr = NULL;
    // ...
    codeptr = team ? team->t.ompt_team_info.master_return_address : NULL;
    ompt_sync_region_t sync_kind = ompt_sync_region_barrier_implicit_parallel;
    if (this_thr->th.ompt_thread_info.parallel_flags & ompt_parallel_league)
      sync_kind = ompt_sync_region_barrier_teams;
    if (ompt_enabled.ompt_callback_sync_region_wait) {
      ompt_callbacks.ompt_callback(ompt_callback_sync_region_wait)(
          sync_kind, ompt_scope_end, NULL, task_data, codeptr);
    }
    if (ompt_enabled.ompt_callback_sync_region) {
      ompt_callbacks.ompt_callback(ompt_callback_sync_region)(
          sync_kind, ompt_scope_end, NULL, task_data, codeptr);
    }
#endif
    if (!KMP_MASTER_TID(ds_tid) && ompt_enabled.ompt_callback_implicit_task) {
      ompt_callbacks.ompt_callback(ompt_callback_implicit_task)(
          ompt_scope_end, NULL, task_data, 0, ds_tid, ompt_task_implicit);
    }
  }
#endif
  // ...
  // Early exit for reaping threads releasing the forkjoin barrier
  if (TCR_4(__kmp_global.g.g_done)) {
    this_thr->th.th_task_team = NULL;
#if USE_ITT_BUILD && USE_ITT_NOTIFY
    if (__itt_sync_create_ptr || KMP_ITT_DEBUG) {
      // ...
      __kmp_itt_barrier_finished(gtid, itt_sync_obj);
      // ...
    }
#endif
    KA_TRACE(10, ("__kmp_fork_barrier: T#%d is leaving early\n", gtid));
    return;
  }
  // ...
#if KMP_BARRIER_ICV_PULL
  // Workers pull the ICVs from the primary thread's fixed ICV store
  {
    // ...
    if (!KMP_MASTER_TID(tid)) {
      KA_TRACE(10,
               ("__kmp_fork_barrier: T#%d(%d) is PULLing ICVs\n", gtid, tid));
      // ...
      copy_icvs(&team->t.t_implicit_task_taskdata[tid].td_icvs,
                &team->t.t_threads[0]
                     ->th.th_bar[bs_forkjoin_barrier].bb.th_fixed_icvs);
    }
  }
#endif // KMP_BARRIER_ICV_PULL
  // ...
#if KMP_AFFINITY_SUPPORTED
  // ...
  if (__kmp_affinity.type == affinity_balanced && team->t.t_size_changed) {
    __kmp_balanced_affinity(this_thr, team->t.t_nproc);
  }
  // ...
  if (this_thr->th.th_new_place == this_thr->th.th_current_place) {
    KA_TRACE(100, ("__kmp_fork_barrier: T#%d already in correct place %d\n",
                   // ...
                   this_thr->th.th_current_place));
  } else {
    __kmp_affinity_bind_place(gtid);
  }
#endif // KMP_AFFINITY_SUPPORTED
  // ...
  if (team->t.t_display_affinity
#if KMP_AFFINITY_SUPPORTED
      || (__kmp_affinity.type == affinity_balanced && team->t.t_size_changed)
#endif
  ) {
    // ...
    this_thr->th.th_prev_num_threads = team->t.t_nproc;
    this_thr->th.th_prev_level = team->t.t_level;
  }
  // ...
#if USE_ITT_BUILD && USE_ITT_NOTIFY
  if (__itt_sync_create_ptr || KMP_ITT_DEBUG) {
    // ...
    __kmp_itt_barrier_finished(gtid, itt_sync_obj);
    // ...
  }
#endif
  KA_TRACE(10, ("__kmp_fork_barrier: T#%d(%d:%d) is leaving\n", gtid,
                team->t.t_id, tid));
}
void __kmp_setup_icv_copy(kmp_team_t *team, int new_nproc,
                          kmp_internal_control_t *new_icvs, ident_t *loc) {
  // ...
#if KMP_BARRIER_ICV_PULL
  // Copy the ICVs into the primary thread's fixed ICV store, where the worker
  // threads can access them and make their own copies after the barrier
  // ...
  KF_TRACE(10, ("__kmp_setup_icv_copy: PULL: T#%d this_thread=%p team=%p\n", 0,
                team->t.t_threads[0], team));
#elif KMP_BARRIER_ICV_PUSH
  // The ICVs will be propagated in the fork barrier, so nothing is needed here
  KF_TRACE(10, ("__kmp_setup_icv_copy: PUSH: T#%d this_thread=%p team=%p\n", 0,
                team->t.t_threads[0], team));
#else
  // Copy the ICVs to each of the non-primary threads; this takes O(nthreads)
  // time
  // ...
  for (int f = 1; f < new_nproc; ++f) { // skip the primary thread
    KF_TRACE(10,
             ("__kmp_setup_icv_copy: LINEAR: T#%d this_thread=%p team=%p\n", f,
              team->t.t_threads[f], team));
    __kmp_init_implicit_task(loc, team->t.t_threads[f], team, f, FALSE);
    ngo_store_icvs(&team->t.t_implicit_task_taskdata[f].td_icvs, new_icvs);
    KF_TRACE(10,
             ("__kmp_setup_icv_copy: LINEAR: T#%d this_thread=%p team=%p\n", f,
              team->t.t_threads[f], team));
  }
  // ...
#endif // KMP_BARRIER_ICV_PULL
}
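// --- Editor's sketch: the shape of the linear (#else) propagation above,
// reusing the hypothetical demo_icvs_t/demo_store_icvs helpers from the first
// sketch near the top of this file.
static void demo_setup_icv_copy(demo_icvs_t *per_thread_icvs, int nproc,
                                const demo_icvs_t *new_icvs) {
  for (int f = 1; f < nproc; ++f) // skip the primary thread, f == 0
    demo_store_icvs(&per_thread_icvs[f], new_icvs);
}
// --- end sketch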