#define HWLOC_GROUP_KIND_INTEL_MODULE 102
#define HWLOC_GROUP_KIND_INTEL_TILE 103
#define HWLOC_GROUP_KIND_INTEL_DIE 104
#define HWLOC_GROUP_KIND_WINDOWS_PROCESSOR_GROUP 220
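// These constants mirror the hwloc group "kind" values reported in
// hwloc_group_attr_s::kind, so HWLOC_OBJ_GROUP objects can be mapped onto
// module/tile/die/proc-group topology levels below without needing a hwloc
// version that exports named enumerators for them.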
#if KMP_AFFINITY_SUPPORTED

class kmp_full_mask_modifier_t {
  kmp_affin_mask_t *mask;

  kmp_full_mask_modifier_t() {
  ~kmp_full_mask_modifier_t() {
  void include(const kmp_affin_mask_t *other) { KMP_CPU_UNION(mask, other); }
  bool restrict_to_mask() {
    if (KMP_CPU_EQUAL(__kmp_affin_fullMask, mask) || KMP_CPU_ISEMPTY(mask))
static inline const char *
__kmp_get_affinity_env_var(const kmp_affinity_t &affinity,
                           bool for_binding = false) {
  if (affinity.flags.omp_places) {
      return "OMP_PROC_BIND";
  return affinity.env_var;
  thr_bar->depth = depth;
                       &(thr_bar->base_leaf_kids));
#ifndef KMP_DFLT_NTH_CORES
    KMP_ASSERT2(false, "Unhandled kmp_hw_t enumeration");
    return ((plural) ? "sockets" : "socket");
    return ((plural) ? "dice" : "die");
    return ((plural) ? "modules" : "module");
    return ((plural) ? "tiles" : "tile");
    return ((plural) ? "numa_domains" : "numa_domain");
    return ((plural) ? "l3_caches" : "l3_cache");
    return ((plural) ? "l2_caches" : "l2_cache");
    return ((plural) ? "l1_caches" : "l1_cache");
    return ((plural) ? "ll_caches" : "ll_cache");
    return ((plural) ? "cores" : "core");
    return ((plural) ? "threads" : "thread");
    return ((plural) ? "proc_groups" : "proc_group");
    return ((plural) ? "unknowns" : "unknown");
    KMP_ASSERT2(false, "Unhandled kmp_hw_t enumeration");
#if KMP_ARCH_X86 || KMP_ARCH_X86_64
  case KMP_HW_CORE_TYPE_ATOM:
    return "Intel Atom(R) processor";
  case KMP_HW_CORE_TYPE_CORE:
    return "Intel(R) Core(TM) processor";
    KMP_ASSERT2(false, "Unhandled kmp_hw_core_type_t enumeration");
#if KMP_AFFINITY_SUPPORTED

#define KMP_AFF_WARNING(s, ...)                                                \
  if (s.flags.verbose || (s.flags.warnings && (s.type != affinity_none))) {    \
    KMP_WARNING(__VA_ARGS__);                                                  \
#define KMP_AFF_WARNING(s, ...) KMP_WARNING(__VA_ARGS__)
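// (KMP_AFF_WARNING only fires when the user asked for verbose output, or for
// warnings while the affinity type is not affinity_none; the non-affinity
// build degrades it to a plain KMP_WARNING.)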
#if KMP_AFFINITY_SUPPORTED
  for (i = 0; i < compact; i++) {
    int j = depth - i - 1;
  for (; i < depth; i++) {
  printf("%4d ", os_id);
  for (int i = 0; i < depth; ++i) {
  for (target_layer = 0; target_layer < depth; ++target_layer) {
    bool layers_equal = true;
    bool strictly_above_target_layer = false;
    for (int i = 0; i < num_hw_threads; ++i) {
      int id = hw_threads[i].ids[target_layer];
      if (id != previous_id && new_id == previous_new_id) {
        strictly_above_target_layer = true;
        layers_equal = false;
      } else if (id == previous_id && new_id != previous_new_id) {
        layers_equal = false;
      previous_new_id = new_id;
    if (strictly_above_target_layer || layers_equal)
  for (int i = depth - 1, j = depth; i >= target_layer; --i, --j)
  types[target_layer] = type;
  for (int k = 0; k < num_hw_threads; ++k) {
    for (int i = depth - 1, j = depth; i >= target_layer; --i, --j)
      hw_threads[k].ids[j] = hw_threads[k].ids[i];
    hw_threads[k].ids[target_layer] = ids[k];
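// (This is the layer-insertion path: once the right target_layer is found,
// the existing layers from target_layer down are shifted one slot toward the
// leaves and the new per-thread ids are spliced in at target_layer.)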
#if KMP_GROUP_AFFINITY
void kmp_topology_t::_insert_windows_proc_groups() {
  if (__kmp_num_proc_groups == 1)
  kmp_affin_mask_t *mask;
  for (int i = 0; i < num_hw_threads; ++i) {
    KMP_CPU_SET(hw_threads[i].os_id, mask);
    ids[i] = __kmp_get_proc_group(mask);
void kmp_topology_t::_remove_radix1_layers() {
  int top_index1, top_index2;
  while (top_index1 < depth - 1 && top_index2 < depth) {
      top_index1 = top_index2++;
    bool all_same = true;
    int id1 = hw_threads[0].ids[top_index1];
    int id2 = hw_threads[0].ids[top_index2];
    int pref1 = preference[type1];
    int pref2 = preference[type2];
    for (int hwidx = 1; hwidx < num_hw_threads; ++hwidx) {
      if (hw_threads[hwidx].ids[top_index1] == id1 &&
          hw_threads[hwidx].ids[top_index2] != id2) {
      if (hw_threads[hwidx].ids[top_index2] != id2)
      id1 = hw_threads[hwidx].ids[top_index1];
      id2 = hw_threads[hwidx].ids[top_index2];
    int remove_layer, remove_layer_ids;
      remove_layer = remove_layer_ids = top_index2;
      remove_layer = remove_layer_ids = top_index1;
        remove_layer_ids = top_index2;
    for (int idx = 0; idx < num_hw_threads; ++idx) {
      for (int d = remove_layer_ids; d < depth - 1; ++d)
        hw_thread.ids[d] = hw_thread.ids[d + 1];
    for (int idx = remove_layer; idx < depth - 1; ++idx)
      types[idx] = types[idx + 1];
    top_index1 = top_index2++;
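// (A layer is radix-1 when each of its entities has exactly one child, e.g.
// one thread per core; the two adjacent layers then encode the same
// partition, so the less-preferred layer is dropped and ids are shifted up.)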
void kmp_topology_t::_set_last_level_cache() {
  else if (__kmp_mic_type == mic3) {
void kmp_topology_t::_gather_enumeration_information() {
  for (int i = 0; i < depth; ++i) {
  for (int i = 0; i < num_hw_threads; ++i) {
    for (int layer = 0; layer < depth; ++layer) {
      int id = hw_thread.ids[layer];
      if (id != previous_id[layer]) {
        for (int l = layer; l < depth; ++l) {
        for (int l = layer + 1; l < depth; ++l) {
          if (max[l] > ratio[l])
        for (int j = 0; j < num_core_types; ++j) {
    for (int layer = 0; layer < depth; ++layer) {
      previous_id[layer] = hw_thread.ids[layer];
  for (int layer = 0; layer < depth; ++layer) {
    if (max[layer] > ratio[layer])
      ratio[layer] = max[layer];
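// (After this pass, count[layer] holds the total number of entities seen at
// that layer, while ratio[layer] holds the maximum number of sub-entities
// observed under any single parent, e.g. threads per core.)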
int kmp_topology_t::_get_ncores_with_attr(const kmp_hw_attr_t &attr,
                                          bool find_all) const {
  int current, current_max;
  for (int i = 0; i < depth; ++i)
  for (int i = 0; i < num_hw_threads; ++i) {
    if (!find_all && hw_thread.ids[above_level] != previous_id[above_level]) {
      if (current > current_max)
        current_max = current;
  if (current > current_max)
    current_max = current;
void kmp_topology_t::_discover_uniformity() {
  flags.uniform = (num == count[depth - 1]);
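// (Uniform means the product of the per-layer ratios equals the real leaf
// count, i.e. no parent is missing children: 2 sockets x 4 cores x 2 threads
// is uniform only if exactly 16 hw threads were counted.)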
void kmp_topology_t::_set_sub_ids() {
  for (int i = 0; i < depth; ++i) {
  for (int i = 0; i < num_hw_threads; ++i) {
    for (int j = 0; j < depth; ++j) {
      if (hw_thread.ids[j] != previous_id[j]) {
        for (int k = j + 1; k < depth; ++k) {
    for (int j = 0; j < depth; ++j) {
      previous_id[j] = hw_thread.ids[j];
    for (int j = 0; j < depth; ++j) {
void kmp_topology_t::_set_globals() {
  int core_level, thread_level, package_level;
#if KMP_GROUP_AFFINITY
  if (package_level == -1)
  if (package_level != -1) {
#ifndef KMP_DFLT_NTH_CORES
  retval->hw_threads = nullptr;
  retval->num_hw_threads = nproc;
  retval->depth = ndepth;
  retval->num_core_efficiencies = 0;
  retval->num_core_types = 0;
  for (int i = 0; i < ndepth; ++i) {
    retval->types[i] = types[i];
    retval->equivalent[types[i]] = types[i];
  if (num_hw_threads == 0)
  for (int i = 1; i < num_hw_threads; ++i) {
    for (int j = 0; j < depth; ++j) {
      if (previous_thread.ids[j] != current_thread.ids[j]) {
  printf("***********************\n");
  printf("*** __kmp_topology: ***\n");
  printf("***********************\n");
  printf("* depth: %d\n", depth);
  for (int i = 0; i < depth; ++i)
  for (int i = 0; i < depth; ++i) {
    printf("%15d ", ratio[i]);
  for (int i = 0; i < depth; ++i) {
    printf("%15d ", count[i]);
  printf("* num_core_eff: %d\n", num_core_efficiencies);
  printf("* num_core_types: %d\n", num_core_types);
  printf("* core_types: ");
  for (int i = 0; i < num_core_types; ++i)
    printf("%3d ", core_types[i]);
  printf("* equivalent map:\n");
    printf("%-15s -> %-15s\n", key, value);
  printf("* uniform: %s\n", (is_uniform() ? "Yes" : "No"));
  printf("* num_hw_threads: %d\n", num_hw_threads);
  printf("* hw_threads:\n");
  for (int i = 0; i < num_hw_threads; ++i) {
  printf("***********************\n");
  int print_types_depth;
  if (num_hw_threads) {
    KMP_INFORM(AvailableOSProc, env_var, num_hw_threads);
  print_types_depth = 0;
    print_types[print_types_depth++] = types[level];
  for (int plevel = 0, level = 0; plevel < print_types_depth; ++plevel) {
    numerator_type = print_types[plevel];
    if (equivalent[numerator_type] != numerator_type)
      denominator_type = numerator_type;
  for (int i = 0; i < num_core_types; ++i) {
  for (int eff = 0; eff < num_core_efficiencies; ++eff) {
    if (ncores_with_eff > 0) {
      KMP_INFORM(TopologyHybridCoreEff, env_var, ncores_with_eff, eff);
  if (num_hw_threads <= 0) {
  KMP_INFORM(OSProcToPhysicalThreadMap, env_var);
  for (int i = 0; i < num_hw_threads; i++) {
    KMP_INFORM(OSProcMapToPack, env_var, hw_threads[i].os_id, buf.str);
#if KMP_AFFINITY_SUPPORTED
void kmp_topology_t::set_granularity(kmp_affinity_t &affinity) const {
  const char *env_var = __kmp_get_affinity_env_var(affinity);
  if (affinity.core_attr_gran.valid) {
          affinity, AffIgnoringNonHybrid, env_var,
      affinity.gran_levels = -1;
      affinity.core_attr_gran = KMP_AFFINITY_ATTRS_UNKNOWN;
      affinity.flags.core_types_gran = affinity.flags.core_effs_gran = 0;
  } else if (affinity.flags.core_types_gran ||
             affinity.flags.core_effs_gran) {
    if (affinity.flags.omp_places) {
          affinity, AffIgnoringNonHybrid, env_var,
          "Intel(R) Hybrid Technology core attribute",
      affinity.gran_levels = -1;
      affinity.core_attr_gran = KMP_AFFINITY_ATTRS_UNKNOWN;
      affinity.flags.core_types_gran = affinity.flags.core_effs_gran = 0;
  if (affinity.gran_levels < 0) {
    for (auto g : gran_types) {
    affinity.gran = gran_type;
#if KMP_GROUP_AFFINITY
    if (__kmp_num_proc_groups > 1) {
      if (gran_depth >= 0 && proc_group_depth >= 0 &&
          gran_depth < proc_group_depth) {
    affinity.gran_levels = 0;
    for (int i = depth - 1; i >= 0 && get_type(i) != gran_type; --i)
      affinity.gran_levels++;
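// (gran_levels counts the topology layers below the granularity layer; with
// a socket/core/thread topology and granularity "core" it ends up as 1,
// meaning the bottom one layer of ids is ignored when masks are built.)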
#if KMP_GROUP_AFFINITY
  _insert_windows_proc_groups();
  _remove_radix1_layers();
  _gather_enumeration_information();
  _discover_uniformity();
  _set_last_level_cache();
  if (__kmp_mic_type == mic3) {
                                 int nthreads_per_core, int ncores) {
  count[0] = npackages;
  ratio[0] = npackages;
  ratio[1] = ncores_per_pkg;
  ratio[2] = nthreads_per_core;
  _discover_uniformity();
#if KMP_AFFINITY_SUPPORTED
bool kmp_topology_t::restrict_to_mask(const kmp_affin_mask_t *mask) {
  for (int i = 0; i < num_hw_threads; ++i) {
    int os_id = hw_threads[i].os_id;
    if (KMP_CPU_ISSET(os_id, mask)) {
      hw_threads[new_index] = hw_threads[i];
      KMP_CPU_CLR(os_id, __kmp_affin_fullMask);
  affected = (num_hw_threads != new_index);
  num_hw_threads = new_index;
    _gather_enumeration_information();
    _discover_uniformity();
    _set_last_level_cache();
    if (__kmp_num_proc_groups <= 1)
    __kmp_affin_origMask->copy(__kmp_affin_fullMask);
bool kmp_topology_t::filter_hw_subset() {
  bool using_core_types = false;
  bool using_core_effs = false;
  int *topology_levels = (int *)KMP_ALLOCA(sizeof(int) * hw_subset_depth);
  for (int i = 0; i < hw_subset_depth; ++i) {
    int num = item.num[0];
    int offset = item.offset[0];
    topology_levels[i] = level;
    specified[equivalent_type] = type;
    if (max_count < 0 ||
      bool plural = (num > 1);
    if (core_level == level) {
        using_core_types = true;
        using_core_effs = true;
  if (using_core_effs) {
      using_core_effs = false;
      using_core_types = false;
  if (using_core_types && using_core_effs) {
  if (using_core_effs) {
    if (core_eff < 0 || core_eff >= num_core_efficiencies) {
          KMP_MSG(AffHWSubsetAttrInvalid, "efficiency", buf.str),
          KMP_HNT(ValidValuesRange, 0, num_core_efficiencies - 1),
  if ((using_core_types || using_core_effs) && !is_absolute) {
    int num = item.num[j];
    int level_above = core_level - 1;
    if (level_above >= 0) {
      if (max_count <= 0 ||
        __kmp_hw_get_catalog_core_string(item.attr[j], &buf, num > 0);
  if ((using_core_types || using_core_effs) && item.num_attrs > 1) {
    if (!item.attr[j]) {
      for (int k = 0; k < item.num_attrs; ++k) {
          other_attr = item.attr[k];
        __kmp_hw_get_catalog_core_string(other_attr, &buf, item.num[j] > 0);
      for (int k = 0; k < j; ++k) {
        __kmp_hw_get_catalog_core_string(item.attr[j], &buf,
    abs_sub_ids[i] = -1;
    prev_sub_ids[i] = -1;
    core_eff_sub_ids[i] = -1;
    core_type_sub_ids[i] = -1;
  auto is_targeted = [&](int level) {
    for (int i = 0; i < hw_subset_depth; ++i)
      if (topology_levels[i] == level)
    switch (t.attrs.get_core_type()) {
#if KMP_ARCH_X86 || KMP_ARCH_X86_64
    case KMP_HW_CORE_TYPE_ATOM:
    case KMP_HW_CORE_TYPE_CORE:
      KMP_ASSERT2(false, "Unhandled kmp_hw_thread_t enumeration");
    return t.attrs.get_core_eff();
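// (The *_sub_ids counters implement KMP_HW_SUBSET offsets: they track how
// many entities of each targeted layer, core type, or core efficiency have
// been seen under the current parent, so an item such as 2c@2 can skip the
// first two cores.)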
  int num_filtered = 0;
  kmp_affin_mask_t *filtered_mask;
  KMP_CPU_ALLOC(filtered_mask);
  KMP_CPU_COPY(filtered_mask, __kmp_affin_fullMask);
  for (int i = 0; i < num_hw_threads; ++i) {
    if (is_absolute || using_core_effs || using_core_types) {
      bool found_targeted = false;
        bool targeted = is_targeted(j);
        if (!found_targeted && targeted) {
          found_targeted = true;
          if (j == core_level && using_core_effs)
            core_eff_sub_ids[get_core_eff_index(hw_thread)]++;
          if (j == core_level && using_core_types)
            core_type_sub_ids[get_core_type_index(hw_thread)]++;
        } else if (targeted) {
          if (j == core_level && using_core_effs)
            core_eff_sub_ids[get_core_eff_index(hw_thread)] = 0;
          if (j == core_level && using_core_types)
            core_type_sub_ids[get_core_type_index(hw_thread)] = 0;
    bool should_be_filtered = false;
    for (int hw_subset_index = 0; hw_subset_index < hw_subset_depth;
         ++hw_subset_index) {
      int level = topology_levels[hw_subset_index];
      if ((using_core_effs || using_core_types) && level == core_level) {
        for (attr_idx = 0; attr_idx < hw_subset_item.num_attrs; ++attr_idx) {
          if (using_core_types &&
              hw_subset_item.attr[attr_idx].get_core_type() == core_type)
          if (using_core_effs &&
              hw_subset_item.attr[attr_idx].get_core_eff() == core_eff)
        if (attr_idx == hw_subset_item.num_attrs) {
          should_be_filtered = true;
          int num = hw_subset_item.num[attr_idx];
          int offset = hw_subset_item.offset[attr_idx];
          if (using_core_types)
            sub_id = core_type_sub_ids[get_core_type_index(hw_thread)];
            sub_id = core_eff_sub_ids[get_core_eff_index(hw_thread)];
          if (sub_id < offset ||
            should_be_filtered = true;
        int num = hw_subset_item.num[0];
        int offset = hw_subset_item.offset[0];
        sub_id = abs_sub_ids[level];
          should_be_filtered = true;
    if (should_be_filtered) {
      KMP_CPU_CLR(hw_thread.os_id, filtered_mask);
  if (num_filtered == num_hw_threads) {
  restrict_to_mask(filtered_mask);
bool kmp_topology_t::is_close(int hwt1, int hwt2,
                              const kmp_affinity_t &stgs) const {
  int hw_level = stgs.gran_levels;
  if (hw_level >= depth)
  if (stgs.flags.core_types_gran)
  if (stgs.flags.core_effs_gran)
  for (int i = 0; i < (depth - hw_level); ++i) {
bool KMPAffinity::picked_api = false;

void *KMPAffinity::Mask::operator new(size_t n) { return __kmp_allocate(n); }
void *KMPAffinity::Mask::operator new[](size_t n) { return __kmp_allocate(n); }
void KMPAffinity::Mask::operator delete(void *p) { __kmp_free(p); }
void KMPAffinity::Mask::operator delete[](void *p) { __kmp_free(p); }
void *KMPAffinity::operator new(size_t n) { return __kmp_allocate(n); }
void KMPAffinity::operator delete(void *p) { __kmp_free(p); }
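// (Overriding operator new/delete routes every mask and dispatcher object
// through __kmp_allocate/__kmp_free, presumably to keep them in the
// runtime's own tracked heap rather than the default allocator.)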
void KMPAffinity::pick_api() {
  KMPAffinity *affinity_dispatch;
  if (__kmp_affinity_top_method == affinity_top_method_hwloc &&
      __kmp_affinity.type != affinity_disabled) {
    affinity_dispatch = new KMPHwlocAffinity();
    affinity_dispatch = new KMPNativeAffinity();
  __kmp_affinity_dispatch = affinity_dispatch;

void KMPAffinity::destroy_api() {
  if (__kmp_affinity_dispatch != NULL) {
    delete __kmp_affinity_dispatch;
    __kmp_affinity_dispatch = NULL;
#define KMP_ADVANCE_SCAN(scan)                                                 \
  while (*scan != '\0') {                                                      \

char *__kmp_affinity_print_mask(char *buf, int buf_len,
                                kmp_affin_mask_t *mask) {
  int start = 0, finish = 0, previous = 0;
  char *end = buf + buf_len - 1;
  if (mask->begin() == mask->end()) {
    KMP_ADVANCE_SCAN(scan);
  start = mask->begin();
  for (finish = mask->next(start), previous = start;
       finish == previous + 1 && finish != mask->end();
       finish = mask->next(finish)) {
    KMP_ADVANCE_SCAN(scan);
    first_range = false;
    if (previous - start > 1) {
    KMP_ADVANCE_SCAN(scan);
    if (previous - start > 0) {
    KMP_ADVANCE_SCAN(scan);
    if (start == mask->end())
#undef KMP_ADVANCE_SCAN
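// (Both printers emit the mask as comma-separated ranges, e.g. "0-3,8,10-11";
// this variant writes into a fixed char buffer, the one below into a
// kmp_str_buf_t.)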
                                 kmp_affin_mask_t *mask) {
  int start = 0, finish = 0, previous = 0;
  if (mask->begin() == mask->end()) {
  start = mask->begin();
  for (finish = mask->next(start), previous = start;
       finish == previous + 1 && finish != mask->end();
       finish = mask->next(finish)) {
    first_range = false;
    if (previous - start > 1) {
    if (previous - start > 0) {
    if (start == mask->end())
static kmp_affin_mask_t *__kmp_parse_cpu_list(const char *path) {
  kmp_affin_mask_t *mask;
  KMP_CPU_ALLOC(mask);
  int n, begin_cpu, end_cpu;
  auto skip_ws = [](FILE *f) {
    } while (isspace(c));
  while (!feof(file)) {
    n = fscanf(file, "%d", &begin_cpu);
    int c = fgetc(file);
    if (c == EOF || c == ',') {
      end_cpu = begin_cpu;
    } else if (c == '-') {
      n = fscanf(file, "%d", &end_cpu);
    if (begin_cpu < 0 || begin_cpu >= __kmp_xproc || end_cpu < 0 ||
    for (int cpu = begin_cpu; cpu <= end_cpu; ++cpu) {
      KMP_CPU_SET(cpu, mask);

kmp_affin_mask_t *__kmp_affinity_get_offline_cpus() {
  return __kmp_parse_cpu_list("/sys/devices/system/cpu/offline");
int __kmp_affinity_entire_machine_mask(kmp_affin_mask_t *mask) {
#if KMP_GROUP_AFFINITY
  if (__kmp_num_proc_groups > 1) {
    for (group = 0; group < __kmp_num_proc_groups; group++) {
      int num = __kmp_GetActiveProcessorCount(group);
      for (i = 0; i < num; i++) {
        KMP_CPU_SET(i + group * (CHAR_BIT * sizeof(DWORD_PTR)), mask);
  kmp_affin_mask_t *offline_cpus = __kmp_affinity_get_offline_cpus();
    if (KMP_CPU_ISSET(proc, offline_cpus))
    KMP_CPU_SET(proc, mask);
  KMP_CPU_FREE(offline_cpus);

kmp_affin_mask_t *__kmp_affin_fullMask = NULL;
kmp_affin_mask_t *__kmp_affin_origMask = NULL;
static inline bool __kmp_hwloc_is_cache_type(hwloc_obj_t obj) {
#if HWLOC_API_VERSION >= 0x00020000
  return hwloc_obj_type_is_cache(obj->type);
  return obj->type == HWLOC_OBJ_CACHE;

static inline kmp_hw_t __kmp_hwloc_type_2_topology_type(hwloc_obj_t obj) {
  if (__kmp_hwloc_is_cache_type(obj)) {
    if (obj->attr->cache.type == HWLOC_OBJ_CACHE_INSTRUCTION)
    switch (obj->attr->cache.depth) {
#if KMP_MIC_SUPPORTED
      if (__kmp_mic_type == mic3) {
  switch (obj->type) {
  case HWLOC_OBJ_PACKAGE:
  case HWLOC_OBJ_NUMANODE:
  case HWLOC_OBJ_CORE:
  case HWLOC_OBJ_GROUP:
#if HWLOC_API_VERSION >= 0x00020000
    if (obj->attr->group.kind == HWLOC_GROUP_KIND_INTEL_DIE)
    else if (obj->attr->group.kind == HWLOC_GROUP_KIND_INTEL_TILE)
    else if (obj->attr->group.kind == HWLOC_GROUP_KIND_INTEL_MODULE)
    else if (obj->attr->group.kind == HWLOC_GROUP_KIND_WINDOWS_PROCESSOR_GROUP)
#if HWLOC_API_VERSION >= 0x00020100
static int __kmp_hwloc_get_nobjs_under_obj(hwloc_obj_t obj,
                                           hwloc_obj_type_t type) {
  for (first = hwloc_get_obj_below_by_type(__kmp_hwloc_topology, obj->type,
                                           obj->logical_index, type, 0);
       first != NULL && hwloc_get_ancestor_obj_by_type(__kmp_hwloc_topology,
                                                       obj->type, first) == obj;
       first = hwloc_get_next_obj_by_type(__kmp_hwloc_topology, first->type,

static int __kmp_hwloc_get_sub_id(hwloc_topology_t t, hwloc_obj_t higher,
                                  hwloc_obj_t lower) {
  hwloc_obj_type_t ltype = lower->type;
  int lindex = lower->logical_index - 1;
  obj = hwloc_get_obj_by_type(t, ltype, lindex);
  while (obj && lindex >= 0 &&
         hwloc_bitmap_isincluded(obj->cpuset, higher->cpuset)) {
    if (obj->userdata) {
    obj = hwloc_get_obj_by_type(t, ltype, lindex);
  lower->userdata = RCAST(void *, sub_id + 1);
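// (Sub ids are memoized in hwloc's per-object userdata pointer, stored
// offset by one so a cached value of zero remains distinguishable from
// "not computed yet".)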
static bool __kmp_affinity_create_hwloc_map(kmp_i18n_id_t *const msg_id) {
  int hw_thread_index, sub_id;
  hwloc_obj_t pu, obj, root, prev;
  hwloc_topology_t tp = __kmp_hwloc_topology;
  *msg_id = kmp_i18n_null;
  if (__kmp_affinity.flags.verbose) {
  if (!KMP_AFFINITY_CAPABLE()) {
    KMP_ASSERT(__kmp_affinity.type == affinity_none);
    hwloc_obj_t o = hwloc_get_obj_by_type(tp, HWLOC_OBJ_PACKAGE, 0);
      nCoresPerPkg = __kmp_hwloc_get_nobjs_under_obj(o, HWLOC_OBJ_CORE);
    o = hwloc_get_obj_by_type(tp, HWLOC_OBJ_CORE, 0);
#if HWLOC_API_VERSION >= 0x00020400
  int nr_cpu_kinds = hwloc_cpukinds_get_nr(tp, 0);
  typedef struct kmp_hwloc_cpukinds_info_t {
    hwloc_bitmap_t mask;
  } kmp_hwloc_cpukinds_info_t;
  kmp_hwloc_cpukinds_info_t *cpukinds = nullptr;
  if (nr_cpu_kinds > 0) {
    struct hwloc_info_s *infos;
        sizeof(kmp_hwloc_cpukinds_info_t) * nr_cpu_kinds);
    for (unsigned idx = 0; idx < (unsigned)nr_cpu_kinds; ++idx) {
      cpukinds[idx].efficiency = -1;
      cpukinds[idx].mask = hwloc_bitmap_alloc();
      if (hwloc_cpukinds_get_info(tp, idx, cpukinds[idx].mask,
                                  &cpukinds[idx].efficiency, &nr_infos, &infos,
        for (unsigned i = 0; i < nr_infos; ++i) {
#if KMP_ARCH_X86 || KMP_ARCH_X86_64
            cpukinds[idx].core_type = KMP_HW_CORE_TYPE_ATOM;
            cpukinds[idx].core_type = KMP_HW_CORE_TYPE_CORE;
  root = hwloc_get_root_obj(tp);
  obj = hwloc_get_pu_obj_by_os_index(tp, __kmp_affin_fullMask->begin());
  while (obj && obj != root) {
#if HWLOC_API_VERSION >= 0x00020000
    if (obj->memory_arity) {
      for (memory = obj->memory_first_child; memory;
           memory = hwloc_get_next_child(tp, obj, memory)) {
        if (memory->type == HWLOC_OBJ_NUMANODE)
      if (memory && memory->type == HWLOC_OBJ_NUMANODE) {
        hwloc_types[depth] = memory->type;
    type = __kmp_hwloc_type_2_topology_type(obj);
      types[depth] = type;
      hwloc_types[depth] = obj->type;
  for (int i = 0, j = depth - 1; i < j; ++i, --j) {
    hwloc_obj_type_t hwloc_temp = hwloc_types[i];
    types[i] = types[j];
    hwloc_types[i] = hwloc_types[j];
    hwloc_types[j] = hwloc_temp;
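// (The climb from one PU up to the root discovers which levels exist; the
// type arrays are built leaf-first and then reversed so that index 0 is the
// outermost layer.)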
  hw_thread_index = 0;
  while ((pu = hwloc_get_next_obj_by_type(tp, HWLOC_OBJ_PU, pu))) {
    int index = depth - 1;
    bool included = KMP_CPU_ISSET(pu->os_index, __kmp_affin_fullMask);
    hw_thread.ids[index] = pu->logical_index;
    hw_thread.os_id = pu->os_index;
#if HWLOC_API_VERSION >= 0x00020400
    int cpukind_index = -1;
    for (int i = 0; i < nr_cpu_kinds; ++i) {
      if (hwloc_bitmap_isset(cpukinds[i].mask, hw_thread.os_id)) {
    if (cpukind_index >= 0) {
    while (obj != root && obj != NULL) {
#if HWLOC_API_VERSION >= 0x00020000
      if (obj->memory_arity) {
        for (memory = obj->memory_first_child; memory;
             memory = hwloc_get_next_child(tp, obj, memory)) {
          if (memory->type == HWLOC_OBJ_NUMANODE)
        if (memory && memory->type == HWLOC_OBJ_NUMANODE) {
          sub_id = __kmp_hwloc_get_sub_id(tp, memory, prev);
          hw_thread.ids[index] = memory->logical_index;
          hw_thread.ids[index + 1] = sub_id;
      type = __kmp_hwloc_type_2_topology_type(obj);
        sub_id = __kmp_hwloc_get_sub_id(tp, obj, prev);
        hw_thread.ids[index] = obj->logical_index;
        hw_thread.ids[index + 1] = sub_id;
#if HWLOC_API_VERSION >= 0x00020400
  for (int idx = 0; idx < nr_cpu_kinds; ++idx)
    hwloc_bitmap_free(cpukinds[idx].mask);
static bool __kmp_affinity_create_flat_map(kmp_i18n_id_t *const msg_id) {
  *msg_id = kmp_i18n_null;
  if (__kmp_affinity.flags.verbose) {
  if (!KMP_AFFINITY_CAPABLE()) {
    KMP_ASSERT(__kmp_affinity.type == affinity_none);
  KMP_CPU_SET_ITERATE(i, __kmp_affin_fullMask) {
    if (!KMP_CPU_ISSET(i, __kmp_affin_fullMask)) {
    hw_thread.ids[0] = i;
    hw_thread.ids[1] = 0;
    hw_thread.ids[2] = 0;
  if (__kmp_affinity.flags.verbose) {
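// (The flat fallback models every OS proc as its own package with a single
// core and thread, hence ids[1] and ids[2] are always 0.)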
#if KMP_GROUP_AFFINITY
static bool __kmp_affinity_create_proc_group_map(kmp_i18n_id_t *const msg_id) {
  *msg_id = kmp_i18n_null;
  const static size_t BITS_PER_GROUP = CHAR_BIT * sizeof(DWORD_PTR);
  if (__kmp_affinity.flags.verbose) {
    KMP_INFORM(AffWindowsProcGroupMap, "KMP_AFFINITY");
  if (!KMP_AFFINITY_CAPABLE()) {
    KMP_ASSERT(__kmp_affinity.type == affinity_none);
  KMP_CPU_SET_ITERATE(i, __kmp_affin_fullMask) {
    if (!KMP_CPU_ISSET(i, __kmp_affin_fullMask)) {
    hw_thread.ids[0] = i / BITS_PER_GROUP;
    hw_thread.ids[1] = hw_thread.ids[2] = i % BITS_PER_GROUP;
#if KMP_ARCH_X86 || KMP_ARCH_X86_64

template <kmp_uint32 LSB, kmp_uint32 MSB>
static inline unsigned __kmp_extract_bits(kmp_uint32 v) {
  retval <<= SHIFT_LEFT;
  retval >>= (SHIFT_LEFT + SHIFT_RIGHT);

static int __kmp_cpuid_mask_width(int count) {
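// (__kmp_extract_bits<L, M>(v) returns bits L..M of v inclusive, e.g.
// __kmp_extract_bits<8, 15>(0x12345678) == 0x56. __kmp_cpuid_mask_width(n)
// returns the smallest r with 2^r >= n, e.g. 3 for n == 6.)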
class apicThreadInfo {
  unsigned maxCoresPerPkg;
  unsigned maxThreadsPerPkg;

static int __kmp_affinity_cmp_apicThreadInfo_phys_id(const void *a,
  const apicThreadInfo *aa = (const apicThreadInfo *)a;
  const apicThreadInfo *bb = (const apicThreadInfo *)b;
  if (aa->pkgId < bb->pkgId)
  if (aa->pkgId > bb->pkgId)
  if (aa->coreId < bb->coreId)
  if (aa->coreId > bb->coreId)
  if (aa->threadId < bb->threadId)
  if (aa->threadId > bb->threadId)
class cpuid_cache_info_t {
  bool operator==(const info_t &rhs) const {
    return level == rhs.level && mask == rhs.mask;
  bool operator!=(const info_t &rhs) const { return !operator==(rhs); }
  cpuid_cache_info_t() : depth(0) {
    table[MAX_CACHE_LEVEL].level = 0;
    table[MAX_CACHE_LEVEL].mask = 0;
  size_t get_depth() const { return depth; }
  info_t &operator[](size_t index) { return table[index]; }
  const info_t &operator[](size_t index) const { return table[index]; }
  bool operator==(const cpuid_cache_info_t &rhs) const {
    if (rhs.depth != depth)
    for (size_t i = 0; i < depth; ++i)
      if (table[i] != rhs.table[i])
  bool operator!=(const cpuid_cache_info_t &rhs) const {
    return !operator==(rhs);
  const info_t &get_level(unsigned level) const {
    for (size_t i = 0; i < depth; ++i) {
    return table[MAX_CACHE_LEVEL];
  void get_leaf4_levels() {
    while (depth < MAX_CACHE_LEVEL) {
      unsigned cache_type, max_threads_sharing;
      unsigned cache_level, cache_mask_width;
      __kmp_x86_cpuid(4, level, &buf2);
      cache_type = __kmp_extract_bits<0, 4>(buf2.eax);
      if (cache_type == 2) {
      max_threads_sharing = __kmp_extract_bits<14, 25>(buf2.eax) + 1;
      cache_mask_width = __kmp_cpuid_mask_width(max_threads_sharing);
      cache_level = __kmp_extract_bits<5, 7>(buf2.eax);
      table[depth].level = cache_level;
      table[depth].mask = ((-1) << cache_mask_width);
  static const int MAX_CACHE_LEVEL = 3;
  info_t table[MAX_CACHE_LEVEL + 1];
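// (CPUID leaf 4 enumerates one cache per subleaf; cache_type == 2 is an
// instruction cache and is skipped since it says nothing about grouping
// data-sharing threads. Each mask is derived from max_threads_sharing
// rounded up to a power of two.)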
static bool __kmp_affinity_create_apicid_map(kmp_i18n_id_t *const msg_id) {
  *msg_id = kmp_i18n_null;
  if (__kmp_affinity.flags.verbose) {
  __kmp_x86_cpuid(0, 0, &buf);
    *msg_id = kmp_i18n_str_NoLeaf4Support;
  if (!KMP_AFFINITY_CAPABLE()) {
    KMP_ASSERT(__kmp_affinity.type == affinity_none);
    __kmp_x86_cpuid(1, 0, &buf);
    int maxThreadsPerPkg = (buf.ebx >> 16) & 0xff;
    if (maxThreadsPerPkg == 0) {
      maxThreadsPerPkg = 1;
    __kmp_x86_cpuid(0, 0, &buf);
      __kmp_x86_cpuid(4, 0, &buf);
  kmp_affinity_raii_t previous_affinity;
  unsigned nApics = 0;
  KMP_CPU_SET_ITERATE(i, __kmp_affin_fullMask) {
    if (!KMP_CPU_ISSET(i, __kmp_affin_fullMask)) {
    __kmp_affinity_dispatch->bind_thread(i);
    threadInfo[nApics].osId = i;
    __kmp_x86_cpuid(1, 0, &buf);
    if (((buf.edx >> 9) & 1) == 0) {
      *msg_id = kmp_i18n_str_ApicNotPresent;
    threadInfo[nApics].apicId = (buf.ebx >> 24) & 0xff;
    threadInfo[nApics].maxThreadsPerPkg = (buf.ebx >> 16) & 0xff;
    if (threadInfo[nApics].maxThreadsPerPkg == 0) {
      threadInfo[nApics].maxThreadsPerPkg = 1;
    __kmp_x86_cpuid(0, 0, &buf);
      __kmp_x86_cpuid(4, 0, &buf);
      threadInfo[nApics].maxCoresPerPkg = ((buf.eax >> 26) & 0x3f) + 1;
      threadInfo[nApics].maxCoresPerPkg = 1;
    int widthCT = __kmp_cpuid_mask_width(threadInfo[nApics].maxThreadsPerPkg);
    threadInfo[nApics].pkgId = threadInfo[nApics].apicId >> widthCT;
    int widthC = __kmp_cpuid_mask_width(threadInfo[nApics].maxCoresPerPkg);
    int widthT = widthCT - widthC;
      *msg_id = kmp_i18n_str_InvalidCpuidInfo;
    int maskC = (1 << widthC) - 1;
    threadInfo[nApics].coreId = (threadInfo[nApics].apicId >> widthT) & maskC;
    int maskT = (1 << widthT) - 1;
    threadInfo[nApics].threadId = threadInfo[nApics].apicId & maskT;
  previous_affinity.restore();
  qsort(threadInfo, nApics, sizeof(*threadInfo),
        __kmp_affinity_cmp_apicThreadInfo_phys_id);
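// (The legacy 8-bit APIC id is a packed bitfield. Example: with
// maxThreadsPerPkg == 8 and maxCoresPerPkg == 4, widthCT == 3, widthC == 2,
// widthT == 1, so apicId 0b1011 decodes to pkgId 1, coreId 1, threadId 1.)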
  unsigned nCores = 1;
  unsigned lastPkgId = threadInfo[0].pkgId;
  unsigned coreCt = 1;
  unsigned lastCoreId = threadInfo[0].coreId;
  unsigned threadCt = 1;
  unsigned lastThreadId = threadInfo[0].threadId;
  unsigned prevMaxCoresPerPkg = threadInfo[0].maxCoresPerPkg;
  unsigned prevMaxThreadsPerPkg = threadInfo[0].maxThreadsPerPkg;
  for (i = 1; i < nApics; i++) {
    if (threadInfo[i].pkgId != lastPkgId) {
      lastPkgId = threadInfo[i].pkgId;
      lastCoreId = threadInfo[i].coreId;
      lastThreadId = threadInfo[i].threadId;
      prevMaxCoresPerPkg = threadInfo[i].maxCoresPerPkg;
      prevMaxThreadsPerPkg = threadInfo[i].maxThreadsPerPkg;
    if (threadInfo[i].coreId != lastCoreId) {
      lastCoreId = threadInfo[i].coreId;
      lastThreadId = threadInfo[i].threadId;
    } else if (threadInfo[i].threadId != lastThreadId) {
      lastThreadId = threadInfo[i].threadId;
      *msg_id = kmp_i18n_str_LegacyApicIDsNotUnique;
    if ((prevMaxCoresPerPkg != threadInfo[i].maxCoresPerPkg) ||
        (prevMaxThreadsPerPkg != threadInfo[i].maxThreadsPerPkg)) {
      *msg_id = kmp_i18n_str_InconsistentCpuidInfo;
  int threadLevel = 2;
  int depth = (pkgLevel >= 0) + (coreLevel >= 0) + (threadLevel >= 0);
  if (threadLevel >= 0)
  for (i = 0; i < nApics; ++i) {
    unsigned os = threadInfo[i].osId;
    if (pkgLevel >= 0) {
      hw_thread.ids[idx++] = threadInfo[i].pkgId;
    if (coreLevel >= 0) {
      hw_thread.ids[idx++] = threadInfo[i].coreId;
    if (threadLevel >= 0) {
      hw_thread.ids[idx++] = threadInfo[i].threadId;
    hw_thread.os_id = os;
    *msg_id = kmp_i18n_str_LegacyApicIDsNotUnique;
                                  unsigned *native_model_id) {
  __kmp_x86_cpuid(0x1a, 0, &buf);
  case KMP_HW_CORE_TYPE_ATOM:
  case KMP_HW_CORE_TYPE_CORE:
  *native_model_id = __kmp_extract_bits<0, 23>(buf.eax);
  INTEL_LEVEL_TYPE_INVALID = 0,
  INTEL_LEVEL_TYPE_SMT = 1,
  INTEL_LEVEL_TYPE_CORE = 2,
  INTEL_LEVEL_TYPE_MODULE = 3,
  INTEL_LEVEL_TYPE_TILE = 4,
  INTEL_LEVEL_TYPE_DIE = 5,
  INTEL_LEVEL_TYPE_LAST = 6,

#define KMP_LEAF_1F_KNOWN_LEVELS ((1u << INTEL_LEVEL_TYPE_LAST) - 1u)
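// (A bitmask of the level-type codes this runtime understands:
// (1u << 6) - 1u == 0x3f covers types 0 through 5, so level types outside
// the mask reported by newer hardware are folded into the enclosing level.)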
static kmp_hw_t __kmp_intel_type_2_topology_type(int intel_type) {
  switch (intel_type) {
  case INTEL_LEVEL_TYPE_INVALID:
  case INTEL_LEVEL_TYPE_SMT:
  case INTEL_LEVEL_TYPE_CORE:
  case INTEL_LEVEL_TYPE_TILE:
  case INTEL_LEVEL_TYPE_MODULE:
  case INTEL_LEVEL_TYPE_DIE:

static int __kmp_topology_type_2_intel_type(kmp_hw_t type) {
    return INTEL_LEVEL_TYPE_INVALID;
    return INTEL_LEVEL_TYPE_SMT;
    return INTEL_LEVEL_TYPE_CORE;
    return INTEL_LEVEL_TYPE_TILE;
    return INTEL_LEVEL_TYPE_MODULE;
    return INTEL_LEVEL_TYPE_DIE;
    return INTEL_LEVEL_TYPE_INVALID;
struct cpuid_level_info_t {
  unsigned level_type, mask, mask_width, nitems, cache_mask;

class cpuid_topo_desc_t {
  void clear() { desc = 0; }
  bool contains(int intel_type) const {
    if ((1u << intel_type) & desc)
    int intel_type = __kmp_topology_type_2_intel_type(type);
    return contains(intel_type);
  bool contains(cpuid_topo_desc_t rhs) const {
    return ((desc | rhs.desc) == desc);
  void add(int intel_type) { desc |= (1u << intel_type); }
  void add(cpuid_topo_desc_t rhs) { desc |= rhs.desc; }

struct cpuid_proc_info_t {
  unsigned native_model_id;
  cpuid_topo_desc_t description;
  cpuid_level_info_t levels[INTEL_LEVEL_TYPE_LAST];
static bool __kmp_x2apicid_get_levels(int leaf, cpuid_proc_info_t *info,
                                      cpuid_topo_desc_t *total_description) {
  unsigned level, levels_index;
  unsigned level_type, mask_width, nitems;
  cpuid_level_info_t(&levels)[INTEL_LEVEL_TYPE_LAST] = info->levels;
  bool retval = false;
  level = levels_index = 0;
    __kmp_x86_cpuid(leaf, level, &buf);
    level_type = __kmp_extract_bits<8, 15>(buf.ecx);
    mask_width = __kmp_extract_bits<0, 4>(buf.eax);
    nitems = __kmp_extract_bits<0, 15>(buf.ebx);
    if (level_type != INTEL_LEVEL_TYPE_INVALID && nitems == 0) {
    if (KMP_LEAF_1F_KNOWN_LEVELS & (1u << level_type)) {
      KMP_ASSERT(levels_index < INTEL_LEVEL_TYPE_LAST);
      levels[levels_index].level_type = level_type;
      levels[levels_index].mask_width = mask_width;
      levels[levels_index].nitems = nitems;
      if (levels_index > 0) {
        levels[levels_index - 1].mask_width = mask_width;
        levels[levels_index - 1].nitems = nitems;
  } while (level_type != INTEL_LEVEL_TYPE_INVALID);
  KMP_ASSERT(levels_index <= INTEL_LEVEL_TYPE_LAST);
  info->description.clear();
  info->depth = levels_index;
  if (*total_depth == 0) {
    *total_depth = info->depth;
    total_description->clear();
    for (int i = *total_depth - 1, j = 0; i >= 0; --i, ++j) {
          __kmp_intel_type_2_topology_type(info->levels[i].level_type);
      total_description->add(info->levels[i].level_type);
  if (levels_index == 0 || levels[0].level_type == INTEL_LEVEL_TYPE_INVALID)
  for (unsigned i = 0; i < levels_index; ++i) {
    if (levels[i].level_type != INTEL_LEVEL_TYPE_INVALID) {
      levels[i].mask = ~((-1) << levels[i].mask_width);
      levels[i].cache_mask = (-1) << levels[i].mask_width;
      for (unsigned j = 0; j < i; ++j)
      levels[i].mask = (-1) << levels[i - 1].mask_width;
      levels[i].cache_mask = 0;
    info->description.add(info->levels[i].level_type);
  if (!total_description->contains(info->description)) {
    for (int i = info->depth - 1, j = 0; i >= 0; --i, ++j) {
      if (total_description->contains(levels[i].level_type))
          __kmp_intel_type_2_topology_type(levels[i].level_type);
      for (int k = info->depth - 1; k >= j; --k) {
        total_types[k + 1] = total_types[k];
      total_types[j] = curr_type;
    total_description->add(info->description);
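// (Each CPUID 0x1f/0x0b subleaf reports a level type and the x2APIC shift
// width for that level; masks built from consecutive widths let the
// per-level topology ids be peeled directly out of the x2APIC id later.)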
static bool __kmp_affinity_create_x2apicid_map(kmp_i18n_id_t *const msg_id) {
  kmp_hw_t types[INTEL_LEVEL_TYPE_LAST];
  int topology_leaf, highest_leaf;
  cpuid_topo_desc_t total_description;
  static int leaves[] = {0, 0};
  cpuid_proc_info_t *proc_info = (cpuid_proc_info_t *)__kmp_allocate(
      (sizeof(cpuid_proc_info_t) + sizeof(cpuid_cache_info_t)) * ninfos);
  cpuid_cache_info_t *cache_info = (cpuid_cache_info_t *)(proc_info + ninfos);
  kmp_i18n_id_t leaf_message_id;
  *msg_id = kmp_i18n_null;
  if (__kmp_affinity.flags.verbose) {
  __kmp_x86_cpuid(0, 0, &buf);
  highest_leaf = buf.eax;
  if (__kmp_affinity_top_method == affinity_top_method_x2apicid) {
    leaf_message_id = kmp_i18n_str_NoLeaf11Support;
  } else if (__kmp_affinity_top_method == affinity_top_method_x2apicid_1f) {
    leaf_message_id = kmp_i18n_str_NoLeaf31Support;
    leaf_message_id = kmp_i18n_str_NoLeaf11Support;
  for (int i = 0; i < num_leaves; ++i) {
    int leaf = leaves[i];
    if (highest_leaf < leaf)
    __kmp_x86_cpuid(leaf, 0, &buf);
    topology_leaf = leaf;
    __kmp_x2apicid_get_levels(leaf, &proc_info[0], types, &depth,
                              &total_description);
  if (topology_leaf == -1 || depth == 0) {
    *msg_id = leaf_message_id;
  if (!KMP_AFFINITY_CAPABLE()) {
    KMP_ASSERT(__kmp_affinity.type == affinity_none);
    for (int i = 0; i < depth; ++i) {
      if (proc_info[0].levels[i].level_type == INTEL_LEVEL_TYPE_SMT) {
      } else if (proc_info[0].levels[i].level_type == INTEL_LEVEL_TYPE_CORE) {
  kmp_affinity_raii_t previous_affinity;
  int hw_thread_index = 0;
  bool uniform_caches = true;
  KMP_CPU_SET_ITERATE(proc, __kmp_affin_fullMask) {
    if (!KMP_CPU_ISSET(proc, __kmp_affin_fullMask)) {
    __kmp_affinity_dispatch->bind_thread(proc);
    __kmp_x86_cpuid(topology_leaf, 0, &buf);
    proc_info[hw_thread_index].os_id = proc;
    proc_info[hw_thread_index].apic_id = buf.edx;
    __kmp_x2apicid_get_levels(topology_leaf, &proc_info[hw_thread_index], types,
                              &depth, &total_description);
    if (proc_info[hw_thread_index].depth == 0) {
      *msg_id = kmp_i18n_str_InvalidCpuidInfo;
    cache_info[hw_thread_index].get_leaf4_levels();
    if (uniform_caches && hw_thread_index > 0)
      if (cache_info[0] != cache_info[hw_thread_index])
        uniform_caches = false;
      __kmp_get_hybrid_info(&proc_info[hw_thread_index].type,
                            &proc_info[hw_thread_index].efficiency,
                            &proc_info[hw_thread_index].native_model_id);
  previous_affinity.restore();
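// (kmp_affinity_raii_t temporarily binds the current thread to each proc so
// the CPUID reads execute on the right CPU; restore() reinstates the
// original affinity once enumeration is done.)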
    hw_thread.os_id = proc_info[i].os_id;
    unsigned apic_id = proc_info[i].apic_id;
    for (int j = 0, idx = depth - 1; j < depth; ++j, --idx) {
      if (!(proc_info[i].description.contains_topology_type(
      hw_thread.ids[idx] = apic_id & proc_info[i].levels[j].mask;
        hw_thread.ids[idx] >>= proc_info[i].levels[j - 1].mask_width;
    for (int j = 0; j < depth - 1; ++j) {
      if (hw_thread.ids[j] == prev_id && hw_thread.ids[j + 1] == curr_id) {
        hw_thread.ids[j + 1] = new_id;
      } else if (hw_thread.ids[j] == prev_id &&
                 hw_thread.ids[j + 1] != curr_id) {
        curr_id = hw_thread.ids[j + 1];
        hw_thread.ids[j + 1] = ++new_id;
        prev_id = hw_thread.ids[j];
        curr_id = hw_thread.ids[j + 1];
        hw_thread.ids[j + 1] = ++new_id;
  if (uniform_caches) {
    for (size_t i = 0; i < cache_info[0].get_depth(); ++i) {
      unsigned cache_mask = cache_info[0][i].mask;
      unsigned cache_level = cache_info[0][i].level;
      KMP_ASSERT(cache_level <= cpuid_cache_info_t::MAX_CACHE_LEVEL);
      kmp_hw_t cache_type = cpuid_cache_info_t::get_topology_type(cache_level);
      for (int j = 0; j < depth; ++j) {
        unsigned hw_cache_mask = proc_info[0].levels[j].cache_mask;
        if (hw_cache_mask == cache_mask && j < depth - 1) {
              proc_info[0].levels[j + 1].level_type);
      for (size_t j = 0; j < cache_info[i].get_depth(); ++j) {
        unsigned cache_level = cache_info[i][j].level;
            cpuid_cache_info_t::get_topology_type(cache_level);
    bool unresolved_cache_levels = false;
    for (unsigned level = 1; level <= cpuid_cache_info_t::MAX_CACHE_LEVEL;
      kmp_hw_t cache_type = cpuid_cache_info_t::get_topology_type(level);
        unresolved_cache_levels = true;
    if (unresolved_cache_levels) {
      for (unsigned l = 1; l <= cpuid_cache_info_t::MAX_CACHE_LEVEL; ++l) {
        kmp_hw_t cache_type = cpuid_cache_info_t::get_topology_type(l);
        for (int i = 0; i < num_hw_threads; ++i) {
          const cpuid_cache_info_t::info_t &info =
              cache_info[original_idx].get_level(l);
          if (info.level == 0)
          ids[i] = info.mask & proc_info[original_idx].apic_id;
  *msg_id = kmp_i18n_str_x2ApicIDsNotUnique;
#define threadIdIndex 1
#define coreIdIndex 2
#define nodeIdIndex 4

typedef unsigned *ProcCpuInfo;
static unsigned maxIndex = pkgIdIndex;
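// (Each /proc/cpuinfo record is stored as an array of unsigned fields
// indexed by the *Index constants; osIdIndex and pkgIdIndex are defined
// alongside the entries elided here, and maxIndex tracks the highest field
// index in use.)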
static int __kmp_affinity_cmp_ProcCpuInfo_phys_id(const void *a,
  const unsigned *aa = *(unsigned *const *)a;
  const unsigned *bb = *(unsigned *const *)b;
  for (i = maxIndex;; i--) {
#if KMP_USE_HIER_SCHED
static void __kmp_dispatch_set_hierarchy_values() {
#if KMP_ARCH_X86_64 &&                                                         \
    (KMP_OS_LINUX || KMP_OS_FREEBSD || KMP_OS_NETBSD || KMP_OS_DRAGONFLY ||    \
     KMP_OS_WINDOWS) &&                                                        \
  if (__kmp_mic_type >= mic3)
#if KMP_ARCH_X86_64 &&                                                         \
    (KMP_OS_LINUX || KMP_OS_FREEBSD || KMP_OS_NETBSD || KMP_OS_DRAGONFLY ||    \
     KMP_OS_WINDOWS) &&                                                        \
  if (__kmp_mic_type >= mic3)
  int index = type + 1;
  if (tid >= num_hw_threads)
    tid = tid % num_hw_threads;
static inline const char *__kmp_cpuinfo_get_filename() {
  const char *filename;
  if (__kmp_cpuinfo_file != nullptr)
    filename = __kmp_cpuinfo_file;
    filename = "/proc/cpuinfo";

static inline const char *__kmp_cpuinfo_get_envvar() {
  const char *envvar = nullptr;
  if (__kmp_cpuinfo_file != nullptr)
    envvar = "KMP_CPUINFO_FILE";
static bool __kmp_package_id_from_core_siblings_list(unsigned **threadInfo,
  if (!KMP_AFFINITY_CAPABLE())
               "/sys/devices/system/cpu/cpu%u/topology/core_siblings_list",
               threadInfo[idx][osIdIndex]);
  kmp_affin_mask_t *siblings = __kmp_parse_cpu_list(path);
  for (unsigned i = 0; i < num_avail; ++i) {
    unsigned cpu_id = threadInfo[i][osIdIndex];
    KMP_ASSERT(cpu_id < __kmp_affin_mask_size * CHAR_BIT);
    if (!KMP_CPU_ISSET(cpu_id, siblings))
    if (threadInfo[i][pkgIdIndex] == UINT_MAX) {
      threadInfo[i][pkgIdIndex] = idx;
    } else if (threadInfo[i][pkgIdIndex] != idx) {
      KMP_CPU_FREE(siblings);
  KMP_ASSERT(threadInfo[idx][pkgIdIndex] != UINT_MAX);
  KMP_CPU_FREE(siblings);
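// (Fallback for cpuinfo files with no "physical id" line: every CPU named in
// cpu<idx>'s core_siblings_list belongs to the same package, so idx is
// assigned as the shared package id, failing if a conflict is detected.)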
static bool __kmp_affinity_create_cpuinfo_map(int *line,
                                              kmp_i18n_id_t *const msg_id) {
  *msg_id = kmp_i18n_null;
  const char *filename = __kmp_cpuinfo_get_filename();
  const char *envvar = __kmp_cpuinfo_get_envvar();
  if (__kmp_affinity.flags.verbose) {
    KMP_INFORM(AffParseFilename, "KMP_AFFINITY", filename);
  unsigned num_records = 0;
    buf[sizeof(buf) - 1] = 1;
    if (!fgets(buf, sizeof(buf), f)) {
    char s1[] = "processor";
    if (strncmp(buf, s1, sizeof(s1) - 1) == 0) {
      if (nodeIdIndex + level >= maxIndex) {
        maxIndex = nodeIdIndex + level;
  if (num_records == 0) {
    *msg_id = kmp_i18n_str_NoProcRecords;
    *msg_id = kmp_i18n_str_TooManyProcRecords;
  if (fseek(f, 0, SEEK_SET) != 0) {
    *msg_id = kmp_i18n_str_CantRewindCpuinfo;
  unsigned **threadInfo =
      (unsigned **)__kmp_allocate((num_records + 1) * sizeof(unsigned *));
  for (i = 0; i <= num_records; i++) {

#define CLEANUP_THREAD_INFO                                                    \
  for (i = 0; i <= num_records; i++) {                                         \
    __kmp_free(threadInfo[i]);                                                 \
  __kmp_free(threadInfo);

#define INIT_PROC_INFO(p)                                                      \
  for (__index = 0; __index <= maxIndex; __index++) {                          \
    (p)[__index] = UINT_MAX;                                                   \

  for (i = 0; i <= num_records; i++) {
    INIT_PROC_INFO(threadInfo[i]);
  lpar_info_format1_t cpuinfo;
  if (__kmp_affinity.flags.verbose)
    KMP_INFORM(AffParseFilename, "KMP_AFFINITY", "system info for topology");
  smt_threads = syssmt(GET_NUMBER_SMT_SETS, 0, 0, NULL);
  rsethandle_t sys_rset = rs_alloc(RS_SYSTEM);
  if (sys_rset == NULL) {
    CLEANUP_THREAD_INFO;
    *msg_id = kmp_i18n_str_UnknownTopology;
  rsethandle_t srad = rs_alloc(RS_EMPTY);
    CLEANUP_THREAD_INFO;
    *msg_id = kmp_i18n_str_UnknownTopology;
  int sradsdl = rs_getinfo(NULL, R_SRADSDL, 0);
    CLEANUP_THREAD_INFO;
    *msg_id = kmp_i18n_str_UnknownTopology;
  int num_rads = rs_numrads(sys_rset, sradsdl, 0);
    CLEANUP_THREAD_INFO;
    *msg_id = kmp_i18n_str_UnknownTopology;
  int max_procs = rs_getinfo(NULL, R_MAXPROCS, 0);
  if (max_procs < 0) {
    CLEANUP_THREAD_INFO;
    *msg_id = kmp_i18n_str_UnknownTopology;
  for (int srad_idx = 0; cur_rad < num_rads && srad_idx < VMI_MAXRADS;
    if (rs_getrad(sys_rset, srad, sradsdl, srad_idx, 0) < 0)
    for (int cpu = 0; cpu < max_procs; cpu++) {
      if (rs_op(RS_TESTRESOURCE, srad, NULL, R_PROCS, cpu)) {
        threadInfo[cpu][osIdIndex] = cpu;
        threadInfo[cpu][pkgIdIndex] = cur_rad;
        threadInfo[cpu][coreIdIndex] = cpu / smt_threads;
        if (num_set >= num_avail) {
  unsigned num_avail = 0;
  bool reading_s390x_sys_info = true;
    buf[sizeof(buf) - 1] = 1;
    bool long_line = false;
    if (!fgets(buf, sizeof(buf), f)) {
      for (i = 0; i <= maxIndex; i++) {
        if (threadInfo[num_avail][i] != UINT_MAX) {
    } else if (!buf[sizeof(buf) - 1]) {
    CLEANUP_THREAD_INFO;                                                       \
    *msg_id = kmp_i18n_str_LongLineCpuinfo;                                    \
#if KMP_ARCH_LOONGARCH64
    if (*buf == '\n' && *line == 2)
    if (reading_s390x_sys_info) {
        reading_s390x_sys_info = false;
    char s1[] = "cpu number";
    char s1[] = "processor";
    if (strncmp(buf, s1, sizeof(s1) - 1) == 0) {
      char *p = strchr(buf + sizeof(s1) - 1, ':');
      if (threadInfo[num_avail][osIdIndex] != UINT_MAX)
      threadInfo[num_avail][osIdIndex] = val;
#if KMP_OS_LINUX && !(KMP_ARCH_X86 || KMP_ARCH_X86_64)
               "/sys/devices/system/cpu/cpu%u/topology/physical_package_id",
               threadInfo[num_avail][osIdIndex]);
               "/sys/devices/system/cpu/cpu%u/topology/book_id",
               threadInfo[num_avail][osIdIndex]);
        threadInfo[num_avail][pkgIdIndex] |= (book_id << 8);
               "/sys/devices/system/cpu/cpu%u/topology/drawer_id",
               threadInfo[num_avail][osIdIndex]);
        threadInfo[num_avail][pkgIdIndex] |= (drawer_id << 16);
               "/sys/devices/system/cpu/cpu%u/topology/core_id",
               threadInfo[num_avail][osIdIndex]);
      char s2[] = "physical id";
      if (strncmp(buf, s2, sizeof(s2) - 1) == 0) {
        char *p = strchr(buf + sizeof(s2) - 1, ':');
        if (threadInfo[num_avail][pkgIdIndex] != UINT_MAX)
        threadInfo[num_avail][pkgIdIndex] = val;
      char s3[] = "core id";
      if (strncmp(buf, s3, sizeof(s3) - 1) == 0) {
        char *p = strchr(buf + sizeof(s3) - 1, ':');
        if (threadInfo[num_avail][coreIdIndex] != UINT_MAX)
        threadInfo[num_avail][coreIdIndex] = val;
      char s4[] = "thread id";
      if (strncmp(buf, s4, sizeof(s4) - 1) == 0) {
        char *p = strchr(buf + sizeof(s4) - 1, ':');
        if (threadInfo[num_avail][threadIdIndex] != UINT_MAX)
        threadInfo[num_avail][threadIdIndex] = val;
      char *p = strchr(buf + sizeof(s4) - 1, ':');
      if (threadInfo[num_avail][nodeIdIndex + level] != UINT_MAX)
      threadInfo[num_avail][nodeIdIndex + level] = val;
    if ((*buf != 0) && (*buf != '\n')) {
      while (((ch = fgetc(f)) != EOF) && (ch != '\n'))
    CLEANUP_THREAD_INFO;
    *msg_id = kmp_i18n_str_TooManyEntries;
    if (threadInfo[num_avail][osIdIndex] == UINT_MAX) {
      CLEANUP_THREAD_INFO;
      *msg_id = kmp_i18n_str_MissingProcField;
    if (KMP_AFFINITY_CAPABLE() &&
        !KMP_CPU_ISSET(threadInfo[num_avail][osIdIndex],
                       __kmp_affin_fullMask)) {
      INIT_PROC_INFO(threadInfo[num_avail]);
    INIT_PROC_INFO(threadInfo[num_avail]);
    CLEANUP_THREAD_INFO;
    *msg_id = kmp_i18n_str_MissingValCpuinfo;
    CLEANUP_THREAD_INFO;
    *msg_id = kmp_i18n_str_DuplicateFieldCpuinfo;
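// (On s390x the package id is synthesized from sysfs: book_id and drawer_id
// are packed into the "physical id" field at bit offsets 8 and 16, since
// /proc/cpuinfo does not expose them directly.)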
  for (i = 0; i < num_avail; ++i) {
    if (threadInfo[i][pkgIdIndex] == UINT_MAX) {
      if (!__kmp_package_id_from_core_siblings_list(threadInfo, num_avail, i)) {
        CLEANUP_THREAD_INFO;
        *msg_id = kmp_i18n_str_MissingPhysicalIDField;
#if KMP_MIC && REDUCE_TEAM_SIZE
  unsigned teamSize = 0;
  qsort(threadInfo, num_avail, sizeof(*threadInfo),
        __kmp_affinity_cmp_ProcCpuInfo_phys_id);
  bool assign_thread_ids = false;
  unsigned threadIdCt;
  if (assign_thread_ids) {
    if (threadInfo[0][threadIdIndex] == UINT_MAX) {
      threadInfo[0][threadIdIndex] = threadIdCt++;
    } else if (threadIdCt <= threadInfo[0][threadIdIndex]) {
      threadIdCt = threadInfo[0][threadIdIndex] + 1;
  for (index = 0; index <= maxIndex; index++) {
    lastId[index] = threadInfo[0][index];
  for (i = 1; i < num_avail; i++) {
    for (index = maxIndex; index >= threadIdIndex; index--) {
      if (assign_thread_ids && (index == threadIdIndex)) {
        if (threadInfo[i][threadIdIndex] == UINT_MAX) {
          threadInfo[i][threadIdIndex] = threadIdCt++;
        else if (threadIdCt <= threadInfo[i][threadIdIndex]) {
          threadIdCt = threadInfo[i][threadIdIndex] + 1;
      if (threadInfo[i][index] != lastId[index]) {
        for (index2 = threadIdIndex; index2 < index; index2++) {
          if (counts[index2] > maxCt[index2]) {
            maxCt[index2] = counts[index2];
          lastId[index2] = threadInfo[i][index2];
        lastId[index] = threadInfo[i][index];
        if (assign_thread_ids && (index > threadIdIndex)) {
#if KMP_MIC && REDUCE_TEAM_SIZE
          teamSize += (threadIdCt <= 2) ? (threadIdCt) : (threadIdCt - 1);
          if (threadInfo[i][threadIdIndex] == UINT_MAX) {
            threadInfo[i][threadIdIndex] = threadIdCt++;
          else if (threadIdCt <= threadInfo[i][threadIdIndex]) {
            threadIdCt = threadInfo[i][threadIdIndex] + 1;
    if (index < threadIdIndex) {
      if ((threadInfo[i][threadIdIndex] != UINT_MAX) || assign_thread_ids) {
        CLEANUP_THREAD_INFO;
        *msg_id = kmp_i18n_str_PhysicalIDsNotUnique;
      assign_thread_ids = true;
      goto restart_radix_check;
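// (First pass trusts the thread ids read from the file; if they prove
// non-unique within a core, the check restarts with assign_thread_ids set
// and synthesizes sequential thread ids instead.)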
#if KMP_MIC && REDUCE_TEAM_SIZE
  teamSize += (threadIdCt <= 2) ? (threadIdCt) : (threadIdCt - 1);
  for (index = threadIdIndex; index <= maxIndex; index++) {
    if (counts[index] > maxCt[index]) {
      maxCt[index] = counts[index];
  if (!KMP_AFFINITY_CAPABLE()) {
    KMP_ASSERT(__kmp_affinity.type == affinity_none);
#if KMP_MIC && REDUCE_TEAM_SIZE
  KA_TRACE(20, ("__kmp_affinity_create_cpuinfo_map: setting "
                "__kmp_dflt_team_nth = %d\n",
  for (index = threadIdIndex; index < maxIndex; index++) {
    KMP_ASSERT(totals[index] >= totals[index + 1]);
    inMap[index] = (totals[index] > totals[index + 1]);
  inMap[maxIndex] = (totals[maxIndex] > 1);
  inMap[pkgIdIndex] = true;
  inMap[coreIdIndex] = true;
  inMap[threadIdIndex] = true;
  int threadLevel = -1;
  for (index = threadIdIndex; index <= maxIndex; index++) {
  if (inMap[pkgIdIndex]) {
  if (inMap[coreIdIndex]) {
  if (inMap[threadIdIndex]) {
  for (i = 0; i < num_avail; ++i) {
    unsigned os = threadInfo[i][osIdIndex];
    hw_thread.os_id = os;
    for (src_index = maxIndex; src_index >= threadIdIndex; src_index--) {
      if (!inMap[src_index]) {
      if (src_index == pkgIdIndex) {
        hw_thread.ids[pkgLevel] = threadInfo[i][src_index];
      } else if (src_index == coreIdIndex) {
        hw_thread.ids[coreLevel] = threadInfo[i][src_index];
      } else if (src_index == threadIdIndex) {
        hw_thread.ids[threadLevel] = threadInfo[i][src_index];
  CLEANUP_THREAD_INFO;
    for (int j = 0; j < tlevel; ++j) {
      if (hw_thread.ids[j] != prev_hw_thread.ids[j]) {
        hw_thread.ids[tlevel] = 0;
    hw_thread.ids[tlevel] = prev_hw_thread.ids[tlevel] + 1;
  *msg_id = kmp_i18n_str_PhysicalIDsNotUnique;
template <typename FindNextFunctionType>
static void __kmp_create_os_id_masks(unsigned *numUnique,
                                     kmp_affinity_t &affinity,
                                     FindNextFunctionType find_next) {
  const char *env_var = __kmp_get_affinity_env_var(affinity);
  for (i = numAddrs - 1;; --i) {
    if (osId > maxOsId) {
  affinity.num_os_id_masks = maxOsId + 1;
  KMP_CPU_ALLOC_ARRAY(affinity.os_id_masks, affinity.num_os_id_masks);
  if (affinity.flags.verbose && (affinity.gran_levels > 0)) {
    KMP_INFORM(ThreadsMigrate, env_var, affinity.gran_levels);
  if (affinity.gran_levels >= (int)depth) {
  kmp_affin_mask_t *sum;
  KMP_CPU_ALLOC_ON_STACK(sum);
  i = j = leader = find_next(-1);
  for (i = find_next(i); i < numAddrs; i = find_next(i)) {
    for (; j < i; j = find_next(j)) {
      kmp_affin_mask_t *mask = KMP_CPU_INDEX(affinity.os_id_masks, osId);
  for (; j < i; j = find_next(j)) {
    kmp_affin_mask_t *mask = KMP_CPU_INDEX(affinity.os_id_masks, osId);
  KMP_CPU_FREE_FROM_STACK(sum);
  if (full_mask.restrict_to_mask() && affinity.flags.verbose) {
  *numUnique = unique;
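// (One mask is built per OS proc id: all hw threads sharing a
// granularity-level "leader" accumulate into the same mask, and numUnique
// reports how many distinct leader groups, hence distinct masks, resulted.)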
static kmp_affin_mask_t *newMasks;
static int numNewMasks;
static int nextNewMask;

#define ADD_MASK(_mask)                                                        \
    if (nextNewMask >= numNewMasks) {                                          \
      kmp_affin_mask_t *temp;                                                  \
      KMP_CPU_INTERNAL_ALLOC_ARRAY(temp, numNewMasks);                         \
      for (i = 0; i < numNewMasks / 2; i++) {                                  \
        kmp_affin_mask_t *src = KMP_CPU_INDEX(newMasks, i);                    \
        kmp_affin_mask_t *dest = KMP_CPU_INDEX(temp, i);                       \
        KMP_CPU_COPY(dest, src);                                               \
      KMP_CPU_INTERNAL_FREE_ARRAY(newMasks, numNewMasks / 2);                  \
    KMP_CPU_COPY(KMP_CPU_INDEX(newMasks, nextNewMask), (_mask));               \

#define ADD_MASK_OSID(_osId, _osId2Mask, _maxOsId)                             \
    if (((_osId) > _maxOsId) ||                                                \
        (!KMP_CPU_ISSET((_osId), KMP_CPU_INDEX((_osId2Mask), (_osId))))) {     \
      KMP_AFF_WARNING(affinity, AffIgnoreInvalidProcID, _osId);                \
      ADD_MASK(KMP_CPU_INDEX(_osId2Mask, (_osId)));                            \
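// (ADD_MASK grows the shared newMasks array geometrically, copying the old
// half-sized array when it fills; ADD_MASK_OSID validates an OS id against
// os_id_masks first, warning about and skipping invalid ids.)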
static void __kmp_affinity_process_proclist(kmp_affinity_t &affinity) {
  kmp_affin_mask_t **out_masks = &affinity.masks;
  unsigned *out_numMasks = &affinity.num_masks;
  const char *proclist = affinity.proclist;
  kmp_affin_mask_t *osId2Mask = affinity.os_id_masks;
  int maxOsId = affinity.num_os_id_masks - 1;
  const char *scan = proclist;
  const char *next = proclist;
  KMP_CPU_INTERNAL_ALLOC_ARRAY(newMasks, numNewMasks);
  kmp_affin_mask_t *sumMask;
  KMP_CPU_ALLOC(sumMask);
    int start, end, stride;
    if (*next == '\0') {
      KMP_ASSERT2((*next >= '0') && (*next <= '9'), "bad proclist");
      if ((num > maxOsId) ||
          (!KMP_CPU_ISSET(num, KMP_CPU_INDEX(osId2Mask, num)))) {
        KMP_CPU_ZERO(sumMask);
        KMP_CPU_COPY(sumMask, KMP_CPU_INDEX(osId2Mask, num));
        KMP_ASSERT2((*next >= '0') && (*next <= '9'),
                    "bad explicit proc list");
        if ((num > maxOsId) ||
            (!KMP_CPU_ISSET(num, KMP_CPU_INDEX(osId2Mask, num)))) {
          KMP_CPU_UNION(sumMask, KMP_CPU_INDEX(osId2Mask, num));
    KMP_ASSERT2((*next >= '0') && (*next <= '9'), "bad explicit proc list");
    KMP_ASSERT2(start >= 0, "bad explicit proc list");
      ADD_MASK_OSID(start, osId2Mask, maxOsId);
      KMP_ASSERT2((*next >= '0') && (*next <= '9'), "bad explicit proc list");
      KMP_ASSERT2((*next >= '0') && (*next <= '9'), "bad explicit proc list");
      KMP_ASSERT2(stride >= 0, "bad explicit proc list");
    KMP_ASSERT2(stride != 0, "bad explicit proc list");
    KMP_ASSERT2((end - start) / stride <= 65536, "bad explicit proc list");
      ADD_MASK_OSID(start, osId2Mask, maxOsId);
    } while (start <= end);
      ADD_MASK_OSID(start, osId2Mask, maxOsId);
    } while (start >= end);
  *out_numMasks = nextNewMask;
  if (nextNewMask == 0) {
    KMP_CPU_INTERNAL_FREE_ARRAY(newMasks, numNewMasks);
  KMP_CPU_ALLOC_ARRAY((*out_masks), nextNewMask);
  for (i = 0; i < nextNewMask; i++) {
    kmp_affin_mask_t *src = KMP_CPU_INDEX(newMasks, i);
    kmp_affin_mask_t *dest = KMP_CPU_INDEX((*out_masks), i);
    KMP_CPU_COPY(dest, src);
  KMP_CPU_INTERNAL_FREE_ARRAY(newMasks, numNewMasks);
  KMP_CPU_FREE(sumMask);
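// (The accepted proclist syntax is, roughly, a comma-separated mix of single
// OS ids, {...} groups that union into one mask, and start-end[:stride]
// ranges; each top-level element yields one mask.)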
static void __kmp_process_subplace_list(const char **scan,
                                        kmp_affinity_t &affinity, int maxOsId,
                                        kmp_affin_mask_t *tempMask,
  kmp_affin_mask_t *osId2Mask = affinity.os_id_masks;
  int start, count, stride, i;
  KMP_ASSERT2((**scan >= '0') && (**scan <= '9'), "bad explicit places list");
  if (**scan == '}' || **scan == ',') {
    if ((start > maxOsId) ||
        (!KMP_CPU_ISSET(start, KMP_CPU_INDEX(osId2Mask, start)))) {
      KMP_CPU_UNION(tempMask, KMP_CPU_INDEX(osId2Mask, start));
    if (**scan == '}') {
  KMP_ASSERT2(**scan == ':', "bad explicit places list");
  KMP_ASSERT2((**scan >= '0') && (**scan <= '9'), "bad explicit places list");
  if (**scan == '}' || **scan == ',') {
      if ((start > maxOsId) ||
          (!KMP_CPU_ISSET(start, KMP_CPU_INDEX(osId2Mask, start)))) {
        KMP_CPU_UNION(tempMask, KMP_CPU_INDEX(osId2Mask, start));
    if (**scan == '}') {
  KMP_ASSERT2(**scan == ':', "bad explicit places list");
  if (**scan == '+') {
  if (**scan == '-') {
  KMP_ASSERT2((**scan >= '0') && (**scan <= '9'), "bad explicit places list");
  if (**scan == '}' || **scan == ',') {
      if ((start > maxOsId) ||
          (!KMP_CPU_ISSET(start, KMP_CPU_INDEX(osId2Mask, start)))) {
        KMP_CPU_UNION(tempMask, KMP_CPU_INDEX(osId2Mask, start));
    if (**scan == '}') {
// Parse a single place: a "{...}" subplace list, a '!'-negated place, or a
// bare OS proc id.
static void __kmp_process_place(const char **scan, kmp_affinity_t &affinity,
                                int maxOsId, kmp_affin_mask_t *tempMask,
                                int *setSize) {
  kmp_affin_mask_t *osId2Mask = affinity.os_id_masks;
  // ...
  if (**scan == '{') {
    // ...
    __kmp_process_subplace_list(scan, affinity, maxOsId, tempMask, setSize);
    KMP_ASSERT2(**scan == '}', "bad explicit places list");
    // ...
  } else if (**scan == '!') {
    // ...
    __kmp_process_place(scan, affinity, maxOsId, tempMask, setSize);
    KMP_CPU_COMPLEMENT(maxOsId, tempMask); // '!' selects the complement
  } else if ((**scan >= '0') && (**scan <= '9')) {
    // ...
    if ((num > maxOsId) ||
        (!KMP_CPU_ISSET(num, KMP_CPU_INDEX(osId2Mask, num)))) {
      // ... (invalid id: warn and skip)
    } else {
      KMP_CPU_UNION(tempMask, KMP_CPU_INDEX(osId2Mask, num));
    }
    // ...
  }
}
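// [Illustrative sketch, not part of the runtime.] The '!' operator above
// complements a place over the valid OS proc ids, as KMP_CPU_COMPLEMENT
// does. A standalone analogue over an 8-proc machine using std::bitset:

#include <bitset>
#include <cstdio>

int main() {
  std::bitset<8> place; // tempMask analogue, procs 0..7
  place.set(0);
  place.set(1);                              // place = {0,1}
  place.flip();                              // "!" -> complement
  printf("%s\n", place.to_string().c_str()); // 11111100: procs 2..7
  return 0;
}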
// Parse an OMP_PLACES-style place list and build one mask per place,
// expanding "place:count:stride" by repeatedly shifting the previous mask.
void __kmp_affinity_process_placelist(kmp_affinity_t &affinity) {
  int i, j, count, stride, sign;
  kmp_affin_mask_t **out_masks = &affinity.masks;
  unsigned *out_numMasks = &affinity.num_masks;
  const char *placelist = affinity.proclist;
  kmp_affin_mask_t *osId2Mask = affinity.os_id_masks;
  int maxOsId = affinity.num_os_id_masks - 1;
  const char *scan = placelist;
  const char *next = placelist;
  // ...
  KMP_CPU_INTERNAL_ALLOC_ARRAY(newMasks, numNewMasks);
  // ...
  kmp_affin_mask_t *tempMask;
  kmp_affin_mask_t *previousMask;
  KMP_CPU_ALLOC(tempMask);
  KMP_CPU_ZERO(tempMask);
  KMP_CPU_ALLOC(previousMask);
  KMP_CPU_ZERO(previousMask);
  // ... (main loop over the places)
    __kmp_process_place(&scan, affinity, maxOsId, tempMask, &setSize);
    // A place not followed by ":count" is emitted directly.
    if (*scan == '\0' || *scan == ',') {
      // ... (add tempMask when it is non-empty)
      KMP_CPU_ZERO(tempMask);
      // ...
      if (*scan == '\0') {
        // ... (end of the list)
      }
      // ... (',' continues with the next place)
    }
    KMP_ASSERT2(*scan == ':', "bad explicit places list");
    // Parse the count.
    KMP_ASSERT2((*scan >= '0') && (*scan <= '9'), "bad explicit places list");
    // ...
    if (*scan == '\0' || *scan == ',') {
      // ... (no stride given: it defaults to +1)
    } else {
      KMP_ASSERT2(*scan == ':', "bad explicit places list");
      // Parse the (possibly signed) stride.
      KMP_ASSERT2((*scan >= '0') && (*scan <= '9'),
                  "bad explicit places list");
      // ...
    }
    // Emit `count` copies of the place, each shifted by `stride` OS procs
    // from the previous copy.
      KMP_CPU_COPY(previousMask, tempMask);
      ADD_MASK(previousMask);
      KMP_CPU_ZERO(tempMask);
      // ...
      KMP_CPU_SET_ITERATE(j, previousMask) {
        if (!KMP_CPU_ISSET(j, previousMask)) {
          continue;
        }
        if ((j + stride > maxOsId) || (j + stride < 0) ||
            (!KMP_CPU_ISSET(j, __kmp_affin_fullMask)) ||
            (!KMP_CPU_ISSET(j + stride,
                            KMP_CPU_INDEX(osId2Mask, j + stride)))) {
          // ... (shifted proc is unavailable: warn once and skip)
          continue;
        }
        KMP_CPU_SET(j + stride, tempMask);
      }
    // ...
    KMP_CPU_ZERO(tempMask);
    // ...
    if (*scan == '\0') {
      // ... (end of the list)
    }
    // ...
  *out_numMasks = nextNewMask;
  if (nextNewMask == 0) {
    // ...
    KMP_CPU_INTERNAL_FREE_ARRAY(newMasks, numNewMasks);
    return;
  }
  KMP_CPU_ALLOC_ARRAY((*out_masks), nextNewMask);
  KMP_CPU_FREE(tempMask);
  KMP_CPU_FREE(previousMask);
  for (i = 0; i < nextNewMask; i++) {
    kmp_affin_mask_t *src = KMP_CPU_INDEX(newMasks, i);
    kmp_affin_mask_t *dest = KMP_CPU_INDEX((*out_masks), i);
    KMP_CPU_COPY(dest, src);
  }
  KMP_CPU_INTERNAL_FREE_ARRAY(newMasks, numNewMasks);
}
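// [Illustrative sketch, not part of the runtime.] The "place:count:stride"
// expansion above keeps a previousMask and derives each next place by
// shifting every set bit by `stride`. A standalone analogue expanding
// "{0,1}:3:4" on a 16-proc machine:

#include <bitset>
#include <cstdio>

int main() {
  constexpr int nprocs = 16;
  std::bitset<nprocs> place;
  place.set(0);
  place.set(1); // initial place {0,1}
  const int count = 3, stride = 4;
  for (int c = 0; c < count; ++c) {
    printf("place %d:", c);
    for (int j = 0; j < nprocs; ++j)
      if (place.test(j))
        printf(" %d", j);
    printf("\n"); // {0,1}, then {4,5}, then {8,9}
    std::bitset<nprocs> next;
    for (int j = 0; j < nprocs; ++j) // KMP_CPU_SET(j + stride, tempMask)
      if (place.test(j) && j + stride >= 0 && j + stride < nprocs)
        next.set(j + stride);
    place = next;
  }
  return 0;
}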
// Find the topology level that corresponds to cores: the deepest level
// above the bottom (thread) level at which the per-thread ids still vary.
static int __kmp_affinity_find_core_level(int nprocs, int bottom_level) {
  // ...
  for (int i = 0; i < nprocs; i++) {
    // ...
    for (int j = bottom_level; j > 0; j--) {
      if (hw_thread.ids[j] > 0) {
        if (core_level < (j - 1)) {
          // ... (raise core_level to j - 1)
        }
      }
    }
  }
  // ... (return core_level)
}

static int __kmp_affinity_compute_ncores(int nprocs, int bottom_level,
                                         int core_level) {
  // ... (the core index of the last hw thread, plus one)
}

// Count the distinct cores among hw threads 0..proc: a new core starts
// whenever any id at or above core_level changes between neighbors.
static int __kmp_affinity_find_core(int proc, int bottom_level,
                                    int core_level) {
  KMP_DEBUG_ASSERT(proc >= 0 && proc < __kmp_topology->get_num_hw_threads());
  for (int i = 0; i <= proc; ++i) {
    if (i + 1 <= proc) {
      for (int j = 0; j <= core_level; ++j) {
        // ... (compare hw threads i and i + 1 at level j)
      }
    }
  }
  // ... (return the core index)
}

// Maximum number of hw threads per core at this core level.
static int __kmp_affinity_max_proc_per_core(int nprocs, int bottom_level,
                                            int core_level) {
  if (core_level >= bottom_level)
    return 1;
  // ... (otherwise, the thread-to-core ratio from the topology)
}
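// [Illustrative sketch, not part of the runtime.] The helpers above derive
// core counts from the sorted per-thread id arrays: a new core starts
// whenever any id at or above the core level changes. A toy topology of 8
// hw threads as {socket, core, thread} triples:

#include <cstdio>

int main() {
  const int ids[8][3] = {{0, 0, 0}, {0, 0, 1}, {0, 1, 0}, {0, 1, 1},
                         {1, 0, 0}, {1, 0, 1}, {1, 1, 0}, {1, 1, 1}};
  const int core_level = 1; // compare ids[0..core_level]
  int ncores = 1, run = 1, max_run = 1;
  for (int i = 1; i < 8; ++i) {
    bool new_core = false;
    for (int j = 0; j <= core_level; ++j)
      if (ids[i][j] != ids[i - 1][j])
        new_core = true;
    if (new_core) {
      ncores++;
      run = 1;
    } else if (++run > max_run) {
      max_run = run;
    }
  }
  printf("ncores=%d, max procs per core=%d\n", ncores, max_run); // 4, 2
  return 0;
}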
// Helper state for balanced affinity and topology lookups.
static int *procarr = NULL;
static int __kmp_aff_depth = 0;
static int *__kmp_osid_to_hwthread_map = NULL;

// Fill in the topology ids and attributes for the hw threads covered by
// `mask`.
static void __kmp_affinity_get_mask_topology_info(const kmp_affin_mask_t *mask,
                                                  kmp_affinity_ids_t &ids,
                                                  kmp_affinity_attrs_t &attrs) {
  if (!KMP_AFFINITY_CAPABLE())
    return;
  // Initialize the ids and attrs to "unknown".
  // ...
  attrs = KMP_AFFINITY_ATTRS_UNKNOWN;
  // Iterate through each OS id within the mask and merge that hw thread's
  // topology information into the result.
  // ...
  KMP_CPU_SET_ITERATE(cpu, mask) {
    int osid_idx = __kmp_osid_to_hwthread_map[cpu];
    // ...
  }
}

static void __kmp_affinity_get_thread_topology_info(kmp_info_t *th) {
  if (!KMP_AFFINITY_CAPABLE())
    return;
  // ...
  const kmp_affin_mask_t *mask = th->th.th_affin_mask;
  kmp_affinity_ids_t &ids = th->th.th_topology_ids;
  kmp_affinity_attrs_t &attrs = th->th.th_topology_attrs;
  __kmp_affinity_get_mask_topology_info(mask, ids, attrs);
}
// Create the per-place ids/attrs arrays and the OS id -> hw thread map.
static void __kmp_affinity_get_topology_info(kmp_affinity_t &affinity) {
  if (!KMP_AFFINITY_CAPABLE())
    return;
  if (affinity.type != affinity_none) {
    // ... (sanity checks on the granularity settings)
  }
  // ...
  int num_hw_threads = __kmp_topology->get_num_hw_threads();
  int max_cpu = __kmp_affin_fullMask->get_max_cpu();
  // Allocate thread topology information, if needed.
  if (!affinity.ids) {
    affinity.ids = (kmp_affinity_ids_t *)__kmp_allocate(
        sizeof(kmp_affinity_ids_t) * affinity.num_masks);
  }
  if (!affinity.attrs) {
    affinity.attrs = (kmp_affinity_attrs_t *)__kmp_allocate(
        sizeof(kmp_affinity_attrs_t) * affinity.num_masks);
  }
  if (!__kmp_osid_to_hwthread_map) {
    // max_cpu must itself be a valid index, hence the +1.
    __kmp_osid_to_hwthread_map =
        (int *)__kmp_allocate(sizeof(int) * (max_cpu + 1));
  }
  // Map each OS proc id in the full mask to its hw thread index.
  for (int hw_thread = 0; hw_thread < num_hw_threads; ++hw_thread) {
    int os_id = __kmp_topology->at(hw_thread).os_id;
    if (KMP_CPU_ISSET(os_id, __kmp_affin_fullMask))
      __kmp_osid_to_hwthread_map[os_id] = hw_thread;
  }
  // Fill in the ids and attrs for each place.
  for (unsigned i = 0; i < affinity.num_masks; ++i) {
    kmp_affinity_ids_t &ids = affinity.ids[i];
    kmp_affinity_attrs_t &attrs = affinity.attrs[i];
    kmp_affin_mask_t *mask = KMP_CPU_INDEX(affinity.masks, i);
    __kmp_affinity_get_mask_topology_info(mask, ids, attrs);
  }
}
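// [Illustrative sketch, not part of the runtime.] The
// __kmp_osid_to_hwthread_map above is a plain inverse map: OS proc ids are
// not necessarily dense or sorted, so invert the per-thread os_id list once
// and answer lookups in O(1):

#include <cstdio>
#include <vector>

int main() {
  const std::vector<int> os_ids = {0, 4, 1, 5, 2, 6, 3, 7}; // per hw thread
  const int max_cpu = 7;
  std::vector<int> osid_to_hwthread(max_cpu + 1, -1); // -1 = absent
  for (int hw = 0; hw < (int)os_ids.size(); ++hw)
    osid_to_hwthread[os_ids[hw]] = hw;
  printf("os id 5 -> hw thread %d\n", osid_to_hwthread[5]); // prints 3
  return 0;
}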
// Initialize other data structures that depend on the topology.
static void __kmp_aux_affinity_initialize_other_data(kmp_affinity_t &affinity) {
  if (__kmp_topology && __kmp_topology->get_num_hw_threads()) {
    // ...
    __kmp_affinity_get_topology_info(affinity);
#if KMP_WEIGHTED_ITERATIONS_SUPPORTED
    __kmp_first_osid_with_ecore = __kmp_get_first_osid_with_ecore();
#endif
  }
}

// Create a single place covering the whole machine, used when affinity is
// none.
static void __kmp_create_affinity_none_places(kmp_affinity_t &affinity) {
  KMP_ASSERT(__kmp_affin_fullMask != NULL);
  KMP_ASSERT(affinity.type == affinity_none);
  // ...
  affinity.num_masks = 1;
  KMP_CPU_ALLOC_ARRAY(affinity.masks, affinity.num_masks);
  kmp_affin_mask_t *dest = KMP_CPU_INDEX(affinity.masks, 0);
  KMP_CPU_COPY(dest, __kmp_affin_fullMask);
  __kmp_aux_affinity_initialize_other_data(affinity);
}
// Create the "full" mask, which defines all of the processors the machine
// model considers, and remember the original affinity for restoration.
static void __kmp_aux_affinity_initialize_masks(kmp_affinity_t &affinity) {
  // ...
  int verbose = affinity.flags.verbose;
  const char *env_var = affinity.env_var;

  // Already initialized?
  if (__kmp_affin_fullMask && __kmp_affin_origMask)
    return;

  if (__kmp_affin_fullMask == NULL) {
    KMP_CPU_ALLOC(__kmp_affin_fullMask);
  }
  if (__kmp_affin_origMask == NULL) {
    KMP_CPU_ALLOC(__kmp_affin_origMask);
  }
  if (KMP_AFFINITY_CAPABLE()) {
    __kmp_get_system_affinity(__kmp_affin_fullMask, TRUE);
    // Make a copy before possibly expanding to the entire machine.
    __kmp_affin_origMask->copy(__kmp_affin_fullMask);
    if (affinity.flags.respect) {
      // Count the number of available processors.
      KMP_CPU_SET_ITERATE(i, __kmp_affin_fullMask) {
        if (!KMP_CPU_ISSET(i, __kmp_affin_fullMask)) {
          continue;
        }
        // ...
      }
      if (/* ... the available-proc count is inconsistent ... */) {
        // ... (warn and disable affinity)
        affinity.type = affinity_none;
        KMP_AFFINITY_DISABLE();
        return;
      }
      // ...
      if (verbose) {
        char buf[KMP_AFFIN_MASK_PRINT_LEN];
        __kmp_affinity_print_mask(buf, KMP_AFFIN_MASK_PRINT_LEN,
                                  __kmp_affin_fullMask);
        // ... (report the initial affinity mask)
      }
    } else {
      if (verbose) {
        char buf[KMP_AFFIN_MASK_PRINT_LEN];
        __kmp_affinity_print_mask(buf, KMP_AFFIN_MASK_PRINT_LEN,
                                  __kmp_affin_fullMask);
        // ... (report that the initial mask is not respected)
      }
      __kmp_affinity_entire_machine_mask(__kmp_affin_fullMask);
      if (__kmp_num_proc_groups <= 1) {
        // ...
        __kmp_affin_origMask->copy(__kmp_affin_fullMask);
      }
      // ... (Windows: threads' masks must be subsets of the process mask)
      __kmp_affin_fullMask->set_process_affinity(true);
    }
  }
}
// Detect the machine topology with the selected method (or, for "all", with
// a chain of methods in decreasing order of detail). Returns true on
// success.
static bool __kmp_aux_affinity_initialize_topology(kmp_affinity_t &affinity) {
  bool success = false;
  const char *env_var = affinity.env_var;
  kmp_i18n_id_t msg_id = kmp_i18n_null;
  int verbose = affinity.flags.verbose;

  // If KMP_CPUINFO_FILE was set, force the cpuinfo method.
  if ((__kmp_cpuinfo_file != NULL) &&
      (__kmp_affinity_top_method == affinity_top_method_all)) {
    __kmp_affinity_top_method = affinity_top_method_cpuinfo;
  }

  if (__kmp_affinity_top_method == affinity_top_method_all) {
    // Try hwloc, then the x2apic/apic cpuid leaves, then /proc/cpuinfo,
    // then Windows processor groups, and finally a flat map.
    // ...
    if (/* ... hwloc was selected and */
        __kmp_affinity_dispatch->get_api_type() == KMPAffinity::HWLOC) {
      if (!__kmp_hwloc_error) {
        success = __kmp_affinity_create_hwloc_map(&msg_id);
        if (!success && verbose) {
          // ... (report the hwloc failure)
        }
      } else if (verbose) {
        // ... (report the earlier hwloc error)
      }
    }
#if KMP_ARCH_X86 || KMP_ARCH_X86_64
    if (!success) {
      success = __kmp_affinity_create_x2apicid_map(&msg_id);
      if (!success && verbose && msg_id != kmp_i18n_null) {
        // ...
      }
    }
    if (!success) {
      success = __kmp_affinity_create_apicid_map(&msg_id);
      if (!success && verbose && msg_id != kmp_i18n_null) {
        // ...
      }
    }
#endif
#if KMP_OS_LINUX || KMP_OS_AIX
    if (!success) {
      // ...
      success = __kmp_affinity_create_cpuinfo_map(&line, &msg_id);
      if (!success && verbose && msg_id != kmp_i18n_null) {
        // ...
      }
    }
#endif
#if KMP_GROUP_AFFINITY
    if (!success && (__kmp_num_proc_groups > 1)) {
      success = __kmp_affinity_create_proc_group_map(&msg_id);
      if (!success && verbose && msg_id != kmp_i18n_null) {
        // ...
      }
    }
#endif
    if (!success) {
      success = __kmp_affinity_create_flat_map(&msg_id);
      if (!success && verbose && msg_id != kmp_i18n_null) {
        // ...
      }
    }
  }
  // Explicitly requested methods:
  else if (__kmp_affinity_top_method == affinity_top_method_hwloc) {
    KMP_ASSERT(__kmp_affinity_dispatch->get_api_type() == KMPAffinity::HWLOC);
    success = __kmp_affinity_create_hwloc_map(&msg_id);
    // ... (fatal if it fails)
  }
#if KMP_ARCH_X86 || KMP_ARCH_X86_64
  else if (__kmp_affinity_top_method == affinity_top_method_x2apicid ||
           __kmp_affinity_top_method == affinity_top_method_x2apicid_1f) {
    success = __kmp_affinity_create_x2apicid_map(&msg_id);
    // ...
  } else if (__kmp_affinity_top_method == affinity_top_method_apicid) {
    success = __kmp_affinity_create_apicid_map(&msg_id);
    // ...
  }
#endif
  else if (__kmp_affinity_top_method == affinity_top_method_cpuinfo) {
    // ...
    success = __kmp_affinity_create_cpuinfo_map(&line, &msg_id);
    if (!success) {
      const char *filename = __kmp_cpuinfo_get_filename();
      // ... (report the failing file and line)
      KMP_FATAL(FileLineMsgExiting, filename, line,
                __kmp_i18n_catgets(msg_id));
    }
  }
#if KMP_GROUP_AFFINITY
  else if (__kmp_affinity_top_method == affinity_top_method_group) {
    success = __kmp_affinity_create_proc_group_map(&msg_id);
    // ...
  }
#endif
  else if (__kmp_affinity_top_method == affinity_top_method_flat) {
    success = __kmp_affinity_create_flat_map(&msg_id);
    // ...
  }
  // ...
  if (KMP_AFFINITY_CAPABLE()) {
    // ... (restrict the topology to the full mask)
    if (filtered && verbose)
      // ... (report the filtered topology)
  }
  // ... (return success)
}
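// [Illustrative sketch, not part of the runtime.] The "all" method above is
// a fallback chain: try each detection method in decreasing order of detail
// and stop at the first success. The probe functions here are stand-ins,
// not the real detection routines:

#include <cstdio>

static bool probe_hwloc() { return false; }
static bool probe_cpuid() { return false; }
static bool probe_cpuinfo() { return false; }
static bool probe_flat() { return true; } // the flat map always succeeds

int main() {
  bool (*probes[])() = {probe_hwloc, probe_cpuid, probe_cpuinfo, probe_flat};
  const char *names[] = {"hwloc", "cpuid", "cpuinfo", "flat"};
  for (int i = 0; i < 4; ++i)
    if (probes[i]()) {
      printf("topology from %s\n", names[i]); // prints: topology from flat
      break;
    }
  return 0;
}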
static void __kmp_aux_affinity_initialize(kmp_affinity_t &affinity) {
  bool is_regular_affinity = (&affinity == &__kmp_affinity);
  bool is_hidden_helper_affinity = (&affinity == &__kmp_hh_affinity);
  const char *env_var = __kmp_get_affinity_env_var(affinity);

  if (affinity.flags.initialized) {
    // ... (nothing to do)
    return;
  }

  if (is_regular_affinity && (!__kmp_affin_fullMask || !__kmp_affin_origMask))
    __kmp_aux_affinity_initialize_masks(affinity);

  // ...
  bool success = __kmp_aux_affinity_initialize_topology(affinity);
  if (!success) {
    // Topology detection failed: fall back to no affinity.
    affinity.type = affinity_none;
    KMP_AFFINITY_DISABLE();
  }

  // If affinity is off, create one place covering the whole machine.
  if (affinity.type == affinity_none) {
    __kmp_create_affinity_none_places(affinity);
#if KMP_USE_HIER_SCHED
    __kmp_dispatch_set_hierarchy_values();
#endif
    affinity.flags.initialized = TRUE;
    return;
  }

  // ...
  unsigned numUnique = 0;
  // Create the table of masks, indexed by OS thread id, at the requested
  // granularity.
  // 1) Granularity given as a core attribute (core type or efficiency):
  if (affinity.core_attr_gran.valid) {
    __kmp_create_os_id_masks(&numUnique, affinity, [&](int idx) {
      // ... (find the next hw thread whose core attribute differs)
      for (int i = idx + 1; i < numAddrs; ++i)
        // ...
    });
    if (!affinity.os_id_masks) {
      const char *core_attribute;
      if (/* ... the granularity was an efficiency ... */)
        core_attribute = "core_efficiency";
      else
        core_attribute = "core_type";
      // ... (warn that this attribute granularity is unavailable)
    }
  }
  // 2) Granularity given as a topology level:
  if (!affinity.os_id_masks) {
    int gran = affinity.gran_levels;
    int gran_level = depth - 1 - affinity.gran_levels;
    if (gran >= 0 && gran_level >= 0 && gran_level < depth) {
      __kmp_create_os_id_masks(
          &numUnique, affinity, [depth, numAddrs, &affinity](int idx) {
            // ... (find the next hw thread outside idx's granule)
            int gran = affinity.gran_levels;
            int gran_level = depth - 1 - affinity.gran_levels;
            for (int i = idx + 1; i < numAddrs; ++i)
              if ((gran >= depth) ||
                  (gran < depth && __kmp_topology->at(i).ids[gran_level] !=
                                       /* ... idx's id at gran_level ... */))
                // ...
          });
    }
  }
  // 3) Fallback: one mask per hw thread.
  if (!affinity.os_id_masks) {
    __kmp_create_os_id_masks(&numUnique, affinity, [](int idx) {
      // ...
    });
  }

  switch (affinity.type) {

  case affinity_explicit:
    // ...
    if (is_hidden_helper_affinity ||
        /* ... the list came from KMP_AFFINITY's proclist ... */)
      __kmp_affinity_process_proclist(affinity);
    else
      __kmp_affinity_process_placelist(affinity);
    if (affinity.num_masks == 0) {
      // ... (warn: no valid places remain)
      affinity.type = affinity_none;
      __kmp_create_affinity_none_places(affinity);
      affinity.flags.initialized = TRUE;
      return;
    }
    break;

  // The remaining types sort the hw threads and carve masks out of the
  // sorted order; they differ only in the sort key (affinity.compact) and
  // the starting offset.
  case affinity_logical:
    affinity.compact = 0;
    if (affinity.offset) {
      // ...
    }
    break;

  case affinity_physical:
    if (/* ... there are multiple threads per core ... */) {
      affinity.compact = 1;
      if (affinity.compact >= depth) {
        affinity.compact = 0;
      }
    } else {
      affinity.compact = 0;
    }
    if (affinity.offset) {
      // ...
    }
    break;

  case affinity_scatter:
    if (affinity.compact >= depth) {
      affinity.compact = 0;
    } else {
      // Scatter is compact with the key significance reversed.
      affinity.compact = depth - 1 - affinity.compact;
    }
    break;

  case affinity_compact:
    if (affinity.compact >= depth) {
      affinity.compact = depth - 1;
    }
    break;

  case affinity_balanced:
    if (depth <= 1 || is_hidden_helper_affinity) {
      // ... (balanced is not supported here: warn and fall back)
      affinity.type = affinity_none;
      __kmp_create_affinity_none_places(affinity);
      affinity.flags.initialized = TRUE;
      return;
    }
    // Non-uniform topologies need the procarr helper table.
    __kmp_aff_depth = depth;
    // ...
    int maxprocpercore = __kmp_affinity_max_proc_per_core(
        /* ... */);
    // ...
    int nproc = ncores * maxprocpercore;
    if (/* ... the table cannot be built ... */) {
      affinity.type = affinity_none;
      __kmp_create_affinity_none_places(affinity);
      affinity.flags.initialized = TRUE;
      return;
    }
    for (int i = 0; i < nproc; i++) {
      // ... (mark every procarr slot as empty)
    }
    // Record each hw thread's OS proc id in its core's next free slot:
    // ...
      int core = __kmp_affinity_find_core(i, depth - 1, core_level);
      // ...
      if (core == lastcore) {
        // ... (advance within the same core)
      }
      // ...
      procarr[core * maxprocpercore + inlastcore] = proc;
    // ...
    if (affinity.compact >= depth) {
      affinity.compact = depth - 1;
    }
    break;
  }

  // ...
  if (affinity.flags.dups) {
    // ... (one mask per hw thread)
  } else {
    // Without duplicates, there is one mask per unique granule.
    affinity.num_masks = numUnique;
  }

  if (/* ... OMP_PLACES requested a fixed number of places ... */
      !is_hidden_helper_affinity) {
    // ... (limit num_masks to that number)
  }

  KMP_CPU_ALLOC_ARRAY(affinity.masks, affinity.num_masks);

  // Sort the topology by the chosen compact key, then copy each granule's
  // OS-id mask into the place list.
  // ...
  for (i = 0, j = 0; i < num_hw_threads; i++) {
    // ... (only granule leaders contribute a mask)
    kmp_affin_mask_t *src = KMP_CPU_INDEX(affinity.os_id_masks, osId);
    if (KMP_CPU_ISEMPTY(src))
      continue;
    kmp_affin_mask_t *dest = KMP_CPU_INDEX(affinity.masks, j);
    KMP_CPU_COPY(dest, src);
    if (++j >= affinity.num_masks) {
      break;
    }
  }
  // ...
  if (full_mask.restrict_to_mask() && affinity.flags.verbose) {
    // ... (report the restricted full mask)
  }
  // ...
  __kmp_aux_affinity_initialize_other_data(affinity);
  affinity.flags.initialized = TRUE;
}
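// [Illustrative sketch, not part of the runtime.] Why scatter is just
// "compact = depth - 1 - compact": every non-explicit type sorts the hw
// threads, and the compact value only decides which topology levels are most
// significant in the sort key. Reversing the significance makes consecutive
// places land on different sockets. A standalone sort of 8 {socket, core,
// thread} triples with the bottom level most significant (the scatter
// order):

#include <algorithm>
#include <cstdio>
#include <vector>

struct HwThread {
  int ids[3]; // {socket, core, thread}
};

int main() {
  std::vector<HwThread> t = {{{0, 0, 0}}, {{0, 0, 1}}, {{0, 1, 0}},
                             {{0, 1, 1}}, {{1, 0, 0}}, {{1, 0, 1}},
                             {{1, 1, 0}}, {{1, 1, 1}}};
  std::stable_sort(t.begin(), t.end(),
                   [](const HwThread &a, const HwThread &b) {
                     for (int j = 2; j >= 0; --j) // thread, core, socket
                       if (a.ids[j] != b.ids[j])
                         return a.ids[j] < b.ids[j];
                     return false;
                   });
  for (const HwThread &h : t)
    printf("(%d,%d,%d) ", h.ids[0], h.ids[1], h.ids[2]);
  printf("\n"); // (0,0,0) (1,0,0) (0,1,0) (1,1,0) (0,0,1) (1,0,1) ...
  return 0;    // neighbors alternate sockets: the scatter placement
}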
void __kmp_affinity_initialize(kmp_affinity_t &affinity) {
  // Much of the code above assumes that, if a machine is not affinity
  // capable, then affinity.type == affinity_none. A disabled setting is
  // therefore temporarily replaced with affinity_none for the real
  // initialization and restored afterwards.
  int disabled = (affinity.type == affinity_disabled);
  if (!KMP_AFFINITY_CAPABLE())
    KMP_ASSERT(disabled);
  if (disabled)
    affinity.type = affinity_none;
  __kmp_aux_affinity_initialize(affinity);
  if (disabled)
    affinity.type = affinity_disabled;
}
void __kmp_affinity_uninitialize(void) {
  for (kmp_affinity_t *affinity : __kmp_affinities) {
    if (affinity->masks != NULL)
      KMP_CPU_FREE_ARRAY(affinity->masks, affinity->num_masks);
    if (affinity->os_id_masks != NULL)
      KMP_CPU_FREE_ARRAY(affinity->os_id_masks, affinity->num_os_id_masks);
    if (affinity->proclist != NULL)
      __kmp_free(affinity->proclist);
    if (affinity->ids != NULL)
      __kmp_free(affinity->ids);
    if (affinity->attrs != NULL)
      __kmp_free(affinity->attrs);
    *affinity = KMP_AFFINITY_INIT(affinity->env_var);
  }
  if (__kmp_affin_origMask != NULL) {
    if (KMP_AFFINITY_CAPABLE()) {
#if KMP_OS_AIX
      // Restore by unbinding the thread.
      bindprocessor(BINDTHREAD, thread_self(), PROCESSOR_CLASS_ANY);
#else
      __kmp_set_system_affinity(__kmp_affin_origMask, FALSE);
#endif
    }
    KMP_CPU_FREE(__kmp_affin_origMask);
    __kmp_affin_origMask = NULL;
  }
  // ...
  if (procarr != NULL) {
    __kmp_free(procarr);
    procarr = NULL;
  }
  if (__kmp_osid_to_hwthread_map) {
    __kmp_free(__kmp_osid_to_hwthread_map);
    __kmp_osid_to_hwthread_map = NULL;
  }
  // ...
  if (__kmp_hwloc_topology != NULL) {
    hwloc_topology_destroy(__kmp_hwloc_topology);
    __kmp_hwloc_topology = NULL;
  }
  // ...
  KMPAffinity::destroy_api();
}
// Pick the place (and its mask) for a given gtid, round-robin with offset.
static void __kmp_select_mask_by_gtid(int gtid, const kmp_affinity_t *affinity,
                                      int *place, kmp_affin_mask_t **mask) {
  int mask_idx;
  // ...
  if (is_hidden_helper)
    // The first two gtids are the regular primary thread and the hidden
    // team's main thread; hidden helper workers start at gtid 2.
    mask_idx = gtid - 2;
  else
    mask_idx = __kmp_adjust_gtid_for_hidden_helpers(gtid);
  // ...
  *place = (mask_idx + affinity->offset) % affinity->num_masks;
  *mask = KMP_CPU_INDEX(affinity->masks, *place);
}
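// [Illustrative sketch, not part of the runtime.] Place selection above is
// plain round-robin with an offset: thread mask_idx gets place
// (mask_idx + offset) % num_masks.

#include <cstdio>

int main() {
  const int num_masks = 4, offset = 1;
  for (int mask_idx = 0; mask_idx < 8; ++mask_idx)
    printf("thread %d -> place %d\n", mask_idx,
           (mask_idx + offset) % num_masks); // places 1 2 3 0 1 2 3 0
  return 0;
}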
// Choose and record (but do not yet bind) the initial mask for a thread.
void __kmp_affinity_set_init_mask(int gtid, int isa_root) {
  // ...
  th->th.th_topology_attrs = KMP_AFFINITY_ATTRS_UNKNOWN;

  if (!KMP_AFFINITY_CAPABLE()) {
    return;
  }
  if (th->th.th_affin_mask == NULL) {
    KMP_CPU_ALLOC(th->th.th_affin_mask);
  } else {
    KMP_CPU_ZERO(th->th.th_affin_mask);
  }
  // ...
  kmp_affin_mask_t *mask;
  int i;
  const kmp_affinity_t *affinity;
  bool is_hidden_helper = KMP_HIDDEN_HELPER_THREAD(gtid);
  if (is_hidden_helper)
    affinity = &__kmp_hh_affinity;
  else
    affinity = &__kmp_affinity;

  if (KMP_AFFINITY_NON_PROC_BIND || is_hidden_helper) {
    if ((affinity->type == affinity_none) ||
        (affinity->type == affinity_balanced) ||
        /* ... the hidden-helper main thread ... */) {
#if KMP_GROUP_AFFINITY
      if (__kmp_num_proc_groups > 1) {
        return;
      }
#endif
      // ... (use all places)
      mask = __kmp_affin_fullMask;
    } else {
      __kmp_select_mask_by_gtid(gtid, affinity, &i, &mask);
    }
  } else {
    // The OMP_PROC_BIND path.
    if (/* ... no binding requested ... */) {
#if KMP_GROUP_AFFINITY
      if (__kmp_num_proc_groups > 1) {
        return;
      }
#endif
      // ...
      mask = __kmp_affin_fullMask;
    } else {
      __kmp_select_mask_by_gtid(gtid, affinity, &i, &mask);
    }
  }

  th->th.th_current_place = i;
  if (isa_root && !is_hidden_helper) {
    th->th.th_new_place = i;
    th->th.th_first_place = 0;
    th->th.th_last_place = affinity->num_masks - 1;
  } else if (KMP_AFFINITY_NON_PROC_BIND) {
    // ... (non-root threads keep the full place partition)
    th->th.th_first_place = 0;
    th->th.th_last_place = affinity->num_masks - 1;
  }
  // ...
  th->th.th_topology_ids = __kmp_affinity.ids[i];
  th->th.th_topology_attrs = __kmp_affinity.attrs[i];
  // ...
  if (i == KMP_PLACE_ALL) {
    KA_TRACE(100,
             ("__kmp_affinity_set_init_mask: setting T#%d to all places\n",
              gtid));
  } else {
    KA_TRACE(100, ("__kmp_affinity_set_init_mask: setting T#%d to place %d\n",
                   gtid, i));
  }

  KMP_CPU_COPY(th->th.th_affin_mask, mask);
}
// Apply the previously chosen initial mask to the OS thread.
void __kmp_affinity_bind_init_mask(int gtid) {
  if (!KMP_AFFINITY_CAPABLE()) {
    return;
  }
  // ...
  const kmp_affinity_t *affinity;
  const char *env_var;
  bool is_hidden_helper = KMP_HIDDEN_HELPER_THREAD(gtid);
  if (is_hidden_helper)
    affinity = &__kmp_hh_affinity;
  else
    affinity = &__kmp_affinity;
  env_var = __kmp_get_affinity_env_var(*affinity, /*for_binding=*/true);
  // Only print the mask when it actually restricts the thread.
  if (affinity->flags.verbose && (affinity->type == affinity_none ||
                                  (th->th.th_current_place != KMP_PLACE_ALL &&
                                   affinity->type != affinity_balanced)) &&
      /* ... not the hidden-helper main thread ... */) {
    char buf[KMP_AFFIN_MASK_PRINT_LEN];
    __kmp_affinity_print_mask(buf, KMP_AFFIN_MASK_PRINT_LEN,
                              th->th.th_affin_mask);
    // ... (report the bound OS proc set)
  }
  // ...
  if (affinity->type == affinity_none) {
    // Even with affinity none, set the (full) mask, without error checking.
    __kmp_set_system_affinity(th->th.th_affin_mask, FALSE);
  } else {
    __kmp_set_system_affinity(th->th.th_affin_mask, TRUE);
  }
}
// Move a thread to its new place within the place partition.
void __kmp_affinity_bind_place(int gtid) {
  // Hidden helper threads are not affected by OMP_PLACES/OMP_PROC_BIND.
  // ...
  KA_TRACE(100, ("__kmp_affinity_bind_place: binding T#%d to place %d (current "
                 "place = %d)\n",
                 gtid, th->th.th_new_place, th->th.th_current_place));
  // Check that the new place is within this thread's partition.
  // ...
  KMP_ASSERT((unsigned)th->th.th_new_place <= __kmp_affinity.num_masks);
  if (th->th.th_first_place <= th->th.th_last_place) {
    KMP_ASSERT((th->th.th_new_place >= th->th.th_first_place) &&
               (th->th.th_new_place <= th->th.th_last_place));
  } else {
    // The partition wraps around the end of the place list.
    KMP_ASSERT((th->th.th_new_place <= th->th.th_first_place) ||
               (th->th.th_new_place >= th->th.th_last_place));
  }
  // Copy the place's mask into the kmp_info_t structure and set it in the OS.
  kmp_affin_mask_t *mask =
      KMP_CPU_INDEX(__kmp_affinity.masks, th->th.th_new_place);
  KMP_CPU_COPY(th->th.th_affin_mask, mask);
  th->th.th_current_place = th->th.th_new_place;

  if (__kmp_affinity.flags.verbose) {
    char buf[KMP_AFFIN_MASK_PRINT_LEN];
    __kmp_affinity_print_mask(buf, KMP_AFFIN_MASK_PRINT_LEN,
                              th->th.th_affin_mask);
    // ... (report, passing __kmp_gettid(), gtid, and buf)
  }
  __kmp_set_system_affinity(th->th.th_affin_mask, TRUE);
}
// Entry point for kmp_set_affinity(): validate and apply a user mask.
int __kmp_aux_set_affinity(void **mask) {
  // ...
  if (!KMP_AFFINITY_CAPABLE()) {
    return -1;
  }
  // ... (debug trace of the requested mask)
    char buf[KMP_AFFIN_MASK_PRINT_LEN];
    __kmp_affinity_print_mask(buf, KMP_AFFIN_MASK_PRINT_LEN,
                              (kmp_affin_mask_t *)(*mask));
    __kmp_debug_printf(
        "kmp_set_affinity: setting affinity mask for thread %d = %s\n",
        gtid, buf);
  // ...
  if ((mask == NULL) || (*mask == NULL)) {
    KMP_FATAL(AffinityInvalidMask, "kmp_set_affinity");
  } else {
    // Validate the mask: every set proc must be available, and at least one
    // proc must be set.
    unsigned proc;
    int num_procs = 0;
    KMP_CPU_SET_ITERATE(proc, ((kmp_affin_mask_t *)(*mask))) {
      if (!KMP_CPU_ISSET(proc, __kmp_affin_fullMask)) {
        KMP_FATAL(AffinityInvalidMask, "kmp_set_affinity");
      }
      if (!KMP_CPU_ISSET(proc, (kmp_affin_mask_t *)(*mask))) {
        continue;
      }
      num_procs++;
    }
    if (num_procs == 0) {
      KMP_FATAL(AffinityInvalidMask, "kmp_set_affinity");
    }
#if KMP_GROUP_AFFINITY
    // The mask must not span Windows processor groups.
    if (__kmp_get_proc_group((kmp_affin_mask_t *)(*mask)) < 0) {
      KMP_FATAL(AffinityInvalidMask, "kmp_set_affinity");
    }
#endif
  }
  // ...
  retval = __kmp_set_system_affinity((kmp_affin_mask_t *)(*mask), FALSE);
  // ...
  KMP_CPU_COPY(th->th.th_affin_mask, (kmp_affin_mask_t *)(*mask));
  // A manually set mask invalidates the place information.
  th->th.th_current_place = KMP_PLACE_UNDEFINED;
  th->th.th_new_place = KMP_PLACE_UNDEFINED;
  th->th.th_first_place = 0;
  th->th.th_last_place = __kmp_affinity.num_masks - 1;
  // ...
  return retval;
}
// Entry point for kmp_get_affinity(): return the thread's current mask.
int __kmp_aux_get_affinity(void **mask) {
  // ...
#if KMP_OS_WINDOWS || KMP_OS_AIX || KMP_DEBUG
  // ... (the thread descriptor is consulted on these paths)
#endif
  if (!KMP_AFFINITY_CAPABLE()) {
    return -1;
  }
  // ...
#if KMP_OS_WINDOWS || KMP_OS_AIX || KMP_DEBUG
  // ... (debug trace of the stored mask)
    char buf[KMP_AFFIN_MASK_PRINT_LEN];
    __kmp_affinity_print_mask(buf, KMP_AFFIN_MASK_PRINT_LEN,
                              th->th.th_affin_mask);
    __kmp_printf(
        "kmp_get_affinity: stored affinity mask for thread %d = %s\n", gtid,
        buf);
#endif
  // ...
  if ((mask == NULL) || (*mask == NULL)) {
    KMP_FATAL(AffinityInvalidMask, "kmp_get_affinity");
  }
  // ...
#if !KMP_OS_WINDOWS && !KMP_OS_AIX
  // Query the OS directly.
  retval = __kmp_get_system_affinity((kmp_affin_mask_t *)(*mask), FALSE);
  // ... (debug trace of the system mask)
    char buf[KMP_AFFIN_MASK_PRINT_LEN];
    __kmp_affinity_print_mask(buf, KMP_AFFIN_MASK_PRINT_LEN,
                              (kmp_affin_mask_t *)(*mask));
    __kmp_printf(
        "kmp_get_affinity: system affinity mask for thread %d = %s\n", gtid,
        buf);
  return retval;
#else
  // On Windows and AIX, return the runtime's stored copy instead.
  KMP_CPU_COPY((kmp_affin_mask_t *)(*mask), th->th.th_affin_mask);
  return 0;
#endif
}
int __kmp_aux_get_affinity_max_proc() {
  if (!KMP_AFFINITY_CAPABLE()) {
    return 0;
  }
#if KMP_GROUP_AFFINITY
  // With multiple Windows processor groups, the limit is the combined bit
  // capacity of all groups.
  if (__kmp_num_proc_groups > 1) {
    return (int)(__kmp_num_proc_groups * sizeof(DWORD_PTR) * CHAR_BIT);
  }
#endif
  // ... (otherwise, the machine's proc count)
}
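// [Illustrative sketch, not part of the runtime.] Each Windows processor
// group contributes one DWORD_PTR worth of affinity bits, so the capacity
// formula above gives e.g. 2 groups * 64 bits = 128 logical processors on
// 64-bit Windows:

#include <cstdio>

int main() {
  const int num_proc_groups = 2;
  const int bits_per_group = 8 * 8; // sizeof(DWORD_PTR) * CHAR_BIT on x64
  printf("max procs = %d\n", num_proc_groups * bits_per_group); // 128
  return 0;
}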
int __kmp_aux_set_affinity_mask_proc(int proc, void **mask) {
  if (!KMP_AFFINITY_CAPABLE()) {
    return -1;
  }
  // ... (debug trace)
    char buf[KMP_AFFIN_MASK_PRINT_LEN];
    __kmp_affinity_print_mask(buf, KMP_AFFIN_MASK_PRINT_LEN,
                              (kmp_affin_mask_t *)(*mask));
    __kmp_debug_printf("kmp_set_affinity_mask_proc: setting proc %d in "
                       "affinity mask for thread %d = %s\n",
                       proc, gtid, buf);
  // ...
  if ((mask == NULL) || (*mask == NULL)) {
    KMP_FATAL(AffinityInvalidMask, "kmp_set_affinity_mask_proc");
  }
  // ...
  if ((proc < 0) || (proc >= __kmp_aux_get_affinity_max_proc())) {
    return -1;
  }
  if (!KMP_CPU_ISSET(proc, __kmp_affin_fullMask)) {
    return -2;
  }
  KMP_CPU_SET(proc, (kmp_affin_mask_t *)(*mask));
  return 0;
}

int __kmp_aux_unset_affinity_mask_proc(int proc, void **mask) {
  if (!KMP_AFFINITY_CAPABLE()) {
    return -1;
  }
  // ... (debug trace)
    char buf[KMP_AFFIN_MASK_PRINT_LEN];
    __kmp_affinity_print_mask(buf, KMP_AFFIN_MASK_PRINT_LEN,
                              (kmp_affin_mask_t *)(*mask));
    __kmp_debug_printf("kmp_unset_affinity_mask_proc: unsetting proc %d in "
                       "affinity mask for thread %d = %s\n",
                       proc, gtid, buf);
  // ...
  if ((mask == NULL) || (*mask == NULL)) {
    KMP_FATAL(AffinityInvalidMask, "kmp_unset_affinity_mask_proc");
  }
  // ...
  if ((proc < 0) || (proc >= __kmp_aux_get_affinity_max_proc())) {
    return -1;
  }
  if (!KMP_CPU_ISSET(proc, __kmp_affin_fullMask)) {
    return -2;
  }
  KMP_CPU_CLR(proc, (kmp_affin_mask_t *)(*mask));
  return 0;
}

int __kmp_aux_get_affinity_mask_proc(int proc, void **mask) {
  if (!KMP_AFFINITY_CAPABLE()) {
    return -1;
  }
  // ... (debug trace)
    char buf[KMP_AFFIN_MASK_PRINT_LEN];
    __kmp_affinity_print_mask(buf, KMP_AFFIN_MASK_PRINT_LEN,
                              (kmp_affin_mask_t *)(*mask));
    __kmp_debug_printf("kmp_get_affinity_mask_proc: getting proc %d in "
                       "affinity mask for thread %d = %s\n",
                       proc, gtid, buf);
  // ...
  if ((mask == NULL) || (*mask == NULL)) {
    KMP_FATAL(AffinityInvalidMask, "kmp_get_affinity_mask_proc");
  }
  // ...
  if ((proc < 0) || (proc >= __kmp_aux_get_affinity_max_proc())) {
    return -1;
  }
  if (!KMP_CPU_ISSET(proc, __kmp_affin_fullMask)) {
    return 0;
  }
  return KMP_CPU_ISSET(proc, (kmp_affin_mask_t *)(*mask));
}
#if KMP_WEIGHTED_ITERATIONS_SUPPORTED
// Return the OS proc id of the first efficiency (E) core, found by binary
// search over the hw threads, which are ordered with all performance cores
// first.
int __kmp_get_first_osid_with_ecore(void) {
  // ...
  while (high - low > 1) {
    mid = (high + low) / 2;
    if (__kmp_topology->at(mid).attrs.get_core_type() ==
        KMP_HW_CORE_TYPE_CORE) {
      // ... (mid is still a performance core: search the upper half)
    } else {
      // ... (mid is an efficiency core: search the lower half)
    }
  }
  // ... (return the os_id found, or -1 if there is no E-core)
}
#endif
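// [Illustrative sketch, not part of the runtime.] The E-core lookup above is
// a partition-point binary search: hw threads are ordered so performance
// cores precede efficiency cores, and we want the first efficiency core.
// std::partition_point performs the same search:

#include <algorithm>
#include <cstdio>
#include <vector>

int main() {
  std::vector<char> core_types = {'P', 'P', 'P', 'P', 'E', 'E', 'E', 'E'};
  auto it = std::partition_point(core_types.begin(), core_types.end(),
                                 [](char c) { return c == 'P'; });
  printf("first E-core index = %d\n", (int)(it - core_types.begin())); // 4
  return 0;
}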
// Dynamic affinity for KMP_AFFINITY=balanced: spread nthreads evenly over
// the machine's cores, then pin thread `tid` to its share.
void __kmp_balanced_affinity(kmp_info_t *th, int nthreads) {
  // ...
  bool fine_gran = true;
  int tid = th->th.th_info.ds.ds_tid;
  const char *env_var = "KMP_AFFINITY";

  // Granularity coarser than one hw thread disables fine-grained binding.
  switch (__kmp_affinity.gran) {
  // ...
  }

  // Uniform topology: simple index arithmetic suffices.
  if (/* ... the topology is uniform ... */) {
    int coreID;
    int threadID;
    // ...
    if ((nPackages > 1) && (__kmp_nth_per_core <= 1)) {
      // ... (treat packages as the "cores")
    }
    // How many threads land on every core, and how many "big" cores take
    // one extra thread.
    int chunk = nthreads / ncores;
    int big_cores = nthreads % ncores;
    // Number of threads on the big cores.
    int big_nth = (chunk + 1) * big_cores;
    if (tid < big_nth) {
      coreID = tid / (chunk + 1);
      threadID = (tid % (chunk + 1)) % __kmp_nth_per_core;
    } else { // tid >= big_nth
      coreID = (tid - big_cores) / chunk;
      threadID = ((tid - big_cores) % chunk) % __kmp_nth_per_core;
    }
    KMP_DEBUG_ASSERT2(KMP_AFFINITY_CAPABLE(),
                      "Illegal set affinity operation when not capable");

    kmp_affin_mask_t *mask = th->th.th_affin_mask;
    // ...
    if (fine_gran) {
      // Bind to a single hw thread.
      // ...
      KMP_CPU_SET(osID, mask);
    } else {
      // Bind to every hw thread of the chosen core.
      for (int i = 0; i < __kmp_nth_per_core; i++) {
        // ...
        KMP_CPU_SET(osID, mask);
      }
    }
    if (__kmp_affinity.flags.verbose) {
      char buf[KMP_AFFIN_MASK_PRINT_LEN];
      __kmp_affinity_print_mask(buf, KMP_AFFIN_MASK_PRINT_LEN, mask);
      // ... (report the bound OS proc set)
    }
    __kmp_affinity_get_thread_topology_info(th);
    __kmp_set_system_affinity(mask, TRUE);
  } else { // Non-uniform topology: use the procarr table built earlier.
    kmp_affin_mask_t *mask = th->th.th_affin_mask;
    // ...
    int ncores = __kmp_affinity_compute_ncores(__kmp_avail_proc,
                                               __kmp_aff_depth - 1, core_level);
    int nth_per_core = __kmp_affinity_max_proc_per_core(
        /* ... */);

    // Special case: exactly one thread per available proc.
    if (/* ... nthreads equals the available proc count ... */) {
      if (fine_gran) {
        // ...
        KMP_CPU_SET(osID, mask);
      } else {
        int core =
            __kmp_affinity_find_core(tid, __kmp_aff_depth - 1, core_level);
        for (/* ... each available proc i ... */) {
          // ...
          if (__kmp_affinity_find_core(i, __kmp_aff_depth - 1, core_level) ==
              core) {
            KMP_CPU_SET(osID, mask);
          }
        }
      }
    } else if (nthreads <= ncores) {
      // At most one thread per core: give thread `tid` the tid-th
      // non-empty core.
      // ...
      for (int i = 0; i < ncores; i++) {
        // Does this core have any available procs?
        for (int j = 0; j < nth_per_core; j++) {
          if (procarr[i * nth_per_core + j] != -1) {
            // ...
          }
        }
        // ... (when this is tid's core:)
        for (int j = 0; j < nth_per_core; j++) {
          int osID = procarr[i * nth_per_core + j];
          if (osID != -1) {
            KMP_CPU_SET(osID, mask);
            // ... (for fine granularity, the first proc is enough)
          }
        }
        // ...
      }
    } else { // nthreads > ncores
      // Histogram helpers: procs per core, cores with exactly x procs, and
      // cores with at least x procs.
      int *nproc_at_core = (int *)KMP_ALLOCA(sizeof(int) * ncores);
      int *ncores_with_x_procs =
          (int *)KMP_ALLOCA(sizeof(int) * (nth_per_core + 1));
      int *ncores_with_x_to_max_procs =
          (int *)KMP_ALLOCA(sizeof(int) * (nth_per_core + 1));

      for (int i = 0; i <= nth_per_core; i++) {
        ncores_with_x_procs[i] = 0;
        ncores_with_x_to_max_procs[i] = 0;
      }

      for (int i = 0; i < ncores; i++) {
        int cnt = 0;
        for (int j = 0; j < nth_per_core; j++) {
          if (procarr[i * nth_per_core + j] != -1) {
            cnt++;
          }
        }
        nproc_at_core[i] = cnt;
        ncores_with_x_procs[cnt]++;
      }

      for (int i = 0; i <= nth_per_core; i++) {
        for (int j = i; j <= nth_per_core; j++) {
          ncores_with_x_to_max_procs[i] += ncores_with_x_procs[j];
        }
      }

      // Maximum number of processors.
      int nproc = nth_per_core * ncores;
      // newarr[i] counts the threads assigned to each proc slot.
      // ...
      for (int i = 0; i < nproc; i++) {
        // ... (initialize newarr to zero)
      }

      // Distribute the threads: fill every available slot once, then keep
      // cycling, stacking further threads on already-used slots.
      for (int j = 1; j <= nth_per_core; j++) {
        int cnt = ncores_with_x_to_max_procs[j];
        for (int i = 0; i < ncores; i++) {
          // Skip cores with no available processors.
          if (nproc_at_core[i] == 0) {
            continue;
          }
          for (int k = 0; k < nth_per_core; k++) {
            if (procarr[i * nth_per_core + k] != -1) {
              if (newarr[i * nth_per_core + k] == 0) {
                newarr[i * nth_per_core + k] = 1;
                // ... (one thread placed; decrement cnt and nth)
              } else {
                // ... (on later passes, stack another thread here)
                newarr[i * nth_per_core + k]++;
                // ...
              }
            }
          }
          if (cnt == 0 || nth == 0) {
            break;
          }
        }
        // ...
      }

      // Find tid's slot by walking the cumulative assignment counts.
      for (int i = 0; i < nproc; i++) {
        // ... (when the running sum passes tid:)
        if (fine_gran) {
          int osID = procarr[i];
          KMP_CPU_SET(osID, mask);
        } else {
          int coreID = i / nth_per_core;
          for (int ii = 0; ii < nth_per_core; ii++) {
            int osID = procarr[coreID * nth_per_core + ii];
            if (osID != -1) {
              KMP_CPU_SET(osID, mask);
            }
          }
        }
        // ...
      }
      // ...
    }

    if (__kmp_affinity.flags.verbose) {
      char buf[KMP_AFFIN_MASK_PRINT_LEN];
      __kmp_affinity_print_mask(buf, KMP_AFFIN_MASK_PRINT_LEN, mask);
      // ... (report the bound OS proc set)
    }
    __kmp_affinity_get_thread_topology_info(th);
    __kmp_set_system_affinity(mask, TRUE);
  }
}
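// [Illustrative sketch, not part of the runtime.] The uniform-topology case
// above splits nthreads over ncores so the first nthreads % ncores "big"
// cores take one extra thread, e.g. 10 threads on 4 cores -> 3,3,2,2:

#include <cstdio>

int main() {
  const int nthreads = 10, ncores = 4;
  const int chunk = nthreads / ncores;     // 2 threads per core
  const int big_cores = nthreads % ncores; // 2 cores get chunk + 1
  const int big_nth = (chunk + 1) * big_cores;
  for (int tid = 0; tid < nthreads; ++tid) {
    int coreID =
        (tid < big_nth) ? tid / (chunk + 1) : (tid - big_cores) / chunk;
    printf("thread %d -> core %d\n", tid, coreID); // 0,0,0,1,1,1,2,2,3,3
  }
  return 0;
}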
#if KMP_OS_LINUX || KMP_OS_FREEBSD || KMP_OS_NETBSD || KMP_OS_DRAGONFLY ||     \
    KMP_OS_AIX
// Restore the initial (full) affinity mask for the calling thread, so user
// code can run non-OpenMP parallel regions with the original binding.
int kmp_set_thread_affinity_mask_initial() {
  // ...
  if (/* ... gtid < 0: not an OpenMP-managed thread ... */) {
    KA_TRACE(30, ("kmp_set_thread_affinity_mask_initial: "
                  "non-omp thread, returning\n"));
    return -1;
  }
  if (/* ... affinity is not capable or not yet initialized ... */) {
    KA_TRACE(30, ("kmp_set_thread_affinity_mask_initial: "
                  "affinity not initialized, returning\n"));
    return -1;
  }
  KA_TRACE(30, ("kmp_set_thread_affinity_mask_initial: "
                "set full mask for thread %d\n",
                gtid));
  // ...
#if KMP_OS_AIX
  return bindprocessor(BINDTHREAD, thread_self(), PROCESSOR_CLASS_ANY);
#else
  return __kmp_set_system_affinity(__kmp_affin_fullMask, FALSE);
#endif
}
#endif