14 #include "kmp_affinity.h" 19 #include "kmp_stats.h" 21 #include "kmp_wait_release.h" 22 #include "kmp_wrapper_getpid.h" 24 #if !KMP_OS_DRAGONFLY && !KMP_OS_FREEBSD && !KMP_OS_NETBSD && !KMP_OS_OPENBSD 28 #include <semaphore.h> 29 #include <sys/resource.h> 30 #include <sys/syscall.h> 32 #include <sys/times.h> 36 #include <sys/sysinfo.h> 51 #include <mach/mach.h> 52 #include <sys/sysctl.h> 53 #elif KMP_OS_DRAGONFLY || KMP_OS_FREEBSD 54 #include <sys/types.h> 55 #include <sys/sysctl.h> 57 #include <pthread_np.h> 58 #elif KMP_OS_NETBSD || KMP_OS_OPENBSD 59 #include <sys/types.h> 60 #include <sys/sysctl.h> 67 #include "tsan_annotations.h" 69 struct kmp_sys_timer {
70 struct timespec start;
74 #define TS2NS(timespec) \ 75 (((timespec).tv_sec * (long int)1e9) + (timespec).tv_nsec) 77 static struct kmp_sys_timer __kmp_sys_timer_data;
#if KMP_HANDLE_SIGNALS
typedef void (*sig_func_t)(int);
STATIC_EFI2_WORKAROUND struct sigaction __kmp_sighldrs[NSIG];
static sigset_t __kmp_sigset;
#endif

static int __kmp_init_runtime = FALSE;

static int __kmp_fork_count = 0;

static pthread_condattr_t __kmp_suspend_cond_attr;
static pthread_mutexattr_t __kmp_suspend_mutex_attr;

static kmp_cond_align_t __kmp_wait_cv;
static kmp_mutex_align_t __kmp_wait_mx;

kmp_uint64 __kmp_ticks_per_msec = 1000000;
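// Nominal clock ticks per millisecond; on x86 this default is recalibrated
// at startup by __kmp_initialize_system_tick() below.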
#ifdef DEBUG_SUSPEND
static void __kmp_print_cond(char *buffer, kmp_cond_align_t *cond) {
  KMP_SNPRINTF(buffer, 128, "(cond (lock (%ld, %d)), (descr (%p)))",
               cond->c_cond.__c_lock.__status, cond->c_cond.__c_lock.__spinlock,
               cond->c_cond.__c_waiting);
}
#endif
#if ((KMP_OS_LINUX || KMP_OS_FREEBSD) && KMP_AFFINITY_SUPPORTED)

/* Affinity support */

void __kmp_affinity_bind_thread(int which) {
  KMP_ASSERT2(KMP_AFFINITY_CAPABLE(),
              "Illegal set affinity operation when not capable");

  kmp_affin_mask_t *mask;
  KMP_CPU_ALLOC_ON_STACK(mask);
  KMP_CPU_ZERO(mask);
  KMP_CPU_SET(which, mask);
  __kmp_set_system_affinity(mask, TRUE);
  KMP_CPU_FREE_FROM_STACK(mask);
}
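/* Determine if we can access affinity functionality on this version of
   Linux* OS by checking __NR_sched_{get,set}affinity system calls, and set
   __kmp_affin_mask_size to the appropriate value (0 means not capable). */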
void __kmp_affinity_determine_capable(const char *env_var) {
  // Check and see if the OS supports thread affinity.

#if KMP_OS_LINUX
#define KMP_CPU_SET_SIZE_LIMIT (1024 * 1024)
#elif KMP_OS_FREEBSD
#define KMP_CPU_SET_SIZE_LIMIT (sizeof(cpuset_t))
#endif

#if KMP_OS_LINUX
  long gCode;
  long sCode;
  unsigned char *buf;
  buf = (unsigned char *)KMP_INTERNAL_MALLOC(KMP_CPU_SET_SIZE_LIMIT);

  // If the syscall returns a suggestion for the size, then we don't have to
  // search for an appropriate size.
  gCode = syscall(__NR_sched_getaffinity, 0, KMP_CPU_SET_SIZE_LIMIT, buf);
  KA_TRACE(30, ("__kmp_affinity_determine_capable: "
                "initial getaffinity call returned %ld errno = %d\n",
                gCode, errno));

  if (gCode < 0) {
    // System call not supported
    if (__kmp_affinity_verbose ||
        (__kmp_affinity_warnings && (__kmp_affinity_type != affinity_none) &&
         (__kmp_affinity_type != affinity_default) &&
         (__kmp_affinity_type != affinity_disabled))) {
      int error = errno;
      kmp_msg_t err_code = KMP_ERR(error);
      __kmp_msg(kmp_ms_warning, KMP_MSG(GetAffSysCallNotSupported, env_var),
                err_code, __kmp_msg_null);
      if (__kmp_generate_warnings == kmp_warnings_off) {
        __kmp_str_free(&err_code.str);
      }
    }
    KMP_AFFINITY_DISABLE();
    KMP_INTERNAL_FREE(buf);
    return;
  }
  if (gCode > 0) {
    // The optimal situation: the OS returns the size of the buffer it expects.
    // A verification of correct behavior is that setaffinity on a NULL
    // buffer with the same size fails with errno set to EFAULT.
    sCode = syscall(__NR_sched_setaffinity, 0, gCode, NULL);
    KA_TRACE(30, ("__kmp_affinity_determine_capable: "
                  "setaffinity for mask size %ld returned %ld errno = %d\n",
                  gCode, sCode, errno));
    if (sCode < 0) {
      if (errno == ENOSYS) {
        if (__kmp_affinity_verbose ||
            (__kmp_affinity_warnings &&
             (__kmp_affinity_type != affinity_none) &&
             (__kmp_affinity_type != affinity_default) &&
             (__kmp_affinity_type != affinity_disabled))) {
          int error = errno;
          kmp_msg_t err_code = KMP_ERR(error);
          __kmp_msg(kmp_ms_warning, KMP_MSG(SetAffSysCallNotSupported, env_var),
                    err_code, __kmp_msg_null);
          if (__kmp_generate_warnings == kmp_warnings_off) {
            __kmp_str_free(&err_code.str);
          }
        }
        KMP_AFFINITY_DISABLE();
        KMP_INTERNAL_FREE(buf);
        return;
      }
      if (errno == EFAULT) {
        KMP_AFFINITY_ENABLE(gCode);
        KA_TRACE(10, ("__kmp_affinity_determine_capable: "
                      "affinity supported (mask size %d)\n",
                      (int)__kmp_affin_mask_size));
        KMP_INTERNAL_FREE(buf);
        return;
      }
    }
  }
  // Call the getaffinity system call repeatedly with increasing set sizes
  // until a valid input buffer is encountered.
  KA_TRACE(30, ("__kmp_affinity_determine_capable: "
                "searching for proper set size\n"));
  long size;
  for (size = 1; size <= KMP_CPU_SET_SIZE_LIMIT; size *= 2) {
    gCode = syscall(__NR_sched_getaffinity, 0, size, buf);
    KA_TRACE(30, ("__kmp_affinity_determine_capable: "
                  "getaffinity for mask size %ld returned %ld errno = %d\n",
                  size, gCode, errno));

    if (gCode < 0) {
      if (errno == ENOSYS) {
        // We shouldn't get here.
        KA_TRACE(30, ("__kmp_affinity_determine_capable: "
                      "inconsistent OS call behavior: errno == ENOSYS for mask "
                      "size %ld\n",
                      size));
        if (__kmp_affinity_verbose ||
            (__kmp_affinity_warnings &&
             (__kmp_affinity_type != affinity_none) &&
             (__kmp_affinity_type != affinity_default) &&
             (__kmp_affinity_type != affinity_disabled))) {
          int error = errno;
          kmp_msg_t err_code = KMP_ERR(error);
          __kmp_msg(kmp_ms_warning, KMP_MSG(GetAffSysCallNotSupported, env_var),
                    err_code, __kmp_msg_null);
          if (__kmp_generate_warnings == kmp_warnings_off) {
            __kmp_str_free(&err_code.str);
          }
        }
        KMP_AFFINITY_DISABLE();
        KMP_INTERNAL_FREE(buf);
        return;
      }
      continue;
    }
    sCode = syscall(__NR_sched_setaffinity, 0, gCode, NULL);
    KA_TRACE(30, ("__kmp_affinity_determine_capable: "
                  "setaffinity for mask size %ld returned %ld errno = %d\n",
                  gCode, sCode, errno));
    if (sCode < 0) {
      if (errno == ENOSYS) {
        // We shouldn't get here.
        KA_TRACE(30, ("__kmp_affinity_determine_capable: "
                      "inconsistent OS call behavior: errno == ENOSYS for mask "
                      "size %ld\n",
                      size));
        if (__kmp_affinity_verbose ||
            (__kmp_affinity_warnings &&
             (__kmp_affinity_type != affinity_none) &&
             (__kmp_affinity_type != affinity_default) &&
             (__kmp_affinity_type != affinity_disabled))) {
          int error = errno;
          kmp_msg_t err_code = KMP_ERR(error);
          __kmp_msg(kmp_ms_warning, KMP_MSG(SetAffSysCallNotSupported, env_var),
                    err_code, __kmp_msg_null);
          if (__kmp_generate_warnings == kmp_warnings_off) {
            __kmp_str_free(&err_code.str);
          }
        }
        KMP_AFFINITY_DISABLE();
        KMP_INTERNAL_FREE(buf);
        return;
      }
      if (errno == EFAULT) {
        KMP_AFFINITY_ENABLE(gCode);
        KA_TRACE(10, ("__kmp_affinity_determine_capable: "
                      "affinity supported (mask size %d)\n",
                      (int)__kmp_affin_mask_size));
        KMP_INTERNAL_FREE(buf);
        return;
      }
    }
  }
#elif KMP_OS_FREEBSD
  long gCode;
  unsigned char *buf;
  buf = (unsigned char *)KMP_INTERNAL_MALLOC(KMP_CPU_SET_SIZE_LIMIT);
  gCode = pthread_getaffinity_np(pthread_self(), KMP_CPU_SET_SIZE_LIMIT,
                                 reinterpret_cast<cpuset_t *>(buf));
  KA_TRACE(30, ("__kmp_affinity_determine_capable: "
                "initial getaffinity call returned %d errno = %d\n",
                gCode, errno));
  if (gCode == 0) {
    KMP_AFFINITY_ENABLE(KMP_CPU_SET_SIZE_LIMIT);
    KA_TRACE(10, ("__kmp_affinity_determine_capable: "
                  "affinity supported (mask size %d)\n",
                  (int)__kmp_affin_mask_size));
    KMP_INTERNAL_FREE(buf);
    return;
  }
#endif
  KMP_INTERNAL_FREE(buf);
  // Affinity is not supported.
  KMP_AFFINITY_DISABLE();
  KA_TRACE(10, ("__kmp_affinity_determine_capable: "
                "cannot determine mask size - affinity not supported\n"));
  if (__kmp_affinity_verbose ||
      (__kmp_affinity_warnings && (__kmp_affinity_type != affinity_none) &&
       (__kmp_affinity_type != affinity_default) &&
       (__kmp_affinity_type != affinity_disabled))) {
    KMP_WARNING(AffCantGetMaskSize, env_var);
  }
}
#endif // KMP_OS_LINUX && KMP_AFFINITY_SUPPORTED

#if KMP_USE_FUTEX

int __kmp_futex_determine_capable() {
  int loc = 0;
  long rc = syscall(__NR_futex, &loc, FUTEX_WAKE, 1, NULL, NULL, 0);
  int retval = (rc == 0) || (errno != ENOSYS);

  KA_TRACE(10,
           ("__kmp_futex_determine_capable: rc = %d errno = %d\n", rc, errno));
  KA_TRACE(10, ("__kmp_futex_determine_capable: futex syscall%s supported\n",
                retval ? "" : " not"));

  return retval;
}

#endif // KMP_USE_FUTEX
#if (KMP_ARCH_X86 || KMP_ARCH_X86_64) && (!KMP_ASM_INTRINS)

kmp_int8 __kmp_test_then_or8(volatile kmp_int8 *p, kmp_int8 d) {
  kmp_int8 old_value, new_value;

  old_value = TCR_1(*p);
  new_value = old_value | d;

  while (!KMP_COMPARE_AND_STORE_REL8(p, old_value, new_value)) {
    KMP_CPU_PAUSE();
    old_value = TCR_1(*p);
    new_value = old_value | d;
  }
  return old_value;
}
kmp_int8 __kmp_test_then_and8(volatile kmp_int8 *p, kmp_int8 d) {
  kmp_int8 old_value, new_value;

  old_value = TCR_1(*p);
  new_value = old_value & d;

  while (!KMP_COMPARE_AND_STORE_REL8(p, old_value, new_value)) {
    KMP_CPU_PAUSE();
    old_value = TCR_1(*p);
    new_value = old_value & d;
  }
  return old_value;
}
kmp_uint32 __kmp_test_then_or32(volatile kmp_uint32 *p, kmp_uint32 d) {
  kmp_uint32 old_value, new_value;

  old_value = TCR_4(*p);
  new_value = old_value | d;

  while (!KMP_COMPARE_AND_STORE_REL32(p, old_value, new_value)) {
    KMP_CPU_PAUSE();
    old_value = TCR_4(*p);
    new_value = old_value | d;
  }
  return old_value;
}
kmp_uint32 __kmp_test_then_and32(volatile kmp_uint32 *p, kmp_uint32 d) {
  kmp_uint32 old_value, new_value;

  old_value = TCR_4(*p);
  new_value = old_value & d;

  while (!KMP_COMPARE_AND_STORE_REL32(p, old_value, new_value)) {
    KMP_CPU_PAUSE();
    old_value = TCR_4(*p);
    new_value = old_value & d;
  }
  return old_value;
}
kmp_int8 __kmp_test_then_add8(volatile kmp_int8 *p, kmp_int8 d) {
  kmp_int8 old_value, new_value;

  old_value = TCR_1(*p);
  new_value = old_value + d;

  while (!KMP_COMPARE_AND_STORE_REL8(p, old_value, new_value)) {
    KMP_CPU_PAUSE();
    old_value = TCR_1(*p);
    new_value = old_value + d;
  }
  return old_value;
}
kmp_int64 __kmp_test_then_add64(volatile kmp_int64 *p, kmp_int64 d) {
  kmp_int64 old_value, new_value;

  old_value = TCR_8(*p);
  new_value = old_value + d;

  while (!KMP_COMPARE_AND_STORE_REL64(p, old_value, new_value)) {
    KMP_CPU_PAUSE();
    old_value = TCR_8(*p);
    new_value = old_value + d;
  }
  return old_value;
}
kmp_uint64 __kmp_test_then_or64(volatile kmp_uint64 *p, kmp_uint64 d) {
  kmp_uint64 old_value, new_value;

  old_value = TCR_8(*p);
  new_value = old_value | d;
  while (!KMP_COMPARE_AND_STORE_REL64(p, old_value, new_value)) {
    KMP_CPU_PAUSE();
    old_value = TCR_8(*p);
    new_value = old_value | d;
  }
  return old_value;
}
kmp_uint64 __kmp_test_then_and64(volatile kmp_uint64 *p, kmp_uint64 d) {
  kmp_uint64 old_value, new_value;

  old_value = TCR_8(*p);
  new_value = old_value & d;
  while (!KMP_COMPARE_AND_STORE_REL64(p, old_value, new_value)) {
    KMP_CPU_PAUSE();
    old_value = TCR_8(*p);
    new_value = old_value & d;
  }
  return old_value;
}

#endif /* (KMP_ARCH_X86 || KMP_ARCH_X86_64) && (!KMP_ASM_INTRINS) */
void __kmp_terminate_thread(int gtid) {
  int status;
  kmp_info_t *th = __kmp_threads[gtid];

  if (!th)
    return;

#ifdef KMP_CANCEL_THREADS
  KA_TRACE(10, ("__kmp_terminate_thread: kill (%d)\n", gtid));
  status = pthread_cancel(th->th.th_info.ds.ds_thread);
  if (status != 0 && status != ESRCH) {
    __kmp_fatal(KMP_MSG(CantTerminateWorkerThread), KMP_ERR(status),
                __kmp_msg_null);
  }
#endif
  KMP_YIELD(TRUE);
} //
/* Set thread stack info according to values returned by pthread_getattr_np().
   If values are unreasonable, assume call failed and use incremental stack
   refinement method instead. Returns TRUE if the stack parameters could be
   determined exactly, FALSE if incremental refinement is necessary. */
static kmp_int32 __kmp_set_stack_info(int gtid, kmp_info_t *th) {
  int stack_data;
#if KMP_OS_LINUX || KMP_OS_DRAGONFLY || KMP_OS_FREEBSD || KMP_OS_NETBSD ||    \
    KMP_OS_HURD
  pthread_attr_t attr;
  int status;
  size_t size = 0;
  void *addr = 0;

  /* Always do incremental stack refinement for ubermaster threads since the
     initial thread stack range can be reduced by sibling thread creation so
     pthread_attr_getstack may cause thread gtid aliasing */
  if (!KMP_UBER_GTID(gtid)) {

    /* Fetch the real thread attributes */
    status = pthread_attr_init(&attr);
    KMP_CHECK_SYSFAIL("pthread_attr_init", status);
#if KMP_OS_DRAGONFLY || KMP_OS_FREEBSD || KMP_OS_NETBSD
    status = pthread_attr_get_np(pthread_self(), &attr);
    KMP_CHECK_SYSFAIL("pthread_attr_get_np", status);
#else
    status = pthread_getattr_np(pthread_self(), &attr);
    KMP_CHECK_SYSFAIL("pthread_getattr_np", status);
#endif
    status = pthread_attr_getstack(&attr, &addr, &size);
    KMP_CHECK_SYSFAIL("pthread_attr_getstack", status);
    KA_TRACE(60,
             ("__kmp_set_stack_info: T#%d pthread_attr_getstack returned size:"
              " %lu, low addr: %p\n",
              gtid, size, addr));
    status = pthread_attr_destroy(&attr);
    KMP_CHECK_SYSFAIL("pthread_attr_destroy", status);
  }

  if (size != 0 && addr != 0) { // was stack parameter determination successful?
    /* Store the correct base and size */
    TCW_PTR(th->th.th_info.ds.ds_stackbase, (((char *)addr) + size));
    TCW_PTR(th->th.th_info.ds.ds_stacksize, size);
    TCW_4(th->th.th_info.ds.ds_stackgrow, FALSE);
    return TRUE;
  }
#endif
  /* Use incremental refinement starting from initial conservative estimate */
  TCW_PTR(th->th.th_info.ds.ds_stacksize, 0);
  TCW_PTR(th->th.th_info.ds.ds_stackbase, &stack_data);
  TCW_4(th->th.th_info.ds.ds_stackgrow, TRUE);
  return FALSE;
}
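// Entry point for worker threads created by __kmp_create_worker(). It
// publishes the gtid via pthread_setspecific()/TLS, applies the initial
// affinity mask and x87/MXCSR settings, offsets its stack usage by
// gtid * __kmp_stkoffset to reduce cache and TLB aliasing between workers,
// and then runs __kmp_launch_thread() until the thread retires.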
static void *__kmp_launch_worker(void *thr) {
  int status, old_type, old_state;
#ifdef KMP_BLOCK_SIGNALS
  sigset_t new_set, old_set;
#endif /* KMP_BLOCK_SIGNALS */
  void *exit_val;
#if KMP_OS_LINUX || KMP_OS_DRAGONFLY || KMP_OS_FREEBSD || KMP_OS_NETBSD ||    \
    KMP_OS_OPENBSD || KMP_OS_HURD
  void *volatile padding = 0;
#endif
  int gtid;

  gtid = ((kmp_info_t *)thr)->th.th_info.ds.ds_gtid;
  __kmp_gtid_set_specific(gtid);
#ifdef KMP_TDATA_GTID
  __kmp_gtid = gtid;
#endif
#if KMP_STATS_ENABLED
  // set thread local index to point to thread-specific stats
  __kmp_stats_thread_ptr = ((kmp_info_t *)thr)->th.th_stats;
  __kmp_stats_thread_ptr->startLife();
  KMP_SET_THREAD_STATE(IDLE);
  KMP_INIT_PARTITIONED_TIMERS(OMP_idle);
#endif

#if USE_ITT_BUILD
  __kmp_itt_thread_name(gtid);
#endif /* USE_ITT_BUILD */

#if KMP_AFFINITY_SUPPORTED
  __kmp_affinity_set_init_mask(gtid, FALSE);
#endif

#ifdef KMP_CANCEL_THREADS
  status = pthread_setcanceltype(PTHREAD_CANCEL_ASYNCHRONOUS, &old_type);
  KMP_CHECK_SYSFAIL("pthread_setcanceltype", status);
  status = pthread_setcancelstate(PTHREAD_CANCEL_ENABLE, &old_state);
  KMP_CHECK_SYSFAIL("pthread_setcancelstate", status);
#endif

#if KMP_ARCH_X86 || KMP_ARCH_X86_64
  // Set FP control regs to be a copy of the parallel initialization thread's.
  __kmp_clear_x87_fpu_status_word();
  __kmp_load_x87_fpu_control_word(&__kmp_init_x87_fpu_control_word);
  __kmp_load_mxcsr(&__kmp_init_mxcsr);
#endif /* KMP_ARCH_X86 || KMP_ARCH_X86_64 */

#ifdef KMP_BLOCK_SIGNALS
  status = sigfillset(&new_set);
  KMP_CHECK_SYSFAIL_ERRNO("sigfillset", status);
  status = pthread_sigmask(SIG_BLOCK, &new_set, &old_set);
  KMP_CHECK_SYSFAIL("pthread_sigmask", status);
#endif /* KMP_BLOCK_SIGNALS */

#if KMP_OS_LINUX || KMP_OS_DRAGONFLY || KMP_OS_FREEBSD || KMP_OS_NETBSD ||    \
    KMP_OS_OPENBSD
  if (__kmp_stkoffset > 0 && gtid > 0) {
    padding = KMP_ALLOCA(gtid * __kmp_stkoffset);
  }
#endif

  KMP_MB();
  __kmp_set_stack_info(gtid, (kmp_info_t *)thr);

  __kmp_check_stack_overlap((kmp_info_t *)thr);

  exit_val = __kmp_launch_thread((kmp_info_t *)thr);

#ifdef KMP_BLOCK_SIGNALS
  status = pthread_sigmask(SIG_SETMASK, &old_set, NULL);
  KMP_CHECK_SYSFAIL("pthread_sigmask", status);
#endif /* KMP_BLOCK_SIGNALS */

  return exit_val;
}
#if KMP_USE_MONITOR
/* The monitor thread controls all of the threads in the complex by waking
   them at regular intervals. */
static void *__kmp_launch_monitor(void *thr) {
  int status, old_type, old_state;
#ifdef KMP_BLOCK_SIGNALS
  sigset_t new_set;
#endif /* KMP_BLOCK_SIGNALS */
  struct timespec interval;

  KMP_MB(); /* Flush all pending memory write invalidates. */

  KA_TRACE(10, ("__kmp_launch_monitor: #1 launched\n"));

  /* register us as the monitor thread */
  __kmp_gtid_set_specific(KMP_GTID_MONITOR);
#ifdef KMP_TDATA_GTID
  __kmp_gtid = KMP_GTID_MONITOR;
#endif

  KMP_MB();

#if USE_ITT_BUILD
  // Instruct Intel(R) Threading Tools to ignore monitor thread.
  __kmp_itt_thread_ignore();
#endif /* USE_ITT_BUILD */

  __kmp_set_stack_info(((kmp_info_t *)thr)->th.th_info.ds.ds_gtid,
                       (kmp_info_t *)thr);

  __kmp_check_stack_overlap((kmp_info_t *)thr);

#ifdef KMP_CANCEL_THREADS
  status = pthread_setcanceltype(PTHREAD_CANCEL_ASYNCHRONOUS, &old_type);
  KMP_CHECK_SYSFAIL("pthread_setcanceltype", status);
  status = pthread_setcancelstate(PTHREAD_CANCEL_ENABLE, &old_state);
  KMP_CHECK_SYSFAIL("pthread_setcancelstate", status);
#endif

#if KMP_REAL_TIME_FIX
  // Were we started with a real-time scheduling policy? If so, try to raise
  // the monitor's priority so it is not starved by the worker threads.
  {
    int sched = sched_getscheduler(0);
    if (sched == SCHED_FIFO || sched == SCHED_RR) {
      struct sched_param param;
      int max_priority = sched_get_priority_max(sched);
      int rc;
      KMP_WARNING(RealTimeSchedNotSupported);
      sched_getparam(0, &param);
      if (param.sched_priority < max_priority) {
        param.sched_priority += 1;
        rc = sched_setscheduler(0, sched, &param);
        if (rc != 0) {
          int error = errno;
          kmp_msg_t err_code = KMP_ERR(error);
          __kmp_msg(kmp_ms_warning, KMP_MSG(CantChangeMonitorPriority),
                    err_code, KMP_MSG(MonitorWillStarve), __kmp_msg_null);
          if (__kmp_generate_warnings == kmp_warnings_off) {
            __kmp_str_free(&err_code.str);
          }
        }
      } else {
        // We cannot abort here, because number of CPUs may be enough for all
        // the threads, including the monitor thread, so application could
        // potentially work.
        __kmp_msg(kmp_ms_warning, KMP_MSG(RunningAtMaxPriority),
                  KMP_MSG(MonitorWillStarve), KMP_HNT(RunningAtMaxPriority),
                  __kmp_msg_null);
      }
    }
    // AC: free thread that waits for monitor started
    TCW_4(__kmp_global.g.g_time.dt.t_value, 0);
  }
#endif // KMP_REAL_TIME_FIX

  KMP_MB(); /* Flush all pending memory write invalidates. */

  if (__kmp_monitor_wakeups == 1) {
    interval.tv_sec = 1;
    interval.tv_nsec = 0;
  } else {
    interval.tv_sec = 0;
    interval.tv_nsec = (KMP_NSEC_PER_SEC / __kmp_monitor_wakeups);
  }

  KA_TRACE(10, ("__kmp_launch_monitor: #2 monitor\n"));

  while (!TCR_4(__kmp_global.g.g_done)) {
    struct timespec now;
    struct timeval tval;

    /* This thread monitors the state of the system. */

    KA_TRACE(15, ("__kmp_launch_monitor: update\n"));

    status = gettimeofday(&tval, NULL);
    KMP_CHECK_SYSFAIL_ERRNO("gettimeofday", status);
    TIMEVAL_TO_TIMESPEC(&tval, &now);

    now.tv_sec += interval.tv_sec;
    now.tv_nsec += interval.tv_nsec;

    if (now.tv_nsec >= KMP_NSEC_PER_SEC) {
      now.tv_sec += 1;
      now.tv_nsec -= KMP_NSEC_PER_SEC;
    }

    status = pthread_mutex_lock(&__kmp_wait_mx.m_mutex);
    KMP_CHECK_SYSFAIL("pthread_mutex_lock", status);
    // AC: the monitor should not fall asleep if g_done has been set
    if (!TCR_4(__kmp_global.g.g_done)) { // check once more under mutex
      status = pthread_cond_timedwait(&__kmp_wait_cv.c_cond,
                                      &__kmp_wait_mx.m_mutex, &now);
      if (status != 0) {
        if (status != ETIMEDOUT && status != EINTR) {
          KMP_SYSFAIL("pthread_cond_timedwait", status);
        }
      }
    }
    status = pthread_mutex_unlock(&__kmp_wait_mx.m_mutex);
    KMP_CHECK_SYSFAIL("pthread_mutex_unlock", status);

    TCW_4(__kmp_global.g.g_time.dt.t_value,
          TCR_4(__kmp_global.g.g_time.dt.t_value) + 1);

    KMP_MB(); /* Flush all pending memory write invalidates. */
  }

  KA_TRACE(10, ("__kmp_launch_monitor: #3 cleanup\n"));

#ifdef KMP_BLOCK_SIGNALS
  status = sigfillset(&new_set);
  KMP_CHECK_SYSFAIL_ERRNO("sigfillset", status);
  status = pthread_sigmask(SIG_UNBLOCK, &new_set, NULL);
  KMP_CHECK_SYSFAIL("pthread_sigmask", status);
#endif /* KMP_BLOCK_SIGNALS */

  KA_TRACE(10, ("__kmp_launch_monitor: #4 finished\n"));

  if (__kmp_global.g.g_abort != 0) {
    /* now we need to terminate the worker threads; the value of t_abort is
       the signal we caught */
    int gtid;

    KA_TRACE(10, ("__kmp_launch_monitor: #5 terminate sig=%d\n",
                  __kmp_global.g.g_abort));

    /* terminate the OpenMP worker threads */
    for (gtid = 1; gtid < __kmp_threads_capacity; ++gtid)
      __kmp_terminate_thread(gtid);

    __kmp_cleanup();

    KA_TRACE(10, ("__kmp_launch_monitor: #6 raise sig=%d\n",
                  __kmp_global.g.g_abort));

    if (__kmp_global.g.g_abort > 0)
      raise(__kmp_global.g.g_abort);
  }

  KA_TRACE(10, ("__kmp_launch_monitor: #7 exit\n"));

  return thr;
}
#endif // KMP_USE_MONITOR

void __kmp_create_worker(int gtid, kmp_info_t *th, size_t stack_size) {
  pthread_t handle;
  pthread_attr_t thread_attr;
  int status;

  th->th.th_info.ds.ds_gtid = gtid;

#if KMP_STATS_ENABLED
  // sets up worker thread stats
  __kmp_acquire_tas_lock(&__kmp_stats_lock, gtid);

  // th->th.th_stats is used to transfer thread-specific stats-pointer to
  // __kmp_launch_worker, which sets its thread-local pointer from it.
  if (!KMP_UBER_GTID(gtid)) {
    th->th.th_stats = __kmp_stats_list->push_back(gtid);
  } else {
    // For root threads, __kmp_stats_thread_ptr is set in
    // __kmp_register_root(), so set th->th.th_stats field to it.
    th->th.th_stats = __kmp_stats_thread_ptr;
  }
  __kmp_release_tas_lock(&__kmp_stats_lock, gtid);
#endif // KMP_STATS_ENABLED

  if (KMP_UBER_GTID(gtid)) {
    KA_TRACE(10, ("__kmp_create_worker: uber thread (%d)\n", gtid));
    th->th.th_info.ds.ds_thread = pthread_self();
    __kmp_set_stack_info(gtid, th);
    __kmp_check_stack_overlap(th);
    return;
  }

  KA_TRACE(10, ("__kmp_create_worker: try to create thread (%d)\n", gtid));

  KMP_MB(); /* Flush all pending memory write invalidates. */

#ifdef KMP_THREAD_ATTR
  status = pthread_attr_init(&thread_attr);
  if (status != 0) {
    __kmp_fatal(KMP_MSG(CantInitThreadAttrs), KMP_ERR(status), __kmp_msg_null);
  }
  status = pthread_attr_setdetachstate(&thread_attr, PTHREAD_CREATE_JOINABLE);
  if (status != 0) {
    __kmp_fatal(KMP_MSG(CantSetWorkerState), KMP_ERR(status), __kmp_msg_null);
  }

  /* Set stack size for this thread now. Allocate twice the offset per gtid so
     the upcoming alloca() does not eliminate any premade offset, and the user
     still gets the requested stack space for all threads. */
  stack_size += gtid * __kmp_stkoffset * 2;

#if defined(__ANDROID__) && __ANDROID_API__ < 19
  // Round the stack size to a multiple of the page size. Older versions of
  // Android (until KitKat) would fail pthread_attr_setstacksize with EINVAL
  // if the stack size was not a multiple of the page size.
  stack_size = (stack_size + PAGE_SIZE - 1) & ~(PAGE_SIZE - 1);
#endif

  KA_TRACE(10, ("__kmp_create_worker: T#%d, default stacksize = %lu bytes, "
                "__kmp_stksize = %lu bytes, final stacksize = %lu bytes\n",
                gtid, KMP_DEFAULT_STKSIZE, __kmp_stksize, stack_size));

#ifdef _POSIX_THREAD_ATTR_STACKSIZE
  status = pthread_attr_setstacksize(&thread_attr, stack_size);
#ifdef KMP_BACKUP_STKSIZE
  if (status != 0) {
    if (!__kmp_env_stksize) {
      stack_size = KMP_BACKUP_STKSIZE + gtid * __kmp_stkoffset;
      __kmp_stksize = KMP_BACKUP_STKSIZE;
      KA_TRACE(10, ("__kmp_create_worker: T#%d, default stacksize = %lu bytes, "
                    "__kmp_stksize = %lu bytes, (backup) final stacksize = %lu "
                    "bytes\n",
                    gtid, KMP_DEFAULT_STKSIZE, __kmp_stksize, stack_size));
      status = pthread_attr_setstacksize(&thread_attr, stack_size);
    }
  }
#endif /* KMP_BACKUP_STKSIZE */
  if (status != 0) {
    __kmp_fatal(KMP_MSG(CantSetWorkerStackSize, stack_size), KMP_ERR(status),
                KMP_HNT(ChangeWorkerStackSize), __kmp_msg_null);
  }
#endif /* _POSIX_THREAD_ATTR_STACKSIZE */

#endif /* KMP_THREAD_ATTR */

  status =
      pthread_create(&handle, &thread_attr, __kmp_launch_worker, (void *)th);
  if (status != 0 || !handle) {
#ifdef _POSIX_THREAD_ATTR_STACKSIZE
    if (status == EINVAL) {
      __kmp_fatal(KMP_MSG(CantSetWorkerStackSize, stack_size), KMP_ERR(status),
                  KMP_HNT(IncreaseWorkerStackSize), __kmp_msg_null);
    }
    if (status == ENOMEM) {
      __kmp_fatal(KMP_MSG(CantSetWorkerStackSize, stack_size), KMP_ERR(status),
                  KMP_HNT(DecreaseWorkerStackSize), __kmp_msg_null);
    }
#endif /* _POSIX_THREAD_ATTR_STACKSIZE */
    if (status == EAGAIN) {
      __kmp_fatal(KMP_MSG(NoResourcesForWorkerThread), KMP_ERR(status),
                  KMP_HNT(Decrease_NUM_THREADS), __kmp_msg_null);
    }
    KMP_SYSFAIL("pthread_create", status);
  }

  th->th.th_info.ds.ds_thread = handle;

#ifdef KMP_THREAD_ATTR
  status = pthread_attr_destroy(&thread_attr);
  if (status) {
    kmp_msg_t err_code = KMP_ERR(status);
    __kmp_msg(kmp_ms_warning, KMP_MSG(CantDestroyThreadAttrs), err_code,
              __kmp_msg_null);
    if (__kmp_generate_warnings == kmp_warnings_off) {
      __kmp_str_free(&err_code.str);
    }
  }
#endif /* KMP_THREAD_ATTR */

  KMP_MB(); /* Flush all pending memory write invalidates. */

  KA_TRACE(10, ("__kmp_create_worker: done creating thread (%d)\n", gtid));

} // __kmp_create_worker
#if KMP_USE_MONITOR
void __kmp_create_monitor(kmp_info_t *th) {
  pthread_t handle;
  pthread_attr_t thread_attr;
  size_t size;
  int status;
  int auto_adj_size = FALSE;

  if (__kmp_dflt_blocktime == KMP_MAX_BLOCKTIME) {
    // We don't need the monitor thread in case of MAX_BLOCKTIME.
    KA_TRACE(10, ("__kmp_create_monitor: skipping monitor thread because of "
                  "MAX blocktime\n"));
    th->th.th_info.ds.ds_tid = 0; // this makes reap_monitor no-op
    th->th.th_info.ds.ds_gtid = 0;
    return;
  }
  KA_TRACE(10, ("__kmp_create_monitor: try to create monitor\n"));

  KMP_MB(); /* Flush all pending memory write invalidates. */

  th->th.th_info.ds.ds_tid = KMP_GTID_MONITOR;
  th->th.th_info.ds.ds_gtid = KMP_GTID_MONITOR;
#if KMP_REAL_TIME_FIX
  TCW_4(__kmp_global.g.g_time.dt.t_value,
        -1); // Will use it for synchronization a bit later.
#else
  TCW_4(__kmp_global.g.g_time.dt.t_value, 0);
#endif // KMP_REAL_TIME_FIX

#ifdef KMP_THREAD_ATTR
  if (__kmp_monitor_stksize == 0) {
    __kmp_monitor_stksize = KMP_DEFAULT_MONITOR_STKSIZE;
    auto_adj_size = TRUE;
  }
  status = pthread_attr_init(&thread_attr);
  if (status != 0) {
    __kmp_fatal(KMP_MSG(CantInitThreadAttrs), KMP_ERR(status), __kmp_msg_null);
  }
  status = pthread_attr_setdetachstate(&thread_attr, PTHREAD_CREATE_JOINABLE);
  if (status != 0) {
    __kmp_fatal(KMP_MSG(CantSetMonitorState), KMP_ERR(status), __kmp_msg_null);
  }

#ifdef _POSIX_THREAD_ATTR_STACKSIZE
  status = pthread_attr_getstacksize(&thread_attr, &size);
  KMP_CHECK_SYSFAIL("pthread_attr_getstacksize", status);
#else
  size = __kmp_sys_min_stksize;
#endif /* _POSIX_THREAD_ATTR_STACKSIZE */
#endif /* KMP_THREAD_ATTR */

  if (__kmp_monitor_stksize == 0) {
    __kmp_monitor_stksize = KMP_DEFAULT_MONITOR_STKSIZE;
  }
  if (__kmp_monitor_stksize < __kmp_sys_min_stksize) {
    __kmp_monitor_stksize = __kmp_sys_min_stksize;
  }

  KA_TRACE(10, ("__kmp_create_monitor: default stacksize = %lu bytes,"
                "requested stacksize = %lu bytes\n",
                size, __kmp_monitor_stksize));

retry:

/* Set stack size for this thread now. */
#ifdef _POSIX_THREAD_ATTR_STACKSIZE
  KA_TRACE(10, ("__kmp_create_monitor: setting stacksize = %lu bytes,",
                __kmp_monitor_stksize));
  status = pthread_attr_setstacksize(&thread_attr, __kmp_monitor_stksize);
  if (status != 0) {
    if (auto_adj_size) {
      __kmp_monitor_stksize *= 2;
      goto retry;
    }
    kmp_msg_t err_code = KMP_ERR(status);
    __kmp_msg(kmp_ms_warning, // should this be fatal?  BB
              KMP_MSG(CantSetMonitorStackSize, (long int)__kmp_monitor_stksize),
              err_code, KMP_HNT(ChangeMonitorStackSize), __kmp_msg_null);
    if (__kmp_generate_warnings == kmp_warnings_off) {
      __kmp_str_free(&err_code.str);
    }
  }
#endif /* _POSIX_THREAD_ATTR_STACKSIZE */

  status =
      pthread_create(&handle, &thread_attr, __kmp_launch_monitor, (void *)th);

  if (status != 0) {
#ifdef _POSIX_THREAD_ATTR_STACKSIZE
    if (status == EINVAL) {
      if (auto_adj_size && (__kmp_monitor_stksize < (size_t)0x40000000)) {
        __kmp_monitor_stksize *= 2;
        goto retry;
      }
      __kmp_fatal(KMP_MSG(CantSetMonitorStackSize, __kmp_monitor_stksize),
                  KMP_ERR(status), KMP_HNT(IncreaseMonitorStackSize),
                  __kmp_msg_null);
    }
    if (status == ENOMEM) {
      __kmp_fatal(KMP_MSG(CantSetMonitorStackSize, __kmp_monitor_stksize),
                  KMP_ERR(status), KMP_HNT(DecreaseMonitorStackSize),
                  __kmp_msg_null);
    }
#endif /* _POSIX_THREAD_ATTR_STACKSIZE */
    if (status == EAGAIN) {
      __kmp_fatal(KMP_MSG(NoResourcesForMonitorThread), KMP_ERR(status),
                  KMP_HNT(DecreaseNumberOfThreadsInUse), __kmp_msg_null);
    }
    KMP_SYSFAIL("pthread_create", status);
  }

  th->th.th_info.ds.ds_thread = handle;

#if KMP_REAL_TIME_FIX
  // Wait until the monitor thread has really started and set its priority.
  KMP_DEBUG_ASSERT(sizeof(kmp_uint32) ==
                   sizeof(__kmp_global.g.g_time.dt.t_value));
  __kmp_wait_4((kmp_uint32 volatile *)&__kmp_global.g.g_time.dt.t_value, -1,
               &__kmp_neq_4, NULL);
#endif // KMP_REAL_TIME_FIX

#ifdef KMP_THREAD_ATTR
  status = pthread_attr_destroy(&thread_attr);
  if (status != 0) {
    kmp_msg_t err_code = KMP_ERR(status);
    __kmp_msg(kmp_ms_warning, KMP_MSG(CantDestroyThreadAttrs), err_code,
              __kmp_msg_null);
    if (__kmp_generate_warnings == kmp_warnings_off) {
      __kmp_str_free(&err_code.str);
    }
  }
#endif

  KMP_MB(); /* Flush all pending memory write invalidates. */

  KA_TRACE(10, ("__kmp_create_monitor: monitor created %#.8lx\n",
                th->th.th_info.ds.ds_thread));

} // __kmp_create_monitor
#endif // KMP_USE_MONITOR

void __kmp_exit_thread(int exit_status) {
  pthread_exit((void *)(intptr_t)exit_status);
} // __kmp_exit_thread
#if KMP_USE_MONITOR
void __kmp_resume_monitor();

void __kmp_reap_monitor(kmp_info_t *th) {
  int status;
  void *exit_val;

  KA_TRACE(10, ("__kmp_reap_monitor: try to reap monitor thread with handle"
                " %#.8lx\n",
                th->th.th_info.ds.ds_thread));

  // If monitor has been created, its tid and gtid should be KMP_GTID_MONITOR.
  // If both tid and gtid are 0, it means the monitor did not ever start.
  // If both tid and gtid are KMP_GTID_DNE, the monitor has been shut down.
  KMP_DEBUG_ASSERT(th->th.th_info.ds.ds_tid == th->th.th_info.ds.ds_gtid);
  if (th->th.th_info.ds.ds_gtid != KMP_GTID_MONITOR) {
    KA_TRACE(10, ("__kmp_reap_monitor: monitor did not start, returning\n"));
    return;
  }

  KMP_MB(); /* Flush all pending memory write invalidates. */

  /* First, check to see whether the monitor thread exists to wake it up. This
     is to avoid a performance problem when the monitor sleeps during a
     blocktime-size interval. */
  status = pthread_kill(th->th.th_info.ds.ds_thread, 0);
  if (status != ESRCH) {
    __kmp_resume_monitor(); // Wake up the monitor thread.
  }
  KA_TRACE(10, ("__kmp_reap_monitor: try to join with monitor\n"));
  status = pthread_join(th->th.th_info.ds.ds_thread, &exit_val);
  if (exit_val != th) {
    __kmp_fatal(KMP_MSG(ReapMonitorError), KMP_ERR(status), __kmp_msg_null);
  }

  th->th.th_info.ds.ds_tid = KMP_GTID_DNE;
  th->th.th_info.ds.ds_gtid = KMP_GTID_DNE;

  KA_TRACE(10, ("__kmp_reap_monitor: done reaping monitor thread with handle"
                " %#.8lx\n",
                th->th.th_info.ds.ds_thread));

  KMP_MB(); /* Flush all pending memory write invalidates. */
}
#endif // KMP_USE_MONITOR

void __kmp_reap_worker(kmp_info_t *th) {
  int status;
  void *exit_val;

  KMP_MB(); /* Flush all pending memory write invalidates. */

  KA_TRACE(
      10, ("__kmp_reap_worker: try to reap T#%d\n", th->th.th_info.ds.ds_gtid));

  status = pthread_join(th->th.th_info.ds.ds_thread, &exit_val);
#ifdef KMP_DEBUG
  /* Don't expose these to the user until we understand when they trigger */
  if (status != 0) {
    __kmp_fatal(KMP_MSG(ReapWorkerError), KMP_ERR(status), __kmp_msg_null);
  }
  if (exit_val != th) {
    KA_TRACE(10, ("__kmp_reap_worker: worker T#%d did not reap properly, "
                  "exit_val = %p\n",
                  th->th.th_info.ds.ds_gtid, exit_val));
  }
#endif /* KMP_DEBUG */

  KA_TRACE(10, ("__kmp_reap_worker: done reaping T#%d\n",
                th->th.th_info.ds.ds_gtid));

  KMP_MB(); /* Flush all pending memory write invalidates. */
}
#if KMP_HANDLE_SIGNALS

static void __kmp_null_handler(int signo) {
  // Do nothing, for doing SIG_IGN-type actions.
} // __kmp_null_handler

static void __kmp_team_handler(int signo) {
  if (__kmp_global.g.g_abort == 0) {
/* Stage 1 signal handler, let's shut down all of the threads. */
#ifdef KMP_DEBUG
    __kmp_debug_printf("__kmp_team_handler: caught signal = %d\n", signo);
#endif
    switch (signo) {
    case SIGHUP:
    case SIGINT:
    case SIGQUIT:
    case SIGILL:
    case SIGABRT:
    case SIGFPE:
    case SIGBUS:
    case SIGSEGV:
#ifdef SIGSYS
    case SIGSYS:
#endif
    case SIGTERM:
      if (__kmp_debug_buf) {
        __kmp_dump_debug_buffer();
      }
      __kmp_unregister_library(); // cleanup shared memory only if it was opened
      KMP_MB(); // Flush all pending memory write invalidates.
      TCW_4(__kmp_global.g.g_abort, signo);
      KMP_MB(); // Flush all pending memory write invalidates.
      TCW_4(__kmp_global.g.g_done, TRUE);
      KMP_MB(); // Flush all pending memory write invalidates.
      break;
    default:
#ifdef KMP_DEBUG
      __kmp_debug_printf("__kmp_team_handler: unknown signal type");
#endif
      break;
    }
  }
} // __kmp_team_handler
static void __kmp_sigaction(int signum, const struct sigaction *act,
                            struct sigaction *oldact) {
  int rc = sigaction(signum, act, oldact);
  KMP_CHECK_SYSFAIL_ERRNO("sigaction", rc);
}
static void __kmp_install_one_handler(int sig, sig_func_t handler_func,
                                      int parallel_init) {
  KMP_MB(); // Flush all pending memory write invalidates.
  KB_TRACE(60,
           ("__kmp_install_one_handler( %d, ..., %d )\n", sig, parallel_init));
  if (parallel_init) {
    struct sigaction new_action;
    struct sigaction old_action;
    new_action.sa_handler = handler_func;
    new_action.sa_flags = 0;
    sigfillset(&new_action.sa_mask);
    __kmp_sigaction(sig, &new_action, &old_action);
    if (old_action.sa_handler == __kmp_sighldrs[sig].sa_handler) {
      sigaddset(&__kmp_sigset, sig);
    } else {
      // Restore/keep user's handler if one was previously installed.
      __kmp_sigaction(sig, &old_action, NULL);
    }
  } else {
    // Save initial/system signal handlers to see if user handlers installed.
    __kmp_sigaction(sig, NULL, &__kmp_sighldrs[sig]);
  }
  KMP_MB(); // Flush all pending memory write invalidates.
} // __kmp_install_one_handler
static void __kmp_remove_one_handler(int sig) {
  KB_TRACE(60, ("__kmp_remove_one_handler( %d )\n", sig));
  if (sigismember(&__kmp_sigset, sig)) {
    struct sigaction old;
    KMP_MB(); // Flush all pending memory write invalidates.
    __kmp_sigaction(sig, &__kmp_sighldrs[sig], &old);
    if ((old.sa_handler != __kmp_team_handler) &&
        (old.sa_handler != __kmp_null_handler)) {
      // Restore the user's signal handler.
      KB_TRACE(10, ("__kmp_remove_one_handler: oops, not our handler, "
                    "restoring: sig=%d\n",
                    sig));
      __kmp_sigaction(sig, &old, NULL);
    }
    sigdelset(&__kmp_sigset, sig);
    KMP_MB(); // Flush all pending memory write invalidates.
  }
} // __kmp_remove_one_handler
void __kmp_install_signals(int parallel_init) {
  KB_TRACE(10, ("__kmp_install_signals( %d )\n", parallel_init));
  if (__kmp_handle_signals || !parallel_init) {
    /* If !parallel_init, we do not install handlers, just save original
       handlers. */
    sigemptyset(&__kmp_sigset);
    __kmp_install_one_handler(SIGHUP, __kmp_team_handler, parallel_init);
    __kmp_install_one_handler(SIGINT, __kmp_team_handler, parallel_init);
    __kmp_install_one_handler(SIGQUIT, __kmp_team_handler, parallel_init);
    __kmp_install_one_handler(SIGILL, __kmp_team_handler, parallel_init);
    __kmp_install_one_handler(SIGABRT, __kmp_team_handler, parallel_init);
    __kmp_install_one_handler(SIGFPE, __kmp_team_handler, parallel_init);
    __kmp_install_one_handler(SIGBUS, __kmp_team_handler, parallel_init);
    __kmp_install_one_handler(SIGSEGV, __kmp_team_handler, parallel_init);
#ifdef SIGSYS
    __kmp_install_one_handler(SIGSYS, __kmp_team_handler, parallel_init);
#endif // SIGSYS
    __kmp_install_one_handler(SIGTERM, __kmp_team_handler, parallel_init);
#ifdef SIGPIPE
    __kmp_install_one_handler(SIGPIPE, __kmp_team_handler, parallel_init);
#endif // SIGPIPE
  }
} // __kmp_install_signals
void __kmp_remove_signals(void) {
  int sig;
  KB_TRACE(10, ("__kmp_remove_signals()\n"));
  for (sig = 1; sig < NSIG; ++sig) {
    __kmp_remove_one_handler(sig);
  }
} // __kmp_remove_signals
#endif // KMP_HANDLE_SIGNALS

void __kmp_enable(int new_state) {
#ifdef KMP_CANCEL_THREADS
  int status, old_state;
  status = pthread_setcancelstate(new_state, &old_state);
  KMP_CHECK_SYSFAIL("pthread_setcancelstate", status);
  KMP_DEBUG_ASSERT(old_state == PTHREAD_CANCEL_DISABLE);
#endif
}

void __kmp_disable(int *old_state) {
#ifdef KMP_CANCEL_THREADS
  int status;
  status = pthread_setcancelstate(PTHREAD_CANCEL_DISABLE, old_state);
  KMP_CHECK_SYSFAIL("pthread_setcancelstate", status);
#endif
}
static void __kmp_atfork_prepare(void) {
  __kmp_acquire_bootstrap_lock(&__kmp_initz_lock);
  __kmp_acquire_bootstrap_lock(&__kmp_forkjoin_lock);
}

static void __kmp_atfork_parent(void) {
  __kmp_release_bootstrap_lock(&__kmp_forkjoin_lock);
  __kmp_release_bootstrap_lock(&__kmp_initz_lock);
}
/* Reset the library so execution in the child starts "all over again" with
   clean data structures in initial states. Don't worry about freeing memory
   allocated by parent, just abandon it to be safe. */
static void __kmp_atfork_child(void) {
  __kmp_release_bootstrap_lock(&__kmp_forkjoin_lock);
  __kmp_release_bootstrap_lock(&__kmp_initz_lock);

  ++__kmp_fork_count;

#if KMP_AFFINITY_SUPPORTED
#if KMP_OS_LINUX || KMP_OS_FREEBSD
  // Reset the affinity in the child to the initial thread affinity in the
  // parent.
  kmp_set_thread_affinity_mask_initial();
#endif
  // Set default not to bind threads tightly in the child (we're expecting
  // over-subscription after the fork and this can improve things for
  // scripting languages that use OpenMP inside process-parallel code).
  __kmp_affinity_type = affinity_none;
  if (__kmp_nested_proc_bind.bind_types != NULL) {
    __kmp_nested_proc_bind.bind_types[0] = proc_bind_false;
  }
#endif // KMP_AFFINITY_SUPPORTED

#if KMP_USE_MONITOR
  __kmp_init_monitor = 0;
#endif
  __kmp_init_parallel = FALSE;
  __kmp_init_middle = FALSE;
  __kmp_init_serial = FALSE;
  TCW_4(__kmp_init_gtid, FALSE);
  __kmp_init_common = FALSE;

  TCW_4(__kmp_init_user_locks, FALSE);
#if !KMP_USE_DYNAMIC_LOCK
  __kmp_user_lock_table.used = 1;
  __kmp_user_lock_table.allocated = 0;
  __kmp_user_lock_table.table = NULL;
  __kmp_lock_blocks = NULL;
#endif

  __kmp_all_nth = 0;
  TCW_4(__kmp_nth, 0);

  __kmp_thread_pool = NULL;
  __kmp_thread_pool_insert_pt = NULL;
  __kmp_team_pool = NULL;

  /* Must actually zero all the *cache arguments passed to __kmpc_threadprivate
     here so threadprivate doesn't use stale data. */
  KA_TRACE(10, ("__kmp_atfork_child: checking cache address list %p\n",
                __kmp_threadpriv_cache_list));

  while (__kmp_threadpriv_cache_list != NULL) {

    if (*__kmp_threadpriv_cache_list->addr != NULL) {
      KC_TRACE(50, ("__kmp_atfork_child: zeroing cache at address %p\n",
                    &(*__kmp_threadpriv_cache_list->addr)));

      *__kmp_threadpriv_cache_list->addr = NULL;
    }
    __kmp_threadpriv_cache_list = __kmp_threadpriv_cache_list->next;
  }

  __kmp_init_runtime = FALSE;

  /* reset statically initialized locks */
  __kmp_init_bootstrap_lock(&__kmp_initz_lock);
  __kmp_init_bootstrap_lock(&__kmp_stdio_lock);
  __kmp_init_bootstrap_lock(&__kmp_console_lock);
  __kmp_init_bootstrap_lock(&__kmp_task_team_lock);

  /* This is necessary to make sure no stale data is left around. */
  __kmp_serial_initialize();
}
void __kmp_register_atfork(void) {
  if (__kmp_need_register_atfork) {
    int status = pthread_atfork(__kmp_atfork_prepare, __kmp_atfork_parent,
                                __kmp_atfork_child);
    KMP_CHECK_SYSFAIL("pthread_atfork", status);
    __kmp_need_register_atfork = FALSE;
  }
}
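// pthread_atfork(3) runs the three callbacks around every fork(): "prepare"
// in the parent before the fork, then "parent" and "child" in the respective
// processes after it. Taking __kmp_initz_lock/__kmp_forkjoin_lock in prepare
// and releasing them on both sides guarantees no runtime lock is left
// permanently held in the child. A minimal sketch of the same pattern, with
// hypothetical names, outside the runtime:
//
//   static pthread_mutex_t m = PTHREAD_MUTEX_INITIALIZER;
//   static void prep(void) { pthread_mutex_lock(&m); }
//   static void done(void) { pthread_mutex_unlock(&m); }
//   int main(void) {
//     pthread_atfork(prep, done, done); // prepare, parent, child
//     if (fork() == 0) { /* child sees m unlocked */ }
//     return 0;
//   }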
void __kmp_suspend_initialize(void) {
  int status;
  status = pthread_mutexattr_init(&__kmp_suspend_mutex_attr);
  KMP_CHECK_SYSFAIL("pthread_mutexattr_init", status);
  status = pthread_condattr_init(&__kmp_suspend_cond_attr);
  KMP_CHECK_SYSFAIL("pthread_condattr_init", status);
}
void __kmp_suspend_initialize_thread(kmp_info_t *th) {
  ANNOTATE_HAPPENS_AFTER(&th->th.th_suspend_init_count);
  int old_value = KMP_ATOMIC_LD_RLX(&th->th.th_suspend_init_count);
  int new_value = __kmp_fork_count + 1;
  // Return if already initialized.
  if (old_value == new_value)
    return;
  // Wait, then return if being initialized.
  if (old_value == -1 || !__kmp_atomic_compare_store(
                             &th->th.th_suspend_init_count, old_value, -1)) {
    while (KMP_ATOMIC_LD_ACQ(&th->th.th_suspend_init_count) != new_value) {
      KMP_CPU_PAUSE();
    }
  } else {
    // Claim to be the initializer and do initializations.
    int status;
    status = pthread_cond_init(&th->th.th_suspend_cv.c_cond,
                               &__kmp_suspend_cond_attr);
    KMP_CHECK_SYSFAIL("pthread_cond_init", status);
    status = pthread_mutex_init(&th->th.th_suspend_mx.m_mutex,
                                &__kmp_suspend_mutex_attr);
    KMP_CHECK_SYSFAIL("pthread_mutex_init", status);
    KMP_ATOMIC_ST_REL(&th->th.th_suspend_init_count, new_value);
    ANNOTATE_HAPPENS_BEFORE(&th->th.th_suspend_init_count);
  }
}
void __kmp_suspend_uninitialize_thread(kmp_info_t *th) {
  if (KMP_ATOMIC_LD_ACQ(&th->th.th_suspend_init_count) > __kmp_fork_count) {
    /* this means we have initialized the suspension pthread objects for this
       thread in this instance of the process */
    int status;

    status = pthread_cond_destroy(&th->th.th_suspend_cv.c_cond);
    if (status != 0 && status != EBUSY) {
      KMP_SYSFAIL("pthread_cond_destroy", status);
    }
    status = pthread_mutex_destroy(&th->th.th_suspend_mx.m_mutex);
    if (status != 0 && status != EBUSY) {
      KMP_SYSFAIL("pthread_mutex_destroy", status);
    }
    --th->th.th_suspend_init_count;
    KMP_DEBUG_ASSERT(KMP_ATOMIC_LD_RLX(&th->th.th_suspend_init_count) ==
                     __kmp_fork_count);
  }
}
// return true if lock obtained, false otherwise
int __kmp_try_suspend_mx(kmp_info_t *th) {
  return (pthread_mutex_trylock(&th->th.th_suspend_mx.m_mutex) == 0);
}

void __kmp_lock_suspend_mx(kmp_info_t *th) {
  int status = pthread_mutex_lock(&th->th.th_suspend_mx.m_mutex);
  KMP_CHECK_SYSFAIL("pthread_mutex_lock", status);
}

void __kmp_unlock_suspend_mx(kmp_info_t *th) {
  int status = pthread_mutex_unlock(&th->th.th_suspend_mx.m_mutex);
  KMP_CHECK_SYSFAIL("pthread_mutex_unlock", status);
}
template <class C>
static inline void __kmp_suspend_template(int th_gtid, C *flag) {
  KMP_TIME_DEVELOPER_PARTITIONED_BLOCK(USER_suspend);
  kmp_info_t *th = __kmp_threads[th_gtid];
  int status;
  typename C::flag_t old_spin;

  KF_TRACE(30, ("__kmp_suspend_template: T#%d enter for flag = %p\n", th_gtid,
                flag->get()));

  __kmp_suspend_initialize_thread(th);

  __kmp_lock_suspend_mx(th);

  KF_TRACE(10, ("__kmp_suspend_template: T#%d setting sleep bit for spin(%p)\n",
                th_gtid, flag->get()));

  old_spin = flag->set_sleeping();
  if (__kmp_dflt_blocktime == KMP_MAX_BLOCKTIME &&
      __kmp_pause_status != kmp_soft_paused) {
    flag->unset_sleeping();
    __kmp_unlock_suspend_mx(th);
    return;
  }
  KF_TRACE(5, ("__kmp_suspend_template: T#%d set sleep bit for spin(%p)==%x,"
               " was %x\n",
               th_gtid, flag->get(), flag->load(), old_spin));

  if (flag->done_check_val(old_spin)) {
    old_spin = flag->unset_sleeping();
    KF_TRACE(5, ("__kmp_suspend_template: T#%d false alarm, reset sleep bit "
                 "for spin(%p)\n",
                 th_gtid, flag->get()));
  } else {
    /* Encapsulate in a loop as the documentation states that this may "with
       low probability" return when the condition variable has not been
       signaled or broadcast. */
    int deactivated = FALSE;
    TCW_PTR(th->th.th_sleep_loc, (void *)flag);

    while (flag->is_sleeping()) {
#ifdef DEBUG_SUSPEND
      char buffer[128];
      __kmp_suspend_count++;
      __kmp_print_cond(buffer, &th->th.th_suspend_cv);
      __kmp_printf("__kmp_suspend_template: suspending T#%d: %s\n", th_gtid,
                   buffer);
#endif
      // Mark the thread as no longer active (only in the first iteration of
      // the loop).
      if (!deactivated) {
        th->th.th_active = FALSE;
        if (th->th.th_active_in_pool) {
          th->th.th_active_in_pool = FALSE;
          KMP_ATOMIC_DEC(&__kmp_thread_pool_active_nth);
          KMP_DEBUG_ASSERT(TCR_4(__kmp_thread_pool_active_nth) >= 0);
        }
        deactivated = TRUE;
      }

#if USE_SUSPEND_TIMEOUT
      struct timespec now;
      struct timeval tval;
      int msecs;

      status = gettimeofday(&tval, NULL);
      KMP_CHECK_SYSFAIL_ERRNO("gettimeofday", status);
      TIMEVAL_TO_TIMESPEC(&tval, &now);

      msecs = (4 * __kmp_dflt_blocktime) + 200;
      now.tv_sec += msecs / 1000;
      now.tv_nsec += (msecs % 1000) * 1000;

      KF_TRACE(15, ("__kmp_suspend_template: T#%d about to perform "
                    "pthread_cond_timedwait\n",
                    th_gtid));
      status = pthread_cond_timedwait(&th->th.th_suspend_cv.c_cond,
                                      &th->th.th_suspend_mx.m_mutex, &now);
#else
      KF_TRACE(15, ("__kmp_suspend_template: T#%d about to perform"
                    " pthread_cond_wait\n",
                    th_gtid));
      status = pthread_cond_wait(&th->th.th_suspend_cv.c_cond,
                                 &th->th.th_suspend_mx.m_mutex);
#endif // USE_SUSPEND_TIMEOUT

      if ((status != 0) && (status != EINTR) && (status != ETIMEDOUT)) {
        KMP_SYSFAIL("pthread_cond_wait", status);
      }
#ifdef KMP_DEBUG
      if (status == ETIMEDOUT) {
        if (flag->is_sleeping()) {
          KF_TRACE(100,
                   ("__kmp_suspend_template: T#%d timeout wakeup\n", th_gtid));
        } else {
          KF_TRACE(2, ("__kmp_suspend_template: T#%d timeout wakeup, sleep bit "
                       "not set!\n",
                       th_gtid));
        }
      } else if (flag->is_sleeping()) {
        KF_TRACE(100,
                 ("__kmp_suspend_template: T#%d spurious wakeup\n", th_gtid));
      }
#endif
    } // while

    // Mark the thread as active again (if it was previously marked inactive).
    if (deactivated) {
      th->th.th_active = TRUE;
      if (TCR_4(th->th.th_in_pool)) {
        KMP_ATOMIC_INC(&__kmp_thread_pool_active_nth);
        th->th.th_active_in_pool = TRUE;
      }
    }
  }
#ifdef DEBUG_SUSPEND
  {
    char buffer[128];
    __kmp_print_cond(buffer, &th->th.th_suspend_cv);
    __kmp_printf("__kmp_suspend_template: T#%d has awakened: %s\n", th_gtid,
                 buffer);
  }
#endif

  __kmp_unlock_suspend_mx(th);
  KF_TRACE(30, ("__kmp_suspend_template: T#%d exit\n", th_gtid));
}
template <bool C, bool S>
void __kmp_suspend_32(int th_gtid, kmp_flag_32<C, S> *flag) {
  __kmp_suspend_template(th_gtid, flag);
}
template <bool C, bool S>
void __kmp_suspend_64(int th_gtid, kmp_flag_64<C, S> *flag) {
  __kmp_suspend_template(th_gtid, flag);
}
void __kmp_suspend_oncore(int th_gtid, kmp_flag_oncore *flag) {
  __kmp_suspend_template(th_gtid, flag);
}

template void __kmp_suspend_32<false, false>(int, kmp_flag_32<false, false> *);
template void __kmp_suspend_64<false, true>(int, kmp_flag_64<false, true> *);
template void __kmp_suspend_64<true, false>(int, kmp_flag_64<true, false> *);
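// Only the flag specializations above are used by the rest of the runtime,
// so explicit instantiation keeps the template bodies in this translation
// unit while still giving the linker every symbol it needs.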
/* This routine signals the thread specified by target_gtid to wake up after
   setting the sleep bit indicated by the flag argument to FALSE. The target
   thread must already have called __kmp_suspend_template(). */
template <class C>
static inline void __kmp_resume_template(int target_gtid, C *flag) {
  KMP_TIME_DEVELOPER_PARTITIONED_BLOCK(USER_resume);
  kmp_info_t *th = __kmp_threads[target_gtid];
  int status;

#ifdef KMP_DEBUG
  int gtid = TCR_4(__kmp_init_gtid) ? __kmp_get_gtid() : -1;
#endif

  KF_TRACE(30, ("__kmp_resume_template: T#%d wants to wakeup T#%d enter\n",
                gtid, target_gtid));
  KMP_DEBUG_ASSERT(gtid != target_gtid);

  __kmp_suspend_initialize_thread(th);

  __kmp_lock_suspend_mx(th);

  if (!flag) { // coming from __kmp_null_resume_wrapper
    flag = (C *)CCAST(void *, th->th.th_sleep_loc);
  }

  // First, check if the flag is null or its type has changed. If so, someone
  // else woke it up.
  if (!flag || flag->get_type() != flag->get_ptr_type()) {
    // get_ptr_type simply shows what flag was cast to.
    KF_TRACE(5, ("__kmp_resume_template: T#%d exiting, thread T#%d already "
                 "awake: flag(%p)\n",
                 gtid, target_gtid, NULL));
    __kmp_unlock_suspend_mx(th);
    return;
  } else {
    typename C::flag_t old_spin = flag->unset_sleeping();
    if (!flag->is_sleeping_val(old_spin)) {
      KF_TRACE(5, ("__kmp_resume_template: T#%d exiting, thread T#%d already "
                   "awake: flag(%p): %u => %u\n",
                   gtid, target_gtid, flag->get(), old_spin, flag->load()));
      __kmp_unlock_suspend_mx(th);
      return;
    }
    KF_TRACE(5, ("__kmp_resume_template: T#%d about to wakeup T#%d, reset "
                 "sleep bit for flag's loc(%p): "
                 "%u => %u\n",
                 gtid, target_gtid, flag->get(), old_spin, flag->load()));
  }
  TCW_PTR(th->th.th_sleep_loc, NULL);

#ifdef DEBUG_SUSPEND
  {
    char buffer[128];
    __kmp_print_cond(buffer, &th->th.th_suspend_cv);
    __kmp_printf("__kmp_resume_template: T#%d resuming T#%d: %s\n", gtid,
                 target_gtid, buffer);
  }
#endif

  status = pthread_cond_signal(&th->th.th_suspend_cv.c_cond);
  KMP_CHECK_SYSFAIL("pthread_cond_signal", status);
  __kmp_unlock_suspend_mx(th);
  KF_TRACE(30, ("__kmp_resume_template: T#%d exiting after signaling wake up"
                " for T#%d\n",
                gtid, target_gtid));
}
template <bool C, bool S>
void __kmp_resume_32(int target_gtid, kmp_flag_32<C, S> *flag) {
  __kmp_resume_template(target_gtid, flag);
}
template <bool C, bool S>
void __kmp_resume_64(int target_gtid, kmp_flag_64<C, S> *flag) {
  __kmp_resume_template(target_gtid, flag);
}
void __kmp_resume_oncore(int target_gtid, kmp_flag_oncore *flag) {
  __kmp_resume_template(target_gtid, flag);
}

template void __kmp_resume_32<false, true>(int, kmp_flag_32<false, true> *);
template void __kmp_resume_64<false, true>(int, kmp_flag_64<false, true> *);
#if KMP_USE_MONITOR
void __kmp_resume_monitor() {
  KMP_TIME_DEVELOPER_PARTITIONED_BLOCK(USER_resume);
  int status;
#ifdef KMP_DEBUG
  int gtid = TCR_4(__kmp_init_gtid) ? __kmp_get_gtid() : -1;
  KF_TRACE(30, ("__kmp_resume_monitor: T#%d wants to wakeup T#%d enter\n", gtid,
                KMP_GTID_MONITOR));
  KMP_DEBUG_ASSERT(gtid != KMP_GTID_MONITOR);
#endif
  status = pthread_mutex_lock(&__kmp_wait_mx.m_mutex);
  KMP_CHECK_SYSFAIL("pthread_mutex_lock", status);
#ifdef DEBUG_SUSPEND
  {
    char buffer[128];
    __kmp_print_cond(buffer, &__kmp_wait_cv.c_cond);
    __kmp_printf("__kmp_resume_monitor: T#%d resuming T#%d: %s\n", gtid,
                 KMP_GTID_MONITOR, buffer);
  }
#endif
  status = pthread_cond_signal(&__kmp_wait_cv.c_cond);
  KMP_CHECK_SYSFAIL("pthread_cond_signal", status);
  status = pthread_mutex_unlock(&__kmp_wait_mx.m_mutex);
  KMP_CHECK_SYSFAIL("pthread_mutex_unlock", status);
  KF_TRACE(30, ("__kmp_resume_monitor: T#%d exiting after signaling wake up"
                " for T#%d\n",
                gtid, KMP_GTID_MONITOR));
}
#endif // KMP_USE_MONITOR

void __kmp_yield() { sched_yield(); }

void __kmp_gtid_set_specific(int gtid) {
  if (__kmp_init_gtid) {
    int status;
    status = pthread_setspecific(__kmp_gtid_threadprivate_key,
                                 (void *)(intptr_t)(gtid + 1));
    KMP_CHECK_SYSFAIL("pthread_setspecific", status);
  } else {
    KA_TRACE(50, ("__kmp_gtid_set_specific: runtime shutdown, returning\n"));
  }
}
int __kmp_gtid_get_specific() {
  int gtid;
  if (!__kmp_init_gtid) {
    KA_TRACE(50, ("__kmp_gtid_get_specific: runtime shutdown, returning "
                  "KMP_GTID_SHUTDOWN\n"));
    return KMP_GTID_SHUTDOWN;
  }
  gtid = (int)(size_t)pthread_getspecific(__kmp_gtid_threadprivate_key);
  if (gtid == 0) {
    gtid = KMP_GTID_DNE;
  } else {
    gtid--;
  }
  KA_TRACE(50, ("__kmp_gtid_get_specific: key:%d gtid:%d\n",
                __kmp_gtid_threadprivate_key, gtid));
  return gtid;
}
double __kmp_read_cpu_time(void) {
  struct tms buffer;

  times(&buffer);

  return (double)(buffer.tms_utime + buffer.tms_cutime) /
         (double)CLOCKS_PER_SEC;
}
int __kmp_read_system_info(struct kmp_sys_info *info) {
  int status;
  struct rusage r_usage;

  memset(info, 0, sizeof(*info));

  status = getrusage(RUSAGE_SELF, &r_usage);
  KMP_CHECK_SYSFAIL_ERRNO("getrusage", status);

  // The maximum resident set size utilized (in kilobytes)
  info->maxrss = r_usage.ru_maxrss;
  // The number of page faults serviced without any I/O
  info->minflt = r_usage.ru_minflt;
  // The number of page faults serviced that required I/O
  info->majflt = r_usage.ru_majflt;
  // The number of times a process was "swapped" out of memory
  info->nswap = r_usage.ru_nswap;
  // The number of times the file system had to perform input
  info->inblock = r_usage.ru_inblock;
  // The number of times the file system had to perform output
  info->oublock = r_usage.ru_oublock;
  // The number of times a context switch was voluntarily made
  info->nvcsw = r_usage.ru_nvcsw;
  // The number of times a context switch was forced
  info->nivcsw = r_usage.ru_nivcsw;

  return (status != 0);
}
void __kmp_read_system_time(double *delta) {
  double t_ns;
  struct timeval tval;
  struct timespec stop;
  int status;

  status = gettimeofday(&tval, NULL);
  KMP_CHECK_SYSFAIL_ERRNO("gettimeofday", status);
  TIMEVAL_TO_TIMESPEC(&tval, &stop);
  t_ns = (double)(TS2NS(stop) - TS2NS(__kmp_sys_timer_data.start));
  *delta = (t_ns * 1e-9);
}
void __kmp_clear_system_time(void) {
  struct timeval tval;
  int status;
  status = gettimeofday(&tval, NULL);
  KMP_CHECK_SYSFAIL_ERRNO("gettimeofday", status);
  TIMEVAL_TO_TIMESPEC(&tval, &__kmp_sys_timer_data.start);
}
static int __kmp_get_xproc(void) {
  int r = 0;

#if KMP_OS_LINUX || KMP_OS_DRAGONFLY || KMP_OS_FREEBSD || KMP_OS_NETBSD ||    \
    KMP_OS_OPENBSD || KMP_OS_HURD

  __kmp_type_convert(sysconf(_SC_NPROCESSORS_ONLN), &(r));

#elif KMP_OS_DARWIN

  // Find the number of available CPUs.
  kern_return_t rc;
  host_basic_info_data_t info;
  mach_msg_type_number_t num = HOST_BASIC_INFO_COUNT;
  rc = host_info(mach_host_self(), HOST_BASIC_INFO, (host_info_t)&info, &num);
  if (rc == 0 && num == HOST_BASIC_INFO_COUNT) {
    // Cannot use KA_TRACE() here because this code works before trace support
    // is initialized.
    r = info.avail_cpus;
  } else {
    KMP_WARNING(CantGetNumAvailCPU);
    KMP_INFORM(AssumedNumCPU);
  }

#else

#error "Unknown or unsupported OS."

#endif

  return r > 0 ? r : 2; /* guess value of 2 if OS told us 0 */
} // __kmp_get_xproc
int __kmp_read_from_file(char const *path, char const *format, ...) {
  int result;
  va_list args;

  va_start(args, format);
  FILE *f = fopen(path, "rb");
  if (f == NULL) {
    va_end(args);
    return 0;
  }
  result = vfscanf(f, format, args);
  fclose(f);
  va_end(args);

  return result;
}
void __kmp_runtime_initialize(void) {
  int status;
  pthread_mutexattr_t mutex_attr;
  pthread_condattr_t cond_attr;

  if (__kmp_init_runtime) {
    return;
  }

#if (KMP_ARCH_X86 || KMP_ARCH_X86_64)
  if (!__kmp_cpuinfo.initialized) {
    __kmp_query_cpuid(&__kmp_cpuinfo);
  }
#endif /* KMP_ARCH_X86 || KMP_ARCH_X86_64 */

  __kmp_xproc = __kmp_get_xproc();

#if !KMP_32_BIT_ARCH
  struct rlimit rlim;
  // Read stack size of calling thread and save it as the default for worker
  // threads; this should be done before reading environment variables.
  status = getrlimit(RLIMIT_STACK, &rlim);
  if (status == 0) { // success?
    __kmp_stksize = rlim.rlim_cur;
    __kmp_check_stksize(&__kmp_stksize); // check value and adjust if needed
  }
#endif /* KMP_32_BIT_ARCH */

  if (sysconf(_SC_THREADS)) {

    /* Query the maximum number of threads */
    __kmp_type_convert(sysconf(_SC_THREAD_THREADS_MAX), &(__kmp_sys_max_nth));
    if (__kmp_sys_max_nth == -1) {
      /* Unlimited threads for NPTL */
      __kmp_sys_max_nth = INT_MAX;
    } else if (__kmp_sys_max_nth <= 1) {
      /* Can't tell, just use PTHREAD_THREADS_MAX */
      __kmp_sys_max_nth = KMP_MAX_NTH;
    }

    /* Query the minimum stack size */
    __kmp_sys_min_stksize = sysconf(_SC_THREAD_STACK_MIN);
    if (__kmp_sys_min_stksize <= 1) {
      __kmp_sys_min_stksize = KMP_MIN_STKSIZE;
    }
  }

  /* Set up minimum number of threads to switch to TLS gtid */
  __kmp_tls_gtid_min = KMP_TLS_GTID_MIN;

  status = pthread_key_create(&__kmp_gtid_threadprivate_key,
                              __kmp_internal_end_dest);
  KMP_CHECK_SYSFAIL("pthread_key_create", status);
  status = pthread_mutexattr_init(&mutex_attr);
  KMP_CHECK_SYSFAIL("pthread_mutexattr_init", status);
  status = pthread_mutex_init(&__kmp_wait_mx.m_mutex, &mutex_attr);
  KMP_CHECK_SYSFAIL("pthread_mutex_init", status);
  status = pthread_mutexattr_destroy(&mutex_attr);
  KMP_CHECK_SYSFAIL("pthread_mutexattr_destroy", status);
  status = pthread_condattr_init(&cond_attr);
  KMP_CHECK_SYSFAIL("pthread_condattr_init", status);
  status = pthread_cond_init(&__kmp_wait_cv.c_cond, &cond_attr);
  KMP_CHECK_SYSFAIL("pthread_cond_init", status);
  status = pthread_condattr_destroy(&cond_attr);
  KMP_CHECK_SYSFAIL("pthread_condattr_destroy", status);
#if USE_ITT_BUILD
  __kmp_itt_initialize();
#endif /* USE_ITT_BUILD */

  __kmp_init_runtime = TRUE;
}
void __kmp_runtime_destroy(void) {
  int status;

  if (!__kmp_init_runtime) {
    return; // Nothing to do.
  }

#if USE_ITT_BUILD
  __kmp_itt_destroy();
#endif /* USE_ITT_BUILD */

  status = pthread_key_delete(__kmp_gtid_threadprivate_key);
  KMP_CHECK_SYSFAIL("pthread_key_delete", status);

  status = pthread_mutex_destroy(&__kmp_wait_mx.m_mutex);
  if (status != 0 && status != EBUSY) {
    KMP_SYSFAIL("pthread_mutex_destroy", status);
  }
  status = pthread_cond_destroy(&__kmp_wait_cv.c_cond);
  if (status != 0 && status != EBUSY) {
    KMP_SYSFAIL("pthread_cond_destroy", status);
  }
#if KMP_AFFINITY_SUPPORTED
  __kmp_affinity_uninitialize();
#endif

  __kmp_init_runtime = FALSE;
}
/* Put the thread to sleep for a time period */
void __kmp_thread_sleep(int millis) { sleep((millis + 500) / 1000); }

/* Calculate the elapsed wall clock time for the user */
void __kmp_elapsed(double *t) {
  int status;
#ifdef FIX_SGI_CLOCK
  struct timespec ts;

  status = clock_gettime(CLOCK_PROCESS_CPUTIME_ID, &ts);
  KMP_CHECK_SYSFAIL_ERRNO("clock_gettime", status);
  *t = (double)ts.tv_nsec * (1.0 / (double)KMP_NSEC_PER_SEC) +
       (double)ts.tv_sec;
#else
  struct timeval tv;

  status = gettimeofday(&tv, NULL);
  KMP_CHECK_SYSFAIL_ERRNO("gettimeofday", status);
  *t = (double)tv.tv_usec * (1.0 / (double)KMP_USEC_PER_SEC) +
       (double)tv.tv_sec;
#endif
}

/* Calculate the elapsed wall clock tick for the user */
void __kmp_elapsed_tick(double *t) { *t = 1 / (double)CLOCKS_PER_SEC; }

/* Return the current time stamp in nsec */
kmp_uint64 __kmp_now_nsec() {
  struct timeval t;
  gettimeofday(&t, NULL);
  kmp_uint64 nsec = (kmp_uint64)KMP_NSEC_PER_SEC * (kmp_uint64)t.tv_sec +
                    (kmp_uint64)1000 * (kmp_uint64)t.tv_usec;
  return nsec;
}
#if KMP_ARCH_X86 || KMP_ARCH_X86_64
void __kmp_initialize_system_tick() {
  kmp_uint64 now, nsec2, diff;
  kmp_uint64 delay = 100000; // ~50-100 usec on most machines.
  kmp_uint64 nsec = __kmp_now_nsec();
  kmp_uint64 goal = __kmp_hardware_timestamp() + delay;
  while ((now = __kmp_hardware_timestamp()) < goal)
    ;
  nsec2 = __kmp_now_nsec();
  diff = nsec2 - nsec;
  if (diff > 0) {
    kmp_uint64 tpms = ((kmp_uint64)1e6 * (delay + (now - goal)) / diff);
    if (tpms > 0)
      __kmp_ticks_per_msec = tpms;
  }
}
#endif
/* Determine whether the given address is mapped into the current address
   space. */

int __kmp_is_address_mapped(void *addr) {

  int found = 0;
  int rc;

#if KMP_OS_LINUX || KMP_OS_HURD

  /* On GNUish OSes, read the /proc/<pid>/maps pseudo-file to get all the
     address ranges mapped into the address space. */

  char *name = __kmp_str_format("/proc/%d/maps", getpid());
  FILE *file = NULL;

  file = fopen(name, "r");
  KMP_ASSERT(file != NULL);

  for (;;) {

    void *beginning = NULL;
    void *ending = NULL;
    char perms[5];

    rc = fscanf(file, "%p-%p %4s %*[^\n]\n", &beginning, &ending, perms);
    if (rc == EOF) {
      break;
    }
    KMP_ASSERT(rc == 3 &&
               KMP_STRLEN(perms) == 4); // Make sure all fields are read.

    // Ending address is not included in the region, but beginning is.
    if ((addr >= beginning) && (addr < ending)) {
      perms[2] = 0; // 3rd and 4th character does not matter.
      if (strcmp(perms, "rw") == 0) {
        // Memory we are looking for should be readable and writable.
        found = 1;
      }
      break;
    }
  }

  // Free resources.
  fclose(file);
  KMP_INTERNAL_FREE(name);
#elif KMP_OS_FREEBSD
  char *buf;
  size_t lstsz;
  int mib[] = {CTL_KERN, KERN_PROC, KERN_PROC_VMMAP, getpid()};
  rc = sysctl(mib, 4, NULL, &lstsz, NULL, 0);
  if (rc < 0)
    return 0;
  // Ask for a slightly larger buffer than the first sysctl reported, in case
  // entries are added between the two calls.
  lstsz = lstsz * 4 / 3;
  buf = reinterpret_cast<char *>(kmpc_malloc(lstsz));
  rc = sysctl(mib, 4, buf, &lstsz, NULL, 0);
  if (rc < 0) {
    kmpc_free(buf);
    return 0;
  }

  char *lw = buf;
  char *up = buf + lstsz;

  while (lw < up) {
    struct kinfo_vmentry *cur = reinterpret_cast<struct kinfo_vmentry *>(lw);
    size_t cursz = cur->kve_structsize;
    if (cursz == 0)
      break;
    void *start = reinterpret_cast<void *>(cur->kve_start);
    void *end = reinterpret_cast<void *>(cur->kve_end);
    // Readable/writable addresses within current map entry
    if ((addr >= start) && (addr < end)) {
      if ((cur->kve_protection & KVME_PROT_READ) != 0 &&
          (cur->kve_protection & KVME_PROT_WRITE) != 0) {
        found = 1;
        break;
      }
    }
    lw += cursz;
  }
  kmpc_free(buf);
#elif KMP_OS_DARWIN

  /* On OS X*, /proc pseudo filesystem is not available. Try to read memory
     using vm interface. */

  int buffer;
  vm_size_t count;
  rc = vm_read_overwrite(
      mach_task_self(), // Task to read memory of.
      (vm_address_t)(addr), // Address to read from.
      1, // Number of bytes to be read.
      (vm_address_t)(&buffer), // Address of buffer to save read bytes in.
      &count // Address of var to save number of read bytes in.
  );
  if (rc == 0) {
    // Memory successfully read.
    found = 1;
  }
#elif KMP_OS_NETBSD

  int mib[5];
  mib[0] = CTL_VM;
  mib[1] = VM_PROC;
  mib[2] = VM_PROC_MAP;
  mib[3] = getpid();
  mib[4] = sizeof(struct kinfo_vmentry);

  size_t size;
  rc = sysctl(mib, __arraycount(mib), NULL, &size, NULL, 0);
  KMP_ASSERT(!rc);
  KMP_ASSERT(size);

  size = size * 4 / 3;
  struct kinfo_vmentry *kiv =
      (struct kinfo_vmentry *)KMP_INTERNAL_MALLOC(size);
  KMP_ASSERT(kiv);

  rc = sysctl(mib, __arraycount(mib), kiv, &size, NULL, 0);
  KMP_ASSERT(!rc);
  KMP_ASSERT(size);

  for (size_t i = 0; i < size; i++) {
    if (kiv[i].kve_start >= (uint64_t)addr &&
        kiv[i].kve_end <= (uint64_t)addr) {
      found = 1;
      break;
    }
  }
  KMP_INTERNAL_FREE(kiv);
#elif KMP_OS_OPENBSD

  int mib[3];
  mib[0] = CTL_KERN;
  mib[1] = KERN_PROC_VMMAP;
  mib[2] = getpid();

  size_t size;
  uint64_t end;
  rc = sysctl(mib, 3, NULL, &size, NULL, 0);
  KMP_ASSERT(!rc);
  KMP_ASSERT(size);
  end = size;

  struct kinfo_vmentry kiv = {.kve_start = 0};

  while ((rc = sysctl(mib, 3, &kiv, &size, NULL, 0)) == 0) {
    KMP_ASSERT(size);
    if (kiv.kve_end == end)
      break;

    if (kiv.kve_start >= (uint64_t)addr && kiv.kve_end <= (uint64_t)addr) {
      found = 1;
      break;
    }
    kiv.kve_start += 1;
  }
#elif KMP_OS_DRAGONFLY

  // FIXME(DragonFly): Implement this
  found = 1;

#else

#error "Unknown or unsupported OS"

#endif

  return found;

} // __kmp_is_address_mapped

#ifdef USE_LOAD_BALANCE

#if KMP_OS_DARWIN || KMP_OS_NETBSD

// The function returns the rounded value of the system load average during
// the given time interval, which depends on the value of the
// __kmp_load_balance_interval variable (default is 60 sec; other values may
// be 300 (5 min) or 3600 (1 hour)). It returns -1 in case of error.
int __kmp_get_load_balance(int max) {
  double averages[3];
  int ret_avg = 0;

  int res = getloadavg(averages, 3);

  // Check __kmp_load_balance_interval to determine which of the averages to
  // use. getloadavg() may return fewer samples than requested.
  if (__kmp_load_balance_interval < 180 && (res >= 1)) {
    ret_avg = (int)averages[0]; // 1 min
  } else if ((__kmp_load_balance_interval >= 180 &&
              __kmp_load_balance_interval < 600) &&
             (res >= 2)) {
    ret_avg = (int)averages[1]; // 5 min
  } else if ((__kmp_load_balance_interval >= 600) && (res == 3)) {
    ret_avg = (int)averages[2]; // 15 min
  } else { // Error occurred
    return -1;
  }

  return ret_avg;
}

#else // Linux* OS
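// The Linux* OS implementation returns the number of running (not sleeping)
// threads, or -1 in case of error: it walks /proc/<pid>/task/<tid>/stat for
// every process in the system and counts entries whose state field is 'R'.
// The scan stops early once "max" running threads have been seen, and the
// previous result is reused if the last scan happened less than
// __kmp_load_balance_interval seconds ago.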
int __kmp_get_load_balance(int max) {
  static int permanent_error = 0;
  static int glb_running_threads = 0; // Saved count of the running threads
  static double glb_call_time = 0; // Thread balance algorithm call time

  int running_threads = 0; // Number of running threads in the system.

  DIR *proc_dir = NULL; // Handle of "/proc/" directory.
  struct dirent *proc_entry = NULL;

  kmp_str_buf_t task_path; // "/proc/<pid>/task/<tid>/" path.
  DIR *task_dir = NULL; // Handle of "/proc/<pid>/task/" directory.
  struct dirent *task_entry = NULL;
  int task_path_fixed_len;

  kmp_str_buf_t stat_path; // "/proc/<pid>/task/<tid>/stat" path.
  int stat_file = -1;
  int stat_path_fixed_len;

  int total_processes = 0; // Total number of processes in system.
  int total_threads = 0; // Total number of threads in system.

  double call_time = 0.0;

  __kmp_str_buf_init(&task_path);
  __kmp_str_buf_init(&stat_path);

  __kmp_elapsed(&call_time);

  if (glb_call_time &&
      (call_time - glb_call_time < __kmp_load_balance_interval)) {
    running_threads = glb_running_threads;
    goto finish;
  }

  glb_call_time = call_time;

  // Do not spend time on scanning "/proc/" if we have a permanent error.
  if (permanent_error) {
    running_threads = -1;
    goto finish;
  }

  if (max <= 0) {
    max = INT_MAX;
  }

  // Open "/proc/" directory.
  proc_dir = opendir("/proc");
  if (proc_dir == NULL) {
    // Cannot open "/proc/". Probably the kernel does not support it. Return
    // an error now and in subsequent calls.
    running_threads = -1;
    permanent_error = 1;
    goto finish;
  }

  // Initialize fixed part of task_path. This part will not change.
  __kmp_str_buf_cat(&task_path, "/proc/", 6);
  task_path_fixed_len = task_path.used; // Remember number of used characters.

  proc_entry = readdir(proc_dir);
  while (proc_entry != NULL) {
    // Proc entry is a directory and name starts with a digit. Assume it is a
    // process' directory.
    if (proc_entry->d_type == DT_DIR && isdigit(proc_entry->d_name[0])) {

      ++total_processes;
      // Make sure init process is the very first in "/proc", so we can
      // replace strcmp(proc_entry->d_name, "1") == 0 with the simpler
      // total_processes == 1. Check that total_processes == 1 implies
      // d_name == "1" (a => b is !a || b).
      KMP_DEBUG_ASSERT(total_processes != 1 ||
                       strcmp(proc_entry->d_name, "1") == 0);

      // Construct task_path.
      task_path.used = task_path_fixed_len; // Reset task_path to "/proc/".
      __kmp_str_buf_cat(&task_path, proc_entry->d_name,
                        KMP_STRLEN(proc_entry->d_name));
      __kmp_str_buf_cat(&task_path, "/task", 5);

      task_dir = opendir(task_path.str);
      if (task_dir == NULL) {
        // Process can finish between reading "/proc/" directory entry and
        // opening process' "task/" directory, so in the general case we do
        // not complain and just skip the process. But the "init" process
        // (pid 1) should always exist, so if we cannot open
        // "/proc/1/task/", "task/" is not supported by this kernel. Report
        // an error now and in the future.
        if (strcmp(proc_entry->d_name, "1") == 0) {
          running_threads = -1;
          permanent_error = 1;
          goto finish;
        }
      } else {
        // Construct fixed part of stat file path.
        __kmp_str_buf_clear(&stat_path);
        __kmp_str_buf_cat(&stat_path, task_path.str, task_path.used);
        __kmp_str_buf_cat(&stat_path, "/", 1);
        stat_path_fixed_len = stat_path.used;

        task_entry = readdir(task_dir);
        while (task_entry != NULL) {
          // It is a directory and name starts with a digit.
          if (proc_entry->d_type == DT_DIR && isdigit(task_entry->d_name[0])) {
            ++total_threads;

            // Construct complete stat file path.
            stat_path.used =
                stat_path_fixed_len; // Reset stat path to its fixed part.
            __kmp_str_buf_cat(&stat_path, task_entry->d_name,
                              KMP_STRLEN(task_entry->d_name));
            __kmp_str_buf_cat(&stat_path, "/stat", 5);

            // Note: Low-level API (open/read/close) is used. High-level API
            // (fopen/fclose) works ~30% slower.
            stat_file = open(stat_path.str, O_RDONLY);
            if (stat_file == -1) {
              // We cannot report an error because the task (thread) can
              // terminate just before reading this file.
            } else {
              /* Content of "stat" file looks like:
                 24285 (program name) S ...

                 First number is a thread id, then the executable file name
                 in parentheses, then the state of the thread. We need just
                 the thread state. Program name length is 15 characters max,
                 so reading the beginning of the line and searching for the
                 closing paren is sufficient. */
              char buffer[65];
              ssize_t len;
              len = read(stat_file, buffer, sizeof(buffer) - 1);
              if (len >= 0) {
                buffer[len] = 0;
                // Searching for the closing paren works a bit faster than
                // sscanf(buffer, "%*d (%*s) %c ", &state).
                char *close_parent = strstr(buffer, ") ");
                if (close_parent != NULL) {
                  char state = *(close_parent + 2);
                  if (state == 'R') {
                    ++running_threads;
                    if (running_threads >= max) {
                      goto finish;
                    }
                  }
                }
              }
              close(stat_file);
              stat_file = -1;
            }
          }
          task_entry = readdir(task_dir);
        }
        closedir(task_dir);
        task_dir = NULL;
      }
    }
    proc_entry = readdir(proc_dir);
  }

  // There _might_ be a timing hole where the thread executing this code gets
  // skipped in the load balance and running_threads is 0. Assert in debug
  // builds only!
  KMP_DEBUG_ASSERT(running_threads > 0);
  if (running_threads <= 0) {
    running_threads = 1;
  }

finish: // Clean up and exit.
  if (proc_dir != NULL) {
    closedir(proc_dir);
  }
  __kmp_str_buf_free(&task_path);
  if (task_dir != NULL) {
    closedir(task_dir);
  }
  __kmp_str_buf_free(&stat_path);
  if (stat_file != -1) {
    close(stat_file);
  }

  glb_running_threads = running_threads;

  return running_threads;

} // __kmp_get_load_balance
#endif // KMP_OS_DARWIN

#endif // USE_LOAD_BALANCE

#if !(KMP_ARCH_X86 || KMP_ARCH_X86_64 || KMP_MIC ||                           \
      ((KMP_OS_LINUX || KMP_OS_DARWIN) && KMP_ARCH_AARCH64) ||                \
      KMP_ARCH_PPC64 || KMP_ARCH_RISCV64)

// We really only need the case with 1 argument, because CLANG always builds
// a struct of pointers to shared variables referenced in the outlined
// function.
int __kmp_invoke_microtask(microtask_t pkfn, int gtid, int tid, int argc,
                           void *p_argv[]
#if OMPT_SUPPORT
                           ,
                           void **exit_frame_ptr
#endif
) {
#if OMPT_SUPPORT
  *exit_frame_ptr = OMPT_GET_FRAME_ADDRESS(0);
#endif

  switch (argc) {
  default:
    fprintf(stderr, "Too many args to microtask: %d!\n", argc);
    fflush(stderr);
    exit(-1);
  case 0:
    (*pkfn)(&gtid, &tid);
    break;
  case 1:
    (*pkfn)(&gtid, &tid, p_argv[0]);
    break;
  case 2:
    (*pkfn)(&gtid, &tid, p_argv[0], p_argv[1]);
    break;
  case 3:
    (*pkfn)(&gtid, &tid, p_argv[0], p_argv[1], p_argv[2]);
    break;
  case 4:
    (*pkfn)(&gtid, &tid, p_argv[0], p_argv[1], p_argv[2], p_argv[3]);
    break;
  case 5:
    (*pkfn)(&gtid, &tid, p_argv[0], p_argv[1], p_argv[2], p_argv[3], p_argv[4]);
    break;
  case 6:
    (*pkfn)(&gtid, &tid, p_argv[0], p_argv[1], p_argv[2], p_argv[3], p_argv[4],
            p_argv[5]);
    break;
  case 7:
    (*pkfn)(&gtid, &tid, p_argv[0], p_argv[1], p_argv[2], p_argv[3], p_argv[4],
            p_argv[5], p_argv[6]);
    break;
  case 8:
    (*pkfn)(&gtid, &tid, p_argv[0], p_argv[1], p_argv[2], p_argv[3], p_argv[4],
            p_argv[5], p_argv[6], p_argv[7]);
    break;
  case 9:
    (*pkfn)(&gtid, &tid, p_argv[0], p_argv[1], p_argv[2], p_argv[3], p_argv[4],
            p_argv[5], p_argv[6], p_argv[7], p_argv[8]);
    break;
  case 10:
    (*pkfn)(&gtid, &tid, p_argv[0], p_argv[1], p_argv[2], p_argv[3], p_argv[4],
            p_argv[5], p_argv[6], p_argv[7], p_argv[8], p_argv[9]);
    break;
  case 11:
    (*pkfn)(&gtid, &tid, p_argv[0], p_argv[1], p_argv[2], p_argv[3], p_argv[4],
            p_argv[5], p_argv[6], p_argv[7], p_argv[8], p_argv[9], p_argv[10]);
    break;
  case 12:
    (*pkfn)(&gtid, &tid, p_argv[0], p_argv[1], p_argv[2], p_argv[3], p_argv[4],
            p_argv[5], p_argv[6], p_argv[7], p_argv[8], p_argv[9], p_argv[10],
            p_argv[11]);
    break;
  case 13:
    (*pkfn)(&gtid, &tid, p_argv[0], p_argv[1], p_argv[2], p_argv[3], p_argv[4],
            p_argv[5], p_argv[6], p_argv[7], p_argv[8], p_argv[9], p_argv[10],
            p_argv[11], p_argv[12]);
    break;
  case 14:
    (*pkfn)(&gtid, &tid, p_argv[0], p_argv[1], p_argv[2], p_argv[3], p_argv[4],
            p_argv[5], p_argv[6], p_argv[7], p_argv[8], p_argv[9], p_argv[10],
            p_argv[11], p_argv[12], p_argv[13]);
    break;
  case 15:
    (*pkfn)(&gtid, &tid, p_argv[0], p_argv[1], p_argv[2], p_argv[3], p_argv[4],
            p_argv[5], p_argv[6], p_argv[7], p_argv[8], p_argv[9], p_argv[10],
            p_argv[11], p_argv[12], p_argv[13], p_argv[14]);
    break;
  }

  return 1;
}

#endif
// Functions for hidden helper tasks
namespace {
// Condition variable used to wait for the initialization of hidden helper
// threads
pthread_cond_t hidden_helper_threads_initz_cond_var;
pthread_mutex_t hidden_helper_threads_initz_lock;
volatile int hidden_helper_initz_signaled = FALSE;

// Condition variable used to wait for the termination of hidden helper
// threads
pthread_cond_t hidden_helper_threads_deinitz_cond_var;
pthread_mutex_t hidden_helper_threads_deinitz_lock;
volatile int hidden_helper_deinitz_signaled = FALSE;

// Condition variable used to wake up the main thread of hidden helper teams
pthread_cond_t hidden_helper_main_thread_cond_var;
pthread_mutex_t hidden_helper_main_thread_lock;
volatile int hidden_helper_main_thread_signaled = FALSE;

// Semaphore used to wake up hidden helper threads to execute tasks
sem_t hidden_helper_task_sem;
} // namespace
void __kmp_hidden_helper_worker_thread_wait() {
  int status = sem_wait(&hidden_helper_task_sem);
  KMP_CHECK_SYSFAIL("sem_wait", status);
}
void __kmp_do_initialize_hidden_helper_threads() {
  // Initialize condition variables
  int status =
      pthread_cond_init(&hidden_helper_threads_initz_cond_var, nullptr);
  KMP_CHECK_SYSFAIL("pthread_cond_init", status);

  status = pthread_cond_init(&hidden_helper_threads_deinitz_cond_var, nullptr);
  KMP_CHECK_SYSFAIL("pthread_cond_init", status);

  status = pthread_cond_init(&hidden_helper_main_thread_cond_var, nullptr);
  KMP_CHECK_SYSFAIL("pthread_cond_init", status);

  status = pthread_mutex_init(&hidden_helper_threads_initz_lock, nullptr);
  KMP_CHECK_SYSFAIL("pthread_mutex_init", status);

  status = pthread_mutex_init(&hidden_helper_threads_deinitz_lock, nullptr);
  KMP_CHECK_SYSFAIL("pthread_mutex_init", status);

  status = pthread_mutex_init(&hidden_helper_main_thread_lock, nullptr);
  KMP_CHECK_SYSFAIL("pthread_mutex_init", status);

  // Initialize the semaphore
  status = sem_init(&hidden_helper_task_sem, 0, 0);
  KMP_CHECK_SYSFAIL("sem_init", status);

  // Create a new thread to finish initialization
  pthread_t handle;
  status = pthread_create(
      &handle, nullptr,
      [](void *) -> void * {
        __kmp_hidden_helper_threads_initz_routine();
        return nullptr;
      },
      nullptr);
  KMP_CHECK_SYSFAIL("pthread_create", status);
}
void __kmp_hidden_helper_threads_initz_wait() {
  // Initial thread waits here for the completion of the initialization. The
  // condition variable will be notified by the main thread of the hidden
  // helper teams.
  int status = pthread_mutex_lock(&hidden_helper_threads_initz_lock);
  KMP_CHECK_SYSFAIL("pthread_mutex_lock", status);

  if (!TCR_4(hidden_helper_initz_signaled)) {
    status = pthread_cond_wait(&hidden_helper_threads_initz_cond_var,
                               &hidden_helper_threads_initz_lock);
    KMP_CHECK_SYSFAIL("pthread_cond_wait", status);
  }

  status = pthread_mutex_unlock(&hidden_helper_threads_initz_lock);
  KMP_CHECK_SYSFAIL("pthread_mutex_unlock", status);
}
void __kmp_hidden_helper_initz_release() {
  // Signal the initial thread that initialization is complete.
  int status = pthread_mutex_lock(&hidden_helper_threads_initz_lock);
  KMP_CHECK_SYSFAIL("pthread_mutex_lock", status);

  status = pthread_cond_signal(&hidden_helper_threads_initz_cond_var);
  KMP_CHECK_SYSFAIL("pthread_cond_signal", status);

  TCW_SYNC_4(hidden_helper_initz_signaled, TRUE);

  status = pthread_mutex_unlock(&hidden_helper_threads_initz_lock);
  KMP_CHECK_SYSFAIL("pthread_mutex_unlock", status);
}
void __kmp_hidden_helper_main_thread_wait() {
  // The main thread of hidden helper teams waits here for the signal to stop
  // the hidden helper threads.
  int status = pthread_mutex_lock(&hidden_helper_main_thread_lock);
  KMP_CHECK_SYSFAIL("pthread_mutex_lock", status);

  if (!TCR_4(hidden_helper_main_thread_signaled)) {
    status = pthread_cond_wait(&hidden_helper_main_thread_cond_var,
                               &hidden_helper_main_thread_lock);
    KMP_CHECK_SYSFAIL("pthread_cond_wait", status);
  }

  status = pthread_mutex_unlock(&hidden_helper_main_thread_lock);
  KMP_CHECK_SYSFAIL("pthread_mutex_unlock", status);
}
void __kmp_hidden_helper_main_thread_release() {
  // The initial thread of OpenMP RTL should be notified.
  int status = pthread_mutex_lock(&hidden_helper_main_thread_lock);
  KMP_CHECK_SYSFAIL("pthread_mutex_lock", status);

  status = pthread_cond_signal(&hidden_helper_main_thread_cond_var);
  KMP_CHECK_SYSFAIL("pthread_cond_signal", status);

  // Maybe the main thread of the hidden helper team has not started yet; set
  // the flag to notify it that it has passed the barrier.
  TCW_SYNC_4(hidden_helper_main_thread_signaled, TRUE);

  status = pthread_mutex_unlock(&hidden_helper_main_thread_lock);
  KMP_CHECK_SYSFAIL("pthread_mutex_unlock", status);
}
void __kmp_hidden_helper_worker_thread_signal() {
  int status = sem_post(&hidden_helper_task_sem);
  KMP_CHECK_SYSFAIL("sem_post", status);
}
void __kmp_hidden_helper_threads_deinitz_wait() {
  // Initial thread waits here for the completion of the deinitialization. The
  // condition variable will be notified by the main thread of the hidden
  // helper teams.
  int status = pthread_mutex_lock(&hidden_helper_threads_deinitz_lock);
  KMP_CHECK_SYSFAIL("pthread_mutex_lock", status);

  if (!TCR_4(hidden_helper_deinitz_signaled)) {
    status = pthread_cond_wait(&hidden_helper_threads_deinitz_cond_var,
                               &hidden_helper_threads_deinitz_lock);
    KMP_CHECK_SYSFAIL("pthread_cond_wait", status);
  }

  status = pthread_mutex_unlock(&hidden_helper_threads_deinitz_lock);
  KMP_CHECK_SYSFAIL("pthread_mutex_unlock", status);
}
void __kmp_hidden_helper_threads_deinitz_release() {
  int status = pthread_mutex_lock(&hidden_helper_threads_deinitz_lock);
  KMP_CHECK_SYSFAIL("pthread_mutex_lock", status);

  status = pthread_cond_signal(&hidden_helper_threads_deinitz_cond_var);
  KMP_CHECK_SYSFAIL("pthread_cond_signal", status);

  TCW_SYNC_4(hidden_helper_deinitz_signaled, TRUE);

  status = pthread_mutex_unlock(&hidden_helper_threads_deinitz_lock);
  KMP_CHECK_SYSFAIL("pthread_mutex_unlock", status);
}