LLVM OpenMP* Runtime Library
kmp_tasking.cpp
1 /*
2  * kmp_tasking.cpp -- OpenMP 3.0 tasking support.
3  */
4 
5 //===----------------------------------------------------------------------===//
6 //
7 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
8 // See https://llvm.org/LICENSE.txt for license information.
9 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
10 //
11 //===----------------------------------------------------------------------===//
12 
13 #include "kmp.h"
14 #include "kmp_i18n.h"
15 #include "kmp_itt.h"
16 #include "kmp_stats.h"
17 #include "kmp_wait_release.h"
18 #include "kmp_taskdeps.h"
19 
20 #if OMPT_SUPPORT
21 #include "ompt-specific.h"
22 #endif
23 
24 #if ENABLE_LIBOMPTARGET
25 static void (*tgt_target_nowait_query)(void **);
26 
27 void __kmp_init_target_task() {
28  *(void **)(&tgt_target_nowait_query) = KMP_DLSYM("__tgt_target_nowait_query");
29 }
30 #endif
31 
32 /* forward declaration */
33 static void __kmp_enable_tasking(kmp_task_team_t *task_team,
34  kmp_info_t *this_thr);
35 static void __kmp_alloc_task_deque(kmp_info_t *thread,
36  kmp_thread_data_t *thread_data);
37 static int __kmp_realloc_task_threads_data(kmp_info_t *thread,
38  kmp_task_team_t *task_team);
39 static void __kmp_bottom_half_finish_proxy(kmp_int32 gtid, kmp_task_t *ptask);
40 #if OMPX_TASKGRAPH
41 static kmp_tdg_info_t *__kmp_find_tdg(kmp_int32 tdg_id);
42 int __kmp_taskloop_task(int gtid, void *ptask);
43 #endif
44 
45 #ifdef BUILD_TIED_TASK_STACK
46 
47 // __kmp_trace_task_stack: print the tied tasks from the task stack in order
48 // from top to bottom
49 //
50 // gtid: global thread identifier for thread containing stack
51 // thread_data: thread data for task team thread containing stack
52 // threshold: value above which the trace statement triggers
53 // location: string identifying call site of this function (for trace)
54 static void __kmp_trace_task_stack(kmp_int32 gtid,
55  kmp_thread_data_t *thread_data,
56  int threshold, char *location) {
57  kmp_task_stack_t *task_stack = &thread_data->td.td_susp_tied_tasks;
58  kmp_taskdata_t **stack_top = task_stack->ts_top;
59  kmp_int32 entries = task_stack->ts_entries;
60  kmp_taskdata_t *tied_task;
61 
62  KA_TRACE(
63  threshold,
64  ("__kmp_trace_task_stack(start): location = %s, gtid = %d, entries = %d, "
65  "first_block = %p, stack_top = %p \n",
66  location, gtid, entries, task_stack->ts_first_block, stack_top));
67 
68  KMP_DEBUG_ASSERT(stack_top != NULL);
69  KMP_DEBUG_ASSERT(entries > 0);
70 
71  while (entries != 0) {
72  KMP_DEBUG_ASSERT(stack_top != &task_stack->ts_first_block.sb_block[0]);
73  // fix up ts_top if we need to pop from previous block
74  if ((entries & TASK_STACK_INDEX_MASK) == 0) {
75  kmp_stack_block_t *stack_block = (kmp_stack_block_t *)(stack_top);
76 
77  stack_block = stack_block->sb_prev;
78  stack_top = &stack_block->sb_block[TASK_STACK_BLOCK_SIZE];
79  }
80 
81  // finish bookkeeping
82  stack_top--;
83  entries--;
84 
85  tied_task = *stack_top;
86 
87  KMP_DEBUG_ASSERT(tied_task != NULL);
88  KMP_DEBUG_ASSERT(tied_task->td_flags.tasktype == TASK_TIED);
89 
90  KA_TRACE(threshold,
91  ("__kmp_trace_task_stack(%s): gtid=%d, entry=%d, "
92  "stack_top=%p, tied_task=%p\n",
93  location, gtid, entries, stack_top, tied_task));
94  }
95  KMP_DEBUG_ASSERT(stack_top == &task_stack->ts_first_block.sb_block[0]);
96 
97  KA_TRACE(threshold,
98  ("__kmp_trace_task_stack(exit): location = %s, gtid = %d\n",
99  location, gtid));
100 }
101 
102 // __kmp_init_task_stack: initialize the task stack for the first time
103 // after a thread_data structure is created.
104 // It should not be necessary to do this again (assuming the stack works).
105 //
106 // gtid: global thread identifier of calling thread
107 // thread_data: thread data for task team thread containing stack
108 static void __kmp_init_task_stack(kmp_int32 gtid,
109  kmp_thread_data_t *thread_data) {
110  kmp_task_stack_t *task_stack = &thread_data->td.td_susp_tied_tasks;
111  kmp_stack_block_t *first_block;
112 
113  // set up the first block of the stack
114  first_block = &task_stack->ts_first_block;
115  task_stack->ts_top = (kmp_taskdata_t **)first_block;
116  memset((void *)first_block, '\0',
117  TASK_STACK_BLOCK_SIZE * sizeof(kmp_taskdata_t *));
118 
119  // initialize the stack to be empty
120  task_stack->ts_entries = TASK_STACK_EMPTY;
121  first_block->sb_next = NULL;
122  first_block->sb_prev = NULL;
123 }
124 
125 // __kmp_free_task_stack: free the task stack when thread_data is destroyed.
126 //
127 // gtid: global thread identifier for calling thread
128 // thread_data: thread info for thread containing stack
129 static void __kmp_free_task_stack(kmp_int32 gtid,
130  kmp_thread_data_t *thread_data) {
131  kmp_task_stack_t *task_stack = &thread_data->td.td_susp_tied_tasks;
132  kmp_stack_block_t *stack_block = &task_stack->ts_first_block;
133 
134  KMP_DEBUG_ASSERT(task_stack->ts_entries == TASK_STACK_EMPTY);
135  // free from the second block of the stack
136  while (stack_block != NULL) {
137  kmp_stack_block_t *next_block = (stack_block) ? stack_block->sb_next : NULL;
138 
139  stack_block->sb_next = NULL;
140  stack_block->sb_prev = NULL;
141  if (stack_block != &task_stack->ts_first_block) {
142  __kmp_thread_free(__kmp_thread_from_gtid(gtid),
143  stack_block); // free the block, if not the first
144  }
145  stack_block = next_block;
146  }
147  // initialize the stack to be empty
148  task_stack->ts_entries = 0;
149  task_stack->ts_top = NULL;
150 }
151 
152 // __kmp_push_task_stack: Push the tied task onto the task stack.
153 // Grow the stack if necessary by allocating another block.
154 //
155 // gtid: global thread identifier for calling thread
156 // thread: thread info for thread containing stack
157 // tied_task: the task to push on the stack
158 static void __kmp_push_task_stack(kmp_int32 gtid, kmp_info_t *thread,
159  kmp_taskdata_t *tied_task) {
160  // GEH - need to consider what to do if tt_threads_data not allocated yet
161  kmp_thread_data_t *thread_data =
162  &thread->th.th_task_team->tt.tt_threads_data[__kmp_tid_from_gtid(gtid)];
163  kmp_task_stack_t *task_stack = &thread_data->td.td_susp_tied_tasks;
164 
165  if (tied_task->td_flags.team_serial || tied_task->td_flags.tasking_ser) {
166  return; // Don't push anything on stack if team or team tasks are serialized
167  }
168 
169  KMP_DEBUG_ASSERT(tied_task->td_flags.tasktype == TASK_TIED);
170  KMP_DEBUG_ASSERT(task_stack->ts_top != NULL);
171 
172  KA_TRACE(20,
173  ("__kmp_push_task_stack(enter): GTID: %d; THREAD: %p; TASK: %p\n",
174  gtid, thread, tied_task));
175  // Store entry
176  *(task_stack->ts_top) = tied_task;
177 
178  // Do bookkeeping for next push
179  task_stack->ts_top++;
180  task_stack->ts_entries++;
181 
182  if ((task_stack->ts_entries & TASK_STACK_INDEX_MASK) == 0) {
183  // Find beginning of this task block
184  kmp_stack_block_t *stack_block =
185  (kmp_stack_block_t *)(task_stack->ts_top - TASK_STACK_BLOCK_SIZE);
186 
187  // Check if we already have a block
188  if (stack_block->sb_next !=
189  NULL) { // reset ts_top to beginning of next block
190  task_stack->ts_top = &stack_block->sb_next->sb_block[0];
191  } else { // Alloc new block and link it up
192  kmp_stack_block_t *new_block = (kmp_stack_block_t *)__kmp_thread_calloc(
193  thread, sizeof(kmp_stack_block_t));
194 
195  task_stack->ts_top = &new_block->sb_block[0];
196  stack_block->sb_next = new_block;
197  new_block->sb_prev = stack_block;
198  new_block->sb_next = NULL;
199 
200  KA_TRACE(
201  30,
202  ("__kmp_push_task_stack(): GTID: %d; TASK: %p; Alloc new block: %p\n",
203  gtid, tied_task, new_block));
204  }
205  }
206  KA_TRACE(20, ("__kmp_push_task_stack(exit): GTID: %d; TASK: %p\n", gtid,
207  tied_task));
208 }
209 
210 // __kmp_pop_task_stack: Pop the tied task from the task stack. Don't return
211 // the task, just check to make sure it matches the ending task passed in.
212 //
213 // gtid: global thread identifier for the calling thread
214 // thread: thread info structure containing stack
215 // ending_task: the task that is ending (should match the task popped off
216 //     the stack)
217 static void __kmp_pop_task_stack(kmp_int32 gtid, kmp_info_t *thread,
218  kmp_taskdata_t *ending_task) {
219  // GEH - need to consider what to do if tt_threads_data not allocated yet
220  kmp_thread_data_t *thread_data =
221  &thread->th.th_task_team->tt.tt_threads_data[__kmp_tid_from_gtid(gtid)];
222  kmp_task_stack_t *task_stack = &thread_data->td.td_susp_tied_tasks;
223  kmp_taskdata_t *tied_task;
224 
225  if (ending_task->td_flags.team_serial || ending_task->td_flags.tasking_ser) {
226  // Don't pop anything from stack if team or team tasks are serialized
227  return;
228  }
229 
230  KMP_DEBUG_ASSERT(task_stack->ts_top != NULL);
231  KMP_DEBUG_ASSERT(task_stack->ts_entries > 0);
232 
233  KA_TRACE(20, ("__kmp_pop_task_stack(enter): GTID: %d; THREAD: %p\n", gtid,
234  thread));
235 
236  // fix up ts_top if we need to pop from previous block
237  if ((task_stack->ts_entries & TASK_STACK_INDEX_MASK) == 0) {
238  kmp_stack_block_t *stack_block = (kmp_stack_block_t *)(task_stack->ts_top);
239 
240  stack_block = stack_block->sb_prev;
241  task_stack->ts_top = &stack_block->sb_block[TASK_STACK_BLOCK_SIZE];
242  }
243 
244  // finish bookkeeping
245  task_stack->ts_top--;
246  task_stack->ts_entries--;
247 
248  tied_task = *(task_stack->ts_top);
249 
250  KMP_DEBUG_ASSERT(tied_task != NULL);
251  KMP_DEBUG_ASSERT(tied_task->td_flags.tasktype == TASK_TIED);
252  KMP_DEBUG_ASSERT(tied_task == ending_task); // If we built the stack correctly
253 
254  KA_TRACE(20, ("__kmp_pop_task_stack(exit): GTID: %d; TASK: %p\n", gtid,
255  tied_task));
256  return;
257 }
258 #endif /* BUILD_TIED_TASK_STACK */
259 
260 // returns 1 if new task is allowed to execute, 0 otherwise
261 // checks Task Scheduling constraint (if requested) and
262 // mutexinoutset dependencies if any
263 static bool __kmp_task_is_allowed(int gtid, const kmp_int32 is_constrained,
264  const kmp_taskdata_t *tasknew,
265  const kmp_taskdata_t *taskcurr) {
266  if (is_constrained && (tasknew->td_flags.tiedness == TASK_TIED)) {
267  // Check if the candidate obeys the Task Scheduling Constraints (TSC)
268  // only descendant of all deferred tied tasks can be scheduled, checking
269  // the last one is enough, as it in turn is the descendant of all others
270  kmp_taskdata_t *current = taskcurr->td_last_tied;
271  KMP_DEBUG_ASSERT(current != NULL);
272  // check if the task is not suspended on barrier
273  if (current->td_flags.tasktype == TASK_EXPLICIT ||
274  current->td_taskwait_thread > 0) { // <= 0 on barrier
275  kmp_int32 level = current->td_level;
276  kmp_taskdata_t *parent = tasknew->td_parent;
277  while (parent != current && parent->td_level > level) {
278  // check generation up to the level of the current task
279  parent = parent->td_parent;
280  KMP_DEBUG_ASSERT(parent != NULL);
281  }
282  if (parent != current)
283  return false;
284  }
285  }
286  // Check mutexinoutset dependencies, acquire locks
287  kmp_depnode_t *node = tasknew->td_depnode;
288 #if OMPX_TASKGRAPH
289  if (!tasknew->is_taskgraph && UNLIKELY(node && (node->dn.mtx_num_locks > 0))) {
290 #else
291  if (UNLIKELY(node && (node->dn.mtx_num_locks > 0))) {
292 #endif
293  for (int i = 0; i < node->dn.mtx_num_locks; ++i) {
294  KMP_DEBUG_ASSERT(node->dn.mtx_locks[i] != NULL);
295  if (__kmp_test_lock(node->dn.mtx_locks[i], gtid))
296  continue;
297  // could not get the lock, release previous locks
298  for (int j = i - 1; j >= 0; --j)
299  __kmp_release_lock(node->dn.mtx_locks[j], gtid);
300  return false;
301  }
302  // negative num_locks means all locks acquired successfully
303  node->dn.mtx_num_locks = -node->dn.mtx_num_locks;
304  }
305  return true;
306 }
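
The mutexinoutset handling above is an all-or-nothing acquisition: on the first failed try-lock it releases every lock taken so far and reports the task as not runnable, so the whole set can be retried later (the runtime then records success by negating dn.mtx_num_locks). A minimal standalone sketch of the same idiom, using std::mutex instead of the runtime's kmp lock API (the function name is illustrative):

#include <cstddef>
#include <mutex>
#include <vector>

// Acquire every lock or none: on the first failure, back out the locks
// already held (in reverse order) and report failure for a later retry.
static bool try_acquire_all(std::vector<std::mutex *> &locks) {
  for (std::size_t i = 0; i < locks.size(); ++i) {
    if (locks[i]->try_lock())
      continue;
    for (std::size_t j = i; j-- > 0;)
      locks[j]->unlock();
    return false;
  }
  return true; // the caller now owns every lock
}
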
307 
308 // __kmp_realloc_task_deque:
309 // Re-allocates a task deque for a particular thread, copies the content from
310 // the old deque and adjusts the necessary data structures relating to the
311 // deque. This operation must be done with the deque_lock being held
312 static void __kmp_realloc_task_deque(kmp_info_t *thread,
313  kmp_thread_data_t *thread_data) {
314  kmp_int32 size = TASK_DEQUE_SIZE(thread_data->td);
315  KMP_DEBUG_ASSERT(TCR_4(thread_data->td.td_deque_ntasks) == size);
316  kmp_int32 new_size = 2 * size;
317 
318  KE_TRACE(10, ("__kmp_realloc_task_deque: T#%d reallocating deque[from %d to "
319  "%d] for thread_data %p\n",
320  __kmp_gtid_from_thread(thread), size, new_size, thread_data));
321 
322  kmp_taskdata_t **new_deque =
323  (kmp_taskdata_t **)__kmp_allocate(new_size * sizeof(kmp_taskdata_t *));
324 
325  int i, j;
326  for (i = thread_data->td.td_deque_head, j = 0; j < size;
327  i = (i + 1) & TASK_DEQUE_MASK(thread_data->td), j++)
328  new_deque[j] = thread_data->td.td_deque[i];
329 
330  __kmp_free(thread_data->td.td_deque);
331 
332  thread_data->td.td_deque_head = 0;
333  thread_data->td.td_deque_tail = size;
334  thread_data->td.td_deque = new_deque;
335  thread_data->td.td_deque_size = new_size;
336 }
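
Since the deque size is always a power of two, indices wrap with TASK_DEQUE_MASK rather than a modulo, and the copy above walks the live entries starting at the old head; the rebased deque therefore has head == 0 and tail == old size (the deque was full). The same grow-and-rebase step on a generic ring buffer, as a sketch with illustrative names:

#include <cstdlib>

struct ring {
  void **buf;
  int size; // always a power of two
  int head, tail, ntasks;
};

// Double a full power-of-two ring buffer, copying live entries in order
// starting at the old head; afterwards head == 0 and tail == old size.
static void ring_grow(ring *r) {
  int new_size = 2 * r->size;
  void **nb = (void **)std::malloc(new_size * sizeof(void *));
  for (int i = r->head, j = 0; j < r->size; i = (i + 1) & (r->size - 1), j++)
    nb[j] = r->buf[i];
  std::free(r->buf);
  r->buf = nb;
  r->head = 0;
  r->tail = r->size; // buffer was full, so old size entries are live
  r->size = new_size;
}
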
337 
338 static kmp_task_pri_t *__kmp_alloc_task_pri_list() {
339  kmp_task_pri_t *l = (kmp_task_pri_t *)__kmp_allocate(sizeof(kmp_task_pri_t));
340  kmp_thread_data_t *thread_data = &l->td;
341  __kmp_init_bootstrap_lock(&thread_data->td.td_deque_lock);
342  thread_data->td.td_deque_last_stolen = -1;
343  KE_TRACE(20, ("__kmp_alloc_task_pri_list: T#%d allocating deque[%d] "
344  "for thread_data %p\n",
345  __kmp_get_gtid(), INITIAL_TASK_DEQUE_SIZE, thread_data));
346  thread_data->td.td_deque = (kmp_taskdata_t **)__kmp_allocate(
347  INITIAL_TASK_DEQUE_SIZE * sizeof(kmp_taskdata_t *));
348  thread_data->td.td_deque_size = INITIAL_TASK_DEQUE_SIZE;
349  return l;
350 }
351 
352 // The function finds the deque of priority tasks with the given priority, or
353 // allocates a new deque and puts it into the sorted (high -> low) list of
354 // deques. Deques of non-default-priority tasks are shared among all threads
355 // in the team, as opposed to the per-thread deques of default-priority tasks.
356 // The function is called with the lock task_team->tt.tt_task_pri_lock held.
357 static kmp_thread_data_t *
358 __kmp_get_priority_deque_data(kmp_task_team_t *task_team, kmp_int32 pri) {
359  kmp_thread_data_t *thread_data;
360  kmp_task_pri_t *lst = task_team->tt.tt_task_pri_list;
361  if (lst->priority == pri) {
362  // Found queue of tasks with given priority.
363  thread_data = &lst->td;
364  } else if (lst->priority < pri) {
365  // All current priority queues contain tasks with lower priority.
366  // Allocate new one for given priority tasks.
367  kmp_task_pri_t *list = __kmp_alloc_task_pri_list();
368  thread_data = &list->td;
369  list->priority = pri;
370  list->next = lst;
371  task_team->tt.tt_task_pri_list = list;
372  } else { // task_team->tt.tt_task_pri_list->priority > pri
373  kmp_task_pri_t *next_queue = lst->next;
374  while (next_queue && next_queue->priority > pri) {
375  lst = next_queue;
376  next_queue = lst->next;
377  }
378 // lst->priority > pri && (next_queue == NULL || pri >= next_queue->priority)
379  if (next_queue == NULL) {
380  // No queue with pri priority, need to allocate new one.
381  kmp_task_pri_t *list = __kmp_alloc_task_pri_list();
382  thread_data = &list->td;
383  list->priority = pri;
384  list->next = NULL;
385  lst->next = list;
386  } else if (next_queue->priority == pri) {
387  // Found queue of tasks with given priority.
388  thread_data = &next_queue->td;
389  } else { // lst->priority > pri > next->priority
390 // insert the newly allocated queue between existing queues
391  kmp_task_pri_t *list = __kmp_alloc_task_pri_list();
392  thread_data = &list->td;
393  list->priority = pri;
394  list->next = next_queue;
395  lst->next = list;
396  }
397  }
398  return thread_data;
399 }
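
The three-way branch above (head match, front insert, middle/tail insert) implements a search-or-insert over a list kept sorted in descending priority order. The same operation collapses to a single loop with a pointer-to-pointer cursor; a sketch with illustrative types:

struct pnode {
  int priority;
  pnode *next;
};

// Find the node with the given priority in a descending-sorted list,
// splicing in a new node before the first lower-priority one if absent.
static pnode *find_or_insert(pnode **headp, int pri) {
  pnode **link = headp;
  while (*link && (*link)->priority > pri)
    link = &(*link)->next;
  if (*link && (*link)->priority == pri)
    return *link; // found the existing queue
  pnode *n = new pnode{pri, *link};
  *link = n;
  return n;
}
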
400 
401 // __kmp_push_priority_task: Add a task to the team's priority task deque
402 static kmp_int32 __kmp_push_priority_task(kmp_int32 gtid, kmp_info_t *thread,
403  kmp_taskdata_t *taskdata,
404  kmp_task_team_t *task_team,
405  kmp_int32 pri) {
406  kmp_thread_data_t *thread_data = NULL;
407  KA_TRACE(20,
408  ("__kmp_push_priority_task: T#%d trying to push task %p, pri %d.\n",
409  gtid, taskdata, pri));
410 
411  // Find task queue specific to priority value
412  kmp_task_pri_t *lst = task_team->tt.tt_task_pri_list;
413  if (UNLIKELY(lst == NULL)) {
414  __kmp_acquire_bootstrap_lock(&task_team->tt.tt_task_pri_lock);
415  if (task_team->tt.tt_task_pri_list == NULL) {
416  // List of queues is still empty, allocate one.
417  kmp_task_pri_t *list = __kmp_alloc_task_pri_list();
418  thread_data = &list->td;
419  list->priority = pri;
420  list->next = NULL;
421  task_team->tt.tt_task_pri_list = list;
422  } else {
423 // Another thread initialized a queue; find or allocate the deque for pri.
424  thread_data = __kmp_get_priority_deque_data(task_team, pri);
425  }
426  __kmp_release_bootstrap_lock(&task_team->tt.tt_task_pri_lock);
427  } else {
428  if (lst->priority == pri) {
429  // Found queue of tasks with given priority.
430  thread_data = &lst->td;
431  } else {
432  __kmp_acquire_bootstrap_lock(&task_team->tt.tt_task_pri_lock);
433  thread_data = __kmp_get_priority_deque_data(task_team, pri);
434  __kmp_release_bootstrap_lock(&task_team->tt.tt_task_pri_lock);
435  }
436  }
437  KMP_DEBUG_ASSERT(thread_data);
438 
439  __kmp_acquire_bootstrap_lock(&thread_data->td.td_deque_lock);
440  // Check if deque is full
441  if (TCR_4(thread_data->td.td_deque_ntasks) >=
442  TASK_DEQUE_SIZE(thread_data->td)) {
443  if (__kmp_enable_task_throttling &&
444  __kmp_task_is_allowed(gtid, __kmp_task_stealing_constraint, taskdata,
445  thread->th.th_current_task)) {
446  __kmp_release_bootstrap_lock(&thread_data->td.td_deque_lock);
447  KA_TRACE(20, ("__kmp_push_priority_task: T#%d deque is full; returning "
448  "TASK_NOT_PUSHED for task %p\n",
449  gtid, taskdata));
450  return TASK_NOT_PUSHED;
451  } else {
452  // expand deque to push the task which is not allowed to execute
453  __kmp_realloc_task_deque(thread, thread_data);
454  }
455  }
456  KMP_DEBUG_ASSERT(TCR_4(thread_data->td.td_deque_ntasks) <
457  TASK_DEQUE_SIZE(thread_data->td));
458  // Push taskdata.
459  thread_data->td.td_deque[thread_data->td.td_deque_tail] = taskdata;
460  // Wrap index.
461  thread_data->td.td_deque_tail =
462  (thread_data->td.td_deque_tail + 1) & TASK_DEQUE_MASK(thread_data->td);
463  TCW_4(thread_data->td.td_deque_ntasks,
464  TCR_4(thread_data->td.td_deque_ntasks) + 1); // Adjust task count
465  KMP_FSYNC_RELEASING(thread->th.th_current_task); // releasing self
466  KMP_FSYNC_RELEASING(taskdata); // releasing child
467  KA_TRACE(20, ("__kmp_push_priority_task: T#%d returning "
468  "TASK_SUCCESSFULLY_PUSHED: task=%p ntasks=%d head=%u tail=%u\n",
469  gtid, taskdata, thread_data->td.td_deque_ntasks,
470  thread_data->td.td_deque_head, thread_data->td.td_deque_tail));
471  __kmp_release_bootstrap_lock(&thread_data->td.td_deque_lock);
472  task_team->tt.tt_num_task_pri++; // atomic inc
473  return TASK_SUCCESSFULLY_PUSHED;
474 }
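
Note the double-checked pattern on tt_task_pri_list above: the list head is read without the lock on the fast path, then re-checked after acquiring tt_task_pri_lock before allocating, so two racing threads cannot both create the head node. The shape of the idiom in isolation, sketched with std::atomic and std::mutex (the runtime uses its own bootstrap locks instead):

#include <atomic>
#include <mutex>

static std::atomic<int *> list_head{nullptr};
static std::mutex init_lock;

int *get_or_create_head() {
  int *p = list_head.load(std::memory_order_acquire);
  if (p == nullptr) { // fast path missed; fall back to the lock
    std::lock_guard<std::mutex> guard(init_lock);
    p = list_head.load(std::memory_order_relaxed);
    if (p == nullptr) { // re-check under the lock before allocating
      p = new int(0);
      list_head.store(p, std::memory_order_release);
    }
  }
  return p;
}
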
475 
476 // __kmp_push_task: Add a task to the thread's deque
477 static kmp_int32 __kmp_push_task(kmp_int32 gtid, kmp_task_t *task) {
478  kmp_info_t *thread = __kmp_threads[gtid];
479  kmp_taskdata_t *taskdata = KMP_TASK_TO_TASKDATA(task);
480 
481  // If we encounter a hidden helper task, and the current thread is not a
482 // hidden helper thread, we have to hand the task to a hidden helper thread,
483 // starting from its shadow one.
484  if (UNLIKELY(taskdata->td_flags.hidden_helper &&
485  !KMP_HIDDEN_HELPER_THREAD(gtid))) {
486  kmp_int32 shadow_gtid = KMP_GTID_TO_SHADOW_GTID(gtid);
487  __kmpc_give_task(task, __kmp_tid_from_gtid(shadow_gtid));
488  // Signal the hidden helper threads.
489  __kmp_hidden_helper_worker_thread_signal();
490  return TASK_SUCCESSFULLY_PUSHED;
491  }
492 
493  kmp_task_team_t *task_team = thread->th.th_task_team;
494  kmp_int32 tid = __kmp_tid_from_gtid(gtid);
495  kmp_thread_data_t *thread_data;
496 
497  KA_TRACE(20,
498  ("__kmp_push_task: T#%d trying to push task %p.\n", gtid, taskdata));
499 
500  if (UNLIKELY(taskdata->td_flags.tiedness == TASK_UNTIED)) {
501  // untied task needs to increment counter so that the task structure is not
502  // freed prematurely
503  kmp_int32 counter = 1 + KMP_ATOMIC_INC(&taskdata->td_untied_count);
504  KMP_DEBUG_USE_VAR(counter);
505  KA_TRACE(
506  20,
507  ("__kmp_push_task: T#%d untied_count (%d) incremented for task %p\n",
508  gtid, counter, taskdata));
509  }
510 
511  // The first check avoids building task_team thread data if serialized
512  if (UNLIKELY(taskdata->td_flags.task_serial)) {
513  KA_TRACE(20, ("__kmp_push_task: T#%d team serialized; returning "
514  "TASK_NOT_PUSHED for task %p\n",
515  gtid, taskdata));
516  return TASK_NOT_PUSHED;
517  }
518 
519  // Now that serialized tasks have returned, we can assume that we are not in
520  // immediate exec mode
521  KMP_DEBUG_ASSERT(__kmp_tasking_mode != tskm_immediate_exec);
522  if (UNLIKELY(!KMP_TASKING_ENABLED(task_team))) {
523  __kmp_enable_tasking(task_team, thread);
524  }
525  KMP_DEBUG_ASSERT(TCR_4(task_team->tt.tt_found_tasks) == TRUE);
526  KMP_DEBUG_ASSERT(TCR_PTR(task_team->tt.tt_threads_data) != NULL);
527 
528  if (taskdata->td_flags.priority_specified && task->data2.priority > 0 &&
529  __kmp_max_task_priority > 0) {
530  int pri = KMP_MIN(task->data2.priority, __kmp_max_task_priority);
531  return __kmp_push_priority_task(gtid, thread, taskdata, task_team, pri);
532  }
533 
534  // Find tasking deque specific to encountering thread
535  thread_data = &task_team->tt.tt_threads_data[tid];
536 
537 // No lock needed since only the owner can allocate. If the task is
538 // hidden_helper, we don't need the lock either because the deque for hidden
539 // helper thread data has already been initialized.
540  if (UNLIKELY(thread_data->td.td_deque == NULL)) {
541  __kmp_alloc_task_deque(thread, thread_data);
542  }
543 
544  int locked = 0;
545  // Check if deque is full
546  if (TCR_4(thread_data->td.td_deque_ntasks) >=
547  TASK_DEQUE_SIZE(thread_data->td)) {
548  if (__kmp_enable_task_throttling &&
549  __kmp_task_is_allowed(gtid, __kmp_task_stealing_constraint, taskdata,
550  thread->th.th_current_task)) {
551  KA_TRACE(20, ("__kmp_push_task: T#%d deque is full; returning "
552  "TASK_NOT_PUSHED for task %p\n",
553  gtid, taskdata));
554  return TASK_NOT_PUSHED;
555  } else {
556  __kmp_acquire_bootstrap_lock(&thread_data->td.td_deque_lock);
557  locked = 1;
558  if (TCR_4(thread_data->td.td_deque_ntasks) >=
559  TASK_DEQUE_SIZE(thread_data->td)) {
560  // expand deque to push the task which is not allowed to execute
561  __kmp_realloc_task_deque(thread, thread_data);
562  }
563  }
564  }
565  // Lock the deque for the task push operation
566  if (!locked) {
567  __kmp_acquire_bootstrap_lock(&thread_data->td.td_deque_lock);
568 // Need to recheck as we can get a proxy task from a thread outside of OpenMP
569  if (TCR_4(thread_data->td.td_deque_ntasks) >=
570  TASK_DEQUE_SIZE(thread_data->td)) {
571  if (__kmp_enable_task_throttling &&
572  __kmp_task_is_allowed(gtid, __kmp_task_stealing_constraint, taskdata,
573  thread->th.th_current_task)) {
574  __kmp_release_bootstrap_lock(&thread_data->td.td_deque_lock);
575  KA_TRACE(20, ("__kmp_push_task: T#%d deque is full on 2nd check; "
576  "returning TASK_NOT_PUSHED for task %p\n",
577  gtid, taskdata));
578  return TASK_NOT_PUSHED;
579  } else {
580  // expand deque to push the task which is not allowed to execute
581  __kmp_realloc_task_deque(thread, thread_data);
582  }
583  }
584  }
585 // Must have room since no thread but the calling thread can add tasks
586  KMP_DEBUG_ASSERT(TCR_4(thread_data->td.td_deque_ntasks) <
587  TASK_DEQUE_SIZE(thread_data->td));
588 
589  thread_data->td.td_deque[thread_data->td.td_deque_tail] =
590  taskdata; // Push taskdata
591  // Wrap index.
592  thread_data->td.td_deque_tail =
593  (thread_data->td.td_deque_tail + 1) & TASK_DEQUE_MASK(thread_data->td);
594  TCW_4(thread_data->td.td_deque_ntasks,
595  TCR_4(thread_data->td.td_deque_ntasks) + 1); // Adjust task count
596  KMP_FSYNC_RELEASING(thread->th.th_current_task); // releasing self
597  KMP_FSYNC_RELEASING(taskdata); // releasing child
598  KA_TRACE(20, ("__kmp_push_task: T#%d returning TASK_SUCCESSFULLY_PUSHED: "
599  "task=%p ntasks=%d head=%u tail=%u\n",
600  gtid, taskdata, thread_data->td.td_deque_ntasks,
601  thread_data->td.td_deque_head, thread_data->td.td_deque_tail));
602 
603  __kmp_release_bootstrap_lock(&thread_data->td.td_deque_lock);
604 
605  return TASK_SUCCESSFULLY_PUSHED;
606 }
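
When the deque is full, the push path makes a policy decision rather than growing unconditionally: if throttling is enabled and __kmp_task_is_allowed says the task may legally run now, it returns TASK_NOT_PUSHED and the caller executes the task immediately; only a task that is not allowed to execute forces the deque to grow. The skeleton of that decision, with the runtime details elided (names are illustrative):

enum push_result { PUSHED, NOT_PUSHED };

// Bounded-deque policy: prefer immediate execution over unbounded growth,
// but grow when the task cannot legally run yet and must be deferred.
template <class Deque>
push_result push_or_throttle(Deque &dq, bool throttling_enabled,
                             bool task_may_run_now) {
  if (dq.full()) {
    if (throttling_enabled && task_may_run_now)
      return NOT_PUSHED; // caller runs the task immediately instead
    dq.grow(); // the task must be deferred, so make room
  }
  dq.push_tail();
  return PUSHED;
}
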
607 
608 // __kmp_pop_current_task_from_thread: restore the current task of the calling
609 // thread when the team ends
610 //
611 // this_thr: thread structure to set current_task in.
612 void __kmp_pop_current_task_from_thread(kmp_info_t *this_thr) {
613  KF_TRACE(10, ("__kmp_pop_current_task_from_thread(enter): T#%d "
614  "this_thread=%p, curtask=%p, "
615  "curtask_parent=%p\n",
616  0, this_thr, this_thr->th.th_current_task,
617  this_thr->th.th_current_task->td_parent));
618 
619  this_thr->th.th_current_task = this_thr->th.th_current_task->td_parent;
620 
621  KF_TRACE(10, ("__kmp_pop_current_task_from_thread(exit): T#%d "
622  "this_thread=%p, curtask=%p, "
623  "curtask_parent=%p\n",
624  0, this_thr, this_thr->th.th_current_task,
625  this_thr->th.th_current_task->td_parent));
626 }
627 
628 // __kmp_push_current_task_to_thread: set up the current task in the given
629 // thread for a new team
630 //
631 // this_thr: thread structure to set up
632 // team: team for implicit task data
633 // tid: thread within team to set up
634 void __kmp_push_current_task_to_thread(kmp_info_t *this_thr, kmp_team_t *team,
635  int tid) {
636 // The current task of the thread is the parent of the just-created implicit
637 // tasks of the new team
638  KF_TRACE(10, ("__kmp_push_current_task_to_thread(enter): T#%d this_thread=%p "
639  "curtask=%p "
640  "parent_task=%p\n",
641  tid, this_thr, this_thr->th.th_current_task,
642  team->t.t_implicit_task_taskdata[tid].td_parent));
643 
644  KMP_DEBUG_ASSERT(this_thr != NULL);
645 
646  if (tid == 0) {
647  if (this_thr->th.th_current_task != &team->t.t_implicit_task_taskdata[0]) {
648  team->t.t_implicit_task_taskdata[0].td_parent =
649  this_thr->th.th_current_task;
650  this_thr->th.th_current_task = &team->t.t_implicit_task_taskdata[0];
651  }
652  } else {
653  team->t.t_implicit_task_taskdata[tid].td_parent =
654  team->t.t_implicit_task_taskdata[0].td_parent;
655  this_thr->th.th_current_task = &team->t.t_implicit_task_taskdata[tid];
656  }
657 
658  KF_TRACE(10, ("__kmp_push_current_task_to_thread(exit): T#%d this_thread=%p "
659  "curtask=%p "
660  "parent_task=%p\n",
661  tid, this_thr, this_thr->th.th_current_task,
662  team->t.t_implicit_task_taskdata[tid].td_parent));
663 }
664 
665 // __kmp_task_start: bookkeeping for a task starting execution
666 //
667 // GTID: global thread id of calling thread
668 // task: task starting execution
669 // current_task: task suspending
670 static void __kmp_task_start(kmp_int32 gtid, kmp_task_t *task,
671  kmp_taskdata_t *current_task) {
672  kmp_taskdata_t *taskdata = KMP_TASK_TO_TASKDATA(task);
673  kmp_info_t *thread = __kmp_threads[gtid];
674 
675  KA_TRACE(10,
676  ("__kmp_task_start(enter): T#%d starting task %p: current_task=%p\n",
677  gtid, taskdata, current_task));
678 
679  KMP_DEBUG_ASSERT(taskdata->td_flags.tasktype == TASK_EXPLICIT);
680 
681  // mark currently executing task as suspended
682  // TODO: GEH - make sure root team implicit task is initialized properly.
683  // KMP_DEBUG_ASSERT( current_task -> td_flags.executing == 1 );
684  current_task->td_flags.executing = 0;
685 
686 // Add task to stack if tied
687 #ifdef BUILD_TIED_TASK_STACK
688  if (taskdata->td_flags.tiedness == TASK_TIED) {
689  __kmp_push_task_stack(gtid, thread, taskdata);
690  }
691 #endif /* BUILD_TIED_TASK_STACK */
692 
693  // mark starting task as executing and as current task
694  thread->th.th_current_task = taskdata;
695 
696  KMP_DEBUG_ASSERT(taskdata->td_flags.started == 0 ||
697  taskdata->td_flags.tiedness == TASK_UNTIED);
698  KMP_DEBUG_ASSERT(taskdata->td_flags.executing == 0 ||
699  taskdata->td_flags.tiedness == TASK_UNTIED);
700  taskdata->td_flags.started = 1;
701  taskdata->td_flags.executing = 1;
702  KMP_DEBUG_ASSERT(taskdata->td_flags.complete == 0);
703  KMP_DEBUG_ASSERT(taskdata->td_flags.freed == 0);
704 
705  // GEH TODO: shouldn't we pass some sort of location identifier here?
706  // APT: yes, we will pass location here.
707  // need to store current thread state (in a thread or taskdata structure)
708  // before setting work_state, otherwise wrong state is set after end of task
709 
710  KA_TRACE(10, ("__kmp_task_start(exit): T#%d task=%p\n", gtid, taskdata));
711 
712  return;
713 }
714 
715 #if OMPT_SUPPORT
716 //------------------------------------------------------------------------------
717 // __ompt_task_init:
718 // Initialize OMPT fields maintained by a task. This will only be called after
719 // ompt_start_tool, so we already know whether ompt is enabled or not.
720 
721 static inline void __ompt_task_init(kmp_taskdata_t *task, int tid) {
722  // The calls to __ompt_task_init already have the ompt_enabled condition.
723  task->ompt_task_info.task_data.value = 0;
724  task->ompt_task_info.frame.exit_frame = ompt_data_none;
725  task->ompt_task_info.frame.enter_frame = ompt_data_none;
726  task->ompt_task_info.frame.exit_frame_flags =
727  ompt_frame_runtime | ompt_frame_framepointer;
728  task->ompt_task_info.frame.enter_frame_flags =
729  ompt_frame_runtime | ompt_frame_framepointer;
730  task->ompt_task_info.dispatch_chunk.start = 0;
731  task->ompt_task_info.dispatch_chunk.iterations = 0;
732 }
733 
734 // __ompt_task_start:
735 // Build and trigger task-begin event
736 static inline void __ompt_task_start(kmp_task_t *task,
737  kmp_taskdata_t *current_task,
738  kmp_int32 gtid) {
739  kmp_taskdata_t *taskdata = KMP_TASK_TO_TASKDATA(task);
740  ompt_task_status_t status = ompt_task_switch;
741  if (__kmp_threads[gtid]->th.ompt_thread_info.ompt_task_yielded) {
742  status = ompt_task_yield;
743  __kmp_threads[gtid]->th.ompt_thread_info.ompt_task_yielded = 0;
744  }
745  /* let OMPT know that we're about to run this task */
746  if (ompt_enabled.ompt_callback_task_schedule) {
747  ompt_callbacks.ompt_callback(ompt_callback_task_schedule)(
748  &(current_task->ompt_task_info.task_data), status,
749  &(taskdata->ompt_task_info.task_data));
750  }
751  taskdata->ompt_task_info.scheduling_parent = current_task;
752 }
753 
754 // __ompt_task_finish:
755 // Build and trigger final task-schedule event
756 static inline void __ompt_task_finish(kmp_task_t *task,
757  kmp_taskdata_t *resumed_task,
758  ompt_task_status_t status) {
759  if (ompt_enabled.ompt_callback_task_schedule) {
760  kmp_taskdata_t *taskdata = KMP_TASK_TO_TASKDATA(task);
761  if (__kmp_omp_cancellation && taskdata->td_taskgroup &&
762  taskdata->td_taskgroup->cancel_request == cancel_taskgroup) {
763  status = ompt_task_cancel;
764  }
765 
766  /* let OMPT know that we're returning to the callee task */
767  ompt_callbacks.ompt_callback(ompt_callback_task_schedule)(
768  &(taskdata->ompt_task_info.task_data), status,
769  (resumed_task ? &(resumed_task->ompt_task_info.task_data) : NULL));
770  }
771 }
772 #endif
773 
774 template <bool ompt>
775 static void __kmpc_omp_task_begin_if0_template(ident_t *loc_ref, kmp_int32 gtid,
776  kmp_task_t *task,
777  void *frame_address,
778  void *return_address) {
779  kmp_taskdata_t *taskdata = KMP_TASK_TO_TASKDATA(task);
780  kmp_taskdata_t *current_task = __kmp_threads[gtid]->th.th_current_task;
781 
782  KA_TRACE(10, ("__kmpc_omp_task_begin_if0(enter): T#%d loc=%p task=%p "
783  "current_task=%p\n",
784  gtid, loc_ref, taskdata, current_task));
785 
786  if (UNLIKELY(taskdata->td_flags.tiedness == TASK_UNTIED)) {
787  // untied task needs to increment counter so that the task structure is not
788  // freed prematurely
789  kmp_int32 counter = 1 + KMP_ATOMIC_INC(&taskdata->td_untied_count);
790  KMP_DEBUG_USE_VAR(counter);
791  KA_TRACE(20, ("__kmpc_omp_task_begin_if0: T#%d untied_count (%d) "
792  "incremented for task %p\n",
793  gtid, counter, taskdata));
794  }
795 
796  taskdata->td_flags.task_serial =
797  1; // Execute this task immediately, not deferred.
798  __kmp_task_start(gtid, task, current_task);
799 
800 #if OMPT_SUPPORT
801  if (ompt) {
802  if (current_task->ompt_task_info.frame.enter_frame.ptr == NULL) {
803  current_task->ompt_task_info.frame.enter_frame.ptr =
804  taskdata->ompt_task_info.frame.exit_frame.ptr = frame_address;
805  current_task->ompt_task_info.frame.enter_frame_flags =
806  taskdata->ompt_task_info.frame.exit_frame_flags =
807  ompt_frame_application | ompt_frame_framepointer;
808  }
809  if (ompt_enabled.ompt_callback_task_create) {
810  ompt_task_info_t *parent_info = &(current_task->ompt_task_info);
811  ompt_callbacks.ompt_callback(ompt_callback_task_create)(
812  &(parent_info->task_data), &(parent_info->frame),
813  &(taskdata->ompt_task_info.task_data),
814  ompt_task_explicit | TASK_TYPE_DETAILS_FORMAT(taskdata), 0,
815  return_address);
816  }
817  __ompt_task_start(task, current_task, gtid);
818  }
819 #endif // OMPT_SUPPORT
820 
821  KA_TRACE(10, ("__kmpc_omp_task_begin_if0(exit): T#%d loc=%p task=%p,\n", gtid,
822  loc_ref, taskdata));
823 }
824 
825 #if OMPT_SUPPORT
826 OMPT_NOINLINE
827 static void __kmpc_omp_task_begin_if0_ompt(ident_t *loc_ref, kmp_int32 gtid,
828  kmp_task_t *task,
829  void *frame_address,
830  void *return_address) {
831  __kmpc_omp_task_begin_if0_template<true>(loc_ref, gtid, task, frame_address,
832  return_address);
833 }
834 #endif // OMPT_SUPPORT
835 
836 // __kmpc_omp_task_begin_if0: report that a given serialized task has started
837 // execution
838 //
839 // loc_ref: source location information; points to beginning of task block.
840 // gtid: global thread number.
841 // task: task thunk for the started task.
842 #ifdef __s390x__
843 // This is required for OMPT_GET_FRAME_ADDRESS(1) to compile on s390x.
844 // In order for it to work correctly, the caller also needs to be compiled with
845 // backchain. If a caller is compiled without backchain,
846 // OMPT_GET_FRAME_ADDRESS(1) will produce an incorrect value, but will not
847 // crash.
848 __attribute__((target("backchain")))
849 #endif
850 void __kmpc_omp_task_begin_if0(ident_t *loc_ref, kmp_int32 gtid,
851  kmp_task_t *task) {
852 #if OMPT_SUPPORT
853  if (UNLIKELY(ompt_enabled.enabled)) {
854  OMPT_STORE_RETURN_ADDRESS(gtid);
855  __kmpc_omp_task_begin_if0_ompt(loc_ref, gtid, task,
856  OMPT_GET_FRAME_ADDRESS(1),
857  OMPT_LOAD_RETURN_ADDRESS(gtid));
858  return;
859  }
860 #endif
861  __kmpc_omp_task_begin_if0_template<false>(loc_ref, gtid, task, NULL, NULL);
862 }
863 
864 #ifdef TASK_UNUSED
865 // __kmpc_omp_task_begin: report that a given task has started execution
866 // NEVER GENERATED BY COMPILER, DEPRECATED!!!
867 void __kmpc_omp_task_begin(ident_t *loc_ref, kmp_int32 gtid, kmp_task_t *task) {
868  kmp_taskdata_t *current_task = __kmp_threads[gtid]->th.th_current_task;
869 
870  KA_TRACE(
871  10,
872  ("__kmpc_omp_task_begin(enter): T#%d loc=%p task=%p current_task=%p\n",
873  gtid, loc_ref, KMP_TASK_TO_TASKDATA(task), current_task));
874 
875  __kmp_task_start(gtid, task, current_task);
876 
877  KA_TRACE(10, ("__kmpc_omp_task_begin(exit): T#%d loc=%p task=%p,\n", gtid,
878  loc_ref, KMP_TASK_TO_TASKDATA(task)));
879  return;
880 }
881 #endif // TASK_UNUSED
882 
883 // __kmp_free_task: free the current task space and the space for shareds
884 //
885 // gtid: Global thread ID of calling thread
886 // taskdata: task to free
887 // thread: thread data structure of caller
888 static void __kmp_free_task(kmp_int32 gtid, kmp_taskdata_t *taskdata,
889  kmp_info_t *thread) {
890  KA_TRACE(30, ("__kmp_free_task: T#%d freeing data from task %p\n", gtid,
891  taskdata));
892 
893  // Check to make sure all flags and counters have the correct values
894  KMP_DEBUG_ASSERT(taskdata->td_flags.tasktype == TASK_EXPLICIT);
895  KMP_DEBUG_ASSERT(taskdata->td_flags.executing == 0);
896  KMP_DEBUG_ASSERT(taskdata->td_flags.complete == 1);
897  KMP_DEBUG_ASSERT(taskdata->td_flags.freed == 0);
898  KMP_DEBUG_ASSERT(taskdata->td_allocated_child_tasks == 0 ||
899  taskdata->td_flags.task_serial == 1);
900  KMP_DEBUG_ASSERT(taskdata->td_incomplete_child_tasks == 0);
901  kmp_task_t *task = KMP_TASKDATA_TO_TASK(taskdata);
902  // Clear data to not be re-used later by mistake.
903  task->data1.destructors = NULL;
904  task->data2.priority = 0;
905 
906  taskdata->td_flags.freed = 1;
907 #if OMPX_TASKGRAPH
908  // do not free tasks in taskgraph
909  if (!taskdata->is_taskgraph) {
910 #endif
911 // deallocate the taskdata and shared variable blocks associated with this task
912 #if USE_FAST_MEMORY
913  __kmp_fast_free(thread, taskdata);
914 #else /* ! USE_FAST_MEMORY */
915  __kmp_thread_free(thread, taskdata);
916 #endif
917 #if OMPX_TASKGRAPH
918  } else {
919  taskdata->td_flags.complete = 0;
920  taskdata->td_flags.started = 0;
921  taskdata->td_flags.freed = 0;
922  taskdata->td_flags.executing = 0;
923  taskdata->td_flags.task_serial =
924  (taskdata->td_parent->td_flags.final ||
925  taskdata->td_flags.team_serial || taskdata->td_flags.tasking_ser);
926 
927  // taskdata->td_allow_completion_event.pending_events_count = 1;
928  KMP_ATOMIC_ST_RLX(&taskdata->td_untied_count, 0);
929  KMP_ATOMIC_ST_RLX(&taskdata->td_incomplete_child_tasks, 0);
930  // start at one because it counts the current task and its children
931  KMP_ATOMIC_ST_RLX(&taskdata->td_allocated_child_tasks, 1);
932  }
933 #endif
934 
935  KA_TRACE(20, ("__kmp_free_task: T#%d freed task %p\n", gtid, taskdata));
936 }
937 
938 // __kmp_free_task_and_ancestors: free the current task and ancestors without
939 // children
940 //
941 // gtid: Global thread ID of calling thread
942 // taskdata: task to free
943 // thread: thread data structure of caller
944 static void __kmp_free_task_and_ancestors(kmp_int32 gtid,
945  kmp_taskdata_t *taskdata,
946  kmp_info_t *thread) {
947  // Proxy tasks must always be allowed to free their parents
948  // because they can be run in background even in serial mode.
949  kmp_int32 team_serial =
950  (taskdata->td_flags.team_serial || taskdata->td_flags.tasking_ser) &&
951  !taskdata->td_flags.proxy;
952  KMP_DEBUG_ASSERT(taskdata->td_flags.tasktype == TASK_EXPLICIT);
953 
954  kmp_int32 children = KMP_ATOMIC_DEC(&taskdata->td_allocated_child_tasks) - 1;
955  KMP_DEBUG_ASSERT(children >= 0);
956 
957  // Now, go up the ancestor tree to see if any ancestors can now be freed.
958  while (children == 0) {
959  kmp_taskdata_t *parent_taskdata = taskdata->td_parent;
960 
961  KA_TRACE(20, ("__kmp_free_task_and_ancestors(enter): T#%d task %p complete "
962  "and freeing itself\n",
963  gtid, taskdata));
964 
965  // --- Deallocate my ancestor task ---
966  __kmp_free_task(gtid, taskdata, thread);
967 
968  taskdata = parent_taskdata;
969 
970  if (team_serial)
971  return;
972  // Stop checking ancestors at implicit task instead of walking up ancestor
973  // tree to avoid premature deallocation of ancestors.
974  if (taskdata->td_flags.tasktype == TASK_IMPLICIT) {
975  if (taskdata->td_dephash) { // do we need to cleanup dephash?
976  int children = KMP_ATOMIC_LD_ACQ(&taskdata->td_incomplete_child_tasks);
977  kmp_tasking_flags_t flags_old = taskdata->td_flags;
978  if (children == 0 && flags_old.complete == 1) {
979  kmp_tasking_flags_t flags_new = flags_old;
980  flags_new.complete = 0;
981  if (KMP_COMPARE_AND_STORE_ACQ32(
982  RCAST(kmp_int32 *, &taskdata->td_flags),
983  *RCAST(kmp_int32 *, &flags_old),
984  *RCAST(kmp_int32 *, &flags_new))) {
985  KA_TRACE(100, ("__kmp_free_task_and_ancestors: T#%d cleans "
986  "dephash of implicit task %p\n",
987  gtid, taskdata));
988  // cleanup dephash of finished implicit task
989  __kmp_dephash_free_entries(thread, taskdata->td_dephash);
990  }
991  }
992  }
993  return;
994  }
995  // Predecrement simulated by "- 1" calculation
996  children = KMP_ATOMIC_DEC(&taskdata->td_allocated_child_tasks) - 1;
997  KMP_DEBUG_ASSERT(children >= 0);
998  }
999 
1000  KA_TRACE(
1001  20, ("__kmp_free_task_and_ancestors(exit): T#%d task %p has %d children; "
1002  "not freeing it yet\n",
1003  gtid, taskdata, children));
1004 }
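
The dephash cleanup above flips the complete bit back to 0 with a 32-bit compare-and-swap over the whole td_flags bit-field, so exactly one thread performs the cleanup even if several observe children == 0 concurrently. The underlying trick of reinterpreting a flags struct as a 32-bit word for CAS, as a standalone sketch (assuming a trivially copyable flags struct that fits in exactly 32 bits):

#include <atomic>
#include <cstdint>
#include <cstring>

struct flags32 { // must occupy exactly 32 bits
  unsigned complete : 1;
  unsigned freed : 1;
  unsigned reserved : 30;
};

// Atomically clear 'complete' exactly once: true for the single winner of
// the race, false for every thread that saw the bit already cleared.
static bool clear_complete_once(std::atomic<std::uint32_t> *word) {
  std::uint32_t old_bits = word->load(std::memory_order_acquire);
  flags32 f;
  std::memcpy(&f, &old_bits, sizeof(f));
  if (!f.complete)
    return false;
  f.complete = 0;
  std::uint32_t new_bits;
  std::memcpy(&new_bits, &f, sizeof(new_bits));
  return word->compare_exchange_strong(old_bits, new_bits);
}
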
1005 
1006 // Only need to keep track of child task counts if any of the following:
1007 // 1. team parallel and tasking not serialized;
1008 // 2. it is a proxy or detachable or hidden helper task
1009 // 3. the children counter of its parent task is greater than 0.
1010 // The reason for the 3rd one is a serialized team that encountered a detached
1011 // or hidden helper task T. In this case, the execution of T is still deferred,
1012 // and it is also possible that a regular task depends on T; if we don't track
1013 // the children, task synchronization will be broken.
1014 static bool __kmp_track_children_task(kmp_taskdata_t *taskdata) {
1015  kmp_tasking_flags_t flags = taskdata->td_flags;
1016  bool ret = !(flags.team_serial || flags.tasking_ser);
1017  ret = ret || flags.proxy == TASK_PROXY ||
1018  flags.detachable == TASK_DETACHABLE || flags.hidden_helper;
1019  ret = ret ||
1020  KMP_ATOMIC_LD_ACQ(&taskdata->td_parent->td_incomplete_child_tasks) > 0;
1021 #if OMPX_TASKGRAPH
1022  if (taskdata->td_taskgroup && taskdata->is_taskgraph)
1023  ret = ret || KMP_ATOMIC_LD_ACQ(&taskdata->td_taskgroup->count) > 0;
1024 #endif
1025  return ret;
1026 }
1027 
1028 // __kmp_task_finish: bookkeeping to do when a task finishes execution
1029 //
1030 // gtid: global thread ID for calling thread
1031 // task: task to be finished
1032 // resumed_task: task to be resumed. (may be NULL if task is serialized)
1033 //
1034 // template<ompt>: effectively ompt_enabled.enabled != 0
1035 // the version with ompt=false is inlined, allowing the compiler to optimize
1036 // away all OMPT code in this case
1037 template <bool ompt>
1038 static void __kmp_task_finish(kmp_int32 gtid, kmp_task_t *task,
1039  kmp_taskdata_t *resumed_task) {
1040  kmp_taskdata_t *taskdata = KMP_TASK_TO_TASKDATA(task);
1041  kmp_info_t *thread = __kmp_threads[gtid];
1042  kmp_task_team_t *task_team =
1043  thread->th.th_task_team; // might be NULL for serial teams...
1044 #if OMPX_TASKGRAPH
1045  // cached to avoid a segfault from reading taskdata->td_flags after the task is freed when using vanilla taskloop
1046  bool is_taskgraph;
1047 #endif
1048 #if KMP_DEBUG
1049  kmp_int32 children = 0;
1050 #endif
1051  KA_TRACE(10, ("__kmp_task_finish(enter): T#%d finishing task %p and resuming "
1052  "task %p\n",
1053  gtid, taskdata, resumed_task));
1054 
1055  KMP_DEBUG_ASSERT(taskdata->td_flags.tasktype == TASK_EXPLICIT);
1056 
1057 #if OMPX_TASKGRAPH
1058  is_taskgraph = taskdata->is_taskgraph;
1059 #endif
1060 
1061 // Pop task from stack if tied
1062 #ifdef BUILD_TIED_TASK_STACK
1063  if (taskdata->td_flags.tiedness == TASK_TIED) {
1064  __kmp_pop_task_stack(gtid, thread, taskdata);
1065  }
1066 #endif /* BUILD_TIED_TASK_STACK */
1067 
1068  if (UNLIKELY(taskdata->td_flags.tiedness == TASK_UNTIED)) {
1069  // untied task needs to check the counter so that the task structure is not
1070  // freed prematurely
1071  kmp_int32 counter = KMP_ATOMIC_DEC(&taskdata->td_untied_count) - 1;
1072  KA_TRACE(
1073  20,
1074  ("__kmp_task_finish: T#%d untied_count (%d) decremented for task %p\n",
1075  gtid, counter, taskdata));
1076  if (counter > 0) {
1077  // untied task is not done, to be continued possibly by other thread, do
1078  // not free it now
1079  if (resumed_task == NULL) {
1080  KMP_DEBUG_ASSERT(taskdata->td_flags.task_serial);
1081  resumed_task = taskdata->td_parent; // In a serialized task, the resumed
1082  // task is the parent
1083  }
1084  thread->th.th_current_task = resumed_task; // restore current_task
1085  resumed_task->td_flags.executing = 1; // resume previous task
1086  KA_TRACE(10, ("__kmp_task_finish(exit): T#%d partially done task %p, "
1087  "resuming task %p\n",
1088  gtid, taskdata, resumed_task));
1089  return;
1090  }
1091  }
1092 
1093  // bookkeeping for resuming task:
1094  // GEH - note tasking_ser => task_serial
1095  KMP_DEBUG_ASSERT(
1096  (taskdata->td_flags.tasking_ser || taskdata->td_flags.task_serial) ==
1097  taskdata->td_flags.task_serial);
1098  if (taskdata->td_flags.task_serial) {
1099  if (resumed_task == NULL) {
1100  resumed_task = taskdata->td_parent; // In a serialized task, the resumed
1101  // task is the parent
1102  }
1103  } else {
1104  KMP_DEBUG_ASSERT(resumed_task !=
1105  NULL); // verify that resumed task is passed as argument
1106  }
1107 
1108  /* If the tasks' destructor thunk flag has been set, we need to invoke the
1109  destructor thunk that has been generated by the compiler. The code is
1110  placed here, since at this point other tasks might have been released
1111  hence overlapping the destructor invocations with some other work in the
1112  released tasks. The OpenMP spec is not specific on when the destructors
1113  are invoked, so we should be free to choose. */
1114  if (UNLIKELY(taskdata->td_flags.destructors_thunk)) {
1115  kmp_routine_entry_t destr_thunk = task->data1.destructors;
1116  KMP_ASSERT(destr_thunk);
1117  destr_thunk(gtid, task);
1118  }
1119 
1120  KMP_DEBUG_ASSERT(taskdata->td_flags.complete == 0);
1121  KMP_DEBUG_ASSERT(taskdata->td_flags.started == 1);
1122  KMP_DEBUG_ASSERT(taskdata->td_flags.freed == 0);
1123 
1124  bool completed = true;
1125  if (UNLIKELY(taskdata->td_flags.detachable == TASK_DETACHABLE)) {
1126  if (taskdata->td_allow_completion_event.type ==
1127  KMP_EVENT_ALLOW_COMPLETION) {
1128  // event hasn't been fulfilled yet. Try to detach task.
1129  __kmp_acquire_tas_lock(&taskdata->td_allow_completion_event.lock, gtid);
1130  if (taskdata->td_allow_completion_event.type ==
1131  KMP_EVENT_ALLOW_COMPLETION) {
1132  // task finished execution
1133  KMP_DEBUG_ASSERT(taskdata->td_flags.executing == 1);
1134  taskdata->td_flags.executing = 0; // suspend the finishing task
1135 
1136 #if OMPT_SUPPORT
1137  // For a detached task that is not yet completed, we report the switch
1138  // now; the later omp_fulfill_event signals completion.
1139  // Locking is necessary to avoid a race with ompt_task_late_fulfill.
1140  if (ompt)
1141  __ompt_task_finish(task, resumed_task, ompt_task_detach);
1142 #endif
1143 
1144  // no access to taskdata after this point!
1145  // __kmp_fulfill_event might free taskdata at any time from now
1146 
1147  taskdata->td_flags.proxy = TASK_PROXY; // proxify!
1148  completed = false;
1149  }
1150  __kmp_release_tas_lock(&taskdata->td_allow_completion_event.lock, gtid);
1151  }
1152  }
1153 
1154  // Tasks with valid target async handles must be re-enqueued.
1155  if (taskdata->td_target_data.async_handle != NULL) {
1156  // Note: no need to translate gtid to its shadow. If the current thread is a
1157  // hidden helper one, then the gtid is already correct. Otherwise, hidden
1158  // helper threads are disabled, and gtid refers to an OpenMP thread.
1159  __kmpc_give_task(task, __kmp_tid_from_gtid(gtid));
1160  if (KMP_HIDDEN_HELPER_THREAD(gtid))
1161  __kmp_hidden_helper_worker_thread_signal();
1162  completed = false;
1163  }
1164 
1165  if (completed) {
1166  taskdata->td_flags.complete = 1; // mark the task as completed
1167 #if OMPX_TASKGRAPH
1168  taskdata->td_flags.onced = 1; // mark the task as ran once already
1169 #endif
1170 
1171 #if OMPT_SUPPORT
1172  // This is not a detached task, we are done here
1173  if (ompt)
1174  __ompt_task_finish(task, resumed_task, ompt_task_complete);
1175 #endif
1176  // TODO: What would be the balance between the conditions in the function
1177  // and an atomic operation?
1178  if (__kmp_track_children_task(taskdata)) {
1179  __kmp_release_deps(gtid, taskdata);
1180  // Predecrement simulated by "- 1" calculation
1181 #if KMP_DEBUG
1182  children = -1 +
1183 #endif
1184  KMP_ATOMIC_DEC(&taskdata->td_parent->td_incomplete_child_tasks);
1185  KMP_DEBUG_ASSERT(children >= 0);
1186 #if OMPX_TASKGRAPH
1187  if (taskdata->td_taskgroup && !taskdata->is_taskgraph)
1188 #else
1189  if (taskdata->td_taskgroup)
1190 #endif
1191  KMP_ATOMIC_DEC(&taskdata->td_taskgroup->count);
1192  } else if (task_team && (task_team->tt.tt_found_proxy_tasks ||
1193  task_team->tt.tt_hidden_helper_task_encountered)) {
1194  // if we found proxy or hidden helper tasks there could exist a dependency
1195  // chain with the proxy task as origin
1196  __kmp_release_deps(gtid, taskdata);
1197  }
1198  // td_flags.executing must be marked as 0 after __kmp_release_deps has been
1199  // called. Otherwise, if a task is executed immediately from the
1200  // release_deps code, the flag will be reset to 1 again by this same
1201  // function
1202  KMP_DEBUG_ASSERT(taskdata->td_flags.executing == 1);
1203  taskdata->td_flags.executing = 0; // suspend the finishing task
1204 
1205  // Decrement the counter of hidden helper tasks to be executed.
1206  if (taskdata->td_flags.hidden_helper) {
1207  // Hidden helper tasks can only be executed by hidden helper threads.
1208  KMP_ASSERT(KMP_HIDDEN_HELPER_THREAD(gtid));
1209  KMP_ATOMIC_DEC(&__kmp_unexecuted_hidden_helper_tasks);
1210  }
1211  }
1212 
1213  KA_TRACE(
1214  20, ("__kmp_task_finish: T#%d finished task %p, %d incomplete children\n",
1215  gtid, taskdata, children));
1216 
1217  // Free this task and then ancestor tasks if they have no children.
1218  // Restore th_current_task first as suggested by John:
1219  // johnmc: if an asynchronous inquiry peers into the runtime system
1220  // it doesn't see the freed task as the current task.
1221  thread->th.th_current_task = resumed_task;
1222  if (completed)
1223  __kmp_free_task_and_ancestors(gtid, taskdata, thread);
1224 
1225  // TODO: GEH - make sure root team implicit task is initialized properly.
1226  // KMP_DEBUG_ASSERT( resumed_task->td_flags.executing == 0 );
1227  resumed_task->td_flags.executing = 1; // resume previous task
1228 
1229 #if OMPX_TASKGRAPH
1230  if (is_taskgraph && __kmp_track_children_task(taskdata) &&
1231  taskdata->td_taskgroup) {
1232  // TDG: we only release taskgroup barrier here because
1233  // free_task_and_ancestors will call
1234  // __kmp_free_task, which resets all task parameters such as
1235  // taskdata->started, etc. If we release the barrier earlier, these
1236  // parameters could be read before being reset. This is not an issue for
1237  // non-TDG implementation because we never reuse a task(data) structure
1238  KMP_ATOMIC_DEC(&taskdata->td_taskgroup->count);
1239  }
1240 #endif
1241 
1242  KA_TRACE(
1243  10, ("__kmp_task_finish(exit): T#%d finished task %p, resuming task %p\n",
1244  gtid, taskdata, resumed_task));
1245 
1246  return;
1247 }
1248 
1249 template <bool ompt>
1250 static void __kmpc_omp_task_complete_if0_template(ident_t *loc_ref,
1251  kmp_int32 gtid,
1252  kmp_task_t *task) {
1253  KA_TRACE(10, ("__kmpc_omp_task_complete_if0(enter): T#%d loc=%p task=%p\n",
1254  gtid, loc_ref, KMP_TASK_TO_TASKDATA(task)));
1255  KMP_DEBUG_ASSERT(gtid >= 0);
1256  // this routine will provide task to resume
1257  __kmp_task_finish<ompt>(gtid, task, NULL);
1258 
1259  KA_TRACE(10, ("__kmpc_omp_task_complete_if0(exit): T#%d loc=%p task=%p\n",
1260  gtid, loc_ref, KMP_TASK_TO_TASKDATA(task)));
1261 
1262 #if OMPT_SUPPORT
1263  if (ompt) {
1264  ompt_frame_t *ompt_frame;
1265  __ompt_get_task_info_internal(0, NULL, NULL, &ompt_frame, NULL, NULL);
1266  ompt_frame->enter_frame = ompt_data_none;
1267  ompt_frame->enter_frame_flags =
1268  ompt_frame_runtime | ompt_frame_framepointer;
1269  }
1270 #endif
1271 
1272  return;
1273 }
1274 
1275 #if OMPT_SUPPORT
1276 OMPT_NOINLINE
1277 void __kmpc_omp_task_complete_if0_ompt(ident_t *loc_ref, kmp_int32 gtid,
1278  kmp_task_t *task) {
1279  __kmpc_omp_task_complete_if0_template<true>(loc_ref, gtid, task);
1280 }
1281 #endif // OMPT_SUPPORT
1282 
1283 // __kmpc_omp_task_complete_if0: report that a task has completed execution
1284 //
1285 // loc_ref: source location information; points to end of task block.
1286 // gtid: global thread number.
1287 // task: task thunk for the completed task.
1288 void __kmpc_omp_task_complete_if0(ident_t *loc_ref, kmp_int32 gtid,
1289  kmp_task_t *task) {
1290 #if OMPT_SUPPORT
1291  if (UNLIKELY(ompt_enabled.enabled)) {
1292  __kmpc_omp_task_complete_if0_ompt(loc_ref, gtid, task);
1293  return;
1294  }
1295 #endif
1296  __kmpc_omp_task_complete_if0_template<false>(loc_ref, gtid, task);
1297 }
1298 
1299 #ifdef TASK_UNUSED
1300 // __kmpc_omp_task_complete: report that a task has completed execution
1301 // NEVER GENERATED BY COMPILER, DEPRECATED!!!
1302 void __kmpc_omp_task_complete(ident_t *loc_ref, kmp_int32 gtid,
1303  kmp_task_t *task) {
1304  KA_TRACE(10, ("__kmpc_omp_task_complete(enter): T#%d loc=%p task=%p\n", gtid,
1305  loc_ref, KMP_TASK_TO_TASKDATA(task)));
1306 
1307  __kmp_task_finish<false>(gtid, task,
1308  NULL); // Not sure how to find task to resume
1309 
1310  KA_TRACE(10, ("__kmpc_omp_task_complete(exit): T#%d loc=%p task=%p\n", gtid,
1311  loc_ref, KMP_TASK_TO_TASKDATA(task)));
1312  return;
1313 }
1314 #endif // TASK_UNUSED
1315 
1316 // __kmp_init_implicit_task: Initialize the appropriate fields in the implicit
1317 // task for a given thread
1318 //
1319 // loc_ref: reference to source location of parallel region
1320 // this_thr: thread data structure corresponding to implicit task
1321 // team: team for this_thr
1322 // tid: thread id of given thread within team
1323 // set_curr_task: TRUE if need to push current task to thread
1324 // NOTE: Routine does not set up the implicit task ICVs. This is assumed to
1325 // have already been done elsewhere.
1326 // TODO: Get better loc_ref. Value passed in may be NULL
1327 void __kmp_init_implicit_task(ident_t *loc_ref, kmp_info_t *this_thr,
1328  kmp_team_t *team, int tid, int set_curr_task) {
1329  kmp_taskdata_t *task = &team->t.t_implicit_task_taskdata[tid];
1330 
1331  KF_TRACE(
1332  10,
1333  ("__kmp_init_implicit_task(enter): T#:%d team=%p task=%p, reinit=%s\n",
1334  tid, team, task, set_curr_task ? "TRUE" : "FALSE"));
1335 
1336  task->td_task_id = KMP_GEN_TASK_ID();
1337  task->td_team = team;
1338  // task->td_parent = NULL; // fix for CQ230101 (broken parent task info
1339  // in debugger)
1340  task->td_ident = loc_ref;
1341  task->td_taskwait_ident = NULL;
1342  task->td_taskwait_counter = 0;
1343  task->td_taskwait_thread = 0;
1344 
1345  task->td_flags.tiedness = TASK_TIED;
1346  task->td_flags.tasktype = TASK_IMPLICIT;
1347  task->td_flags.proxy = TASK_FULL;
1348 
1349  // All implicit tasks are executed immediately, not deferred
1350  task->td_flags.task_serial = 1;
1351  task->td_flags.tasking_ser = (__kmp_tasking_mode == tskm_immediate_exec);
1352  task->td_flags.team_serial = (team->t.t_serialized) ? 1 : 0;
1353 
1354  task->td_flags.started = 1;
1355  task->td_flags.executing = 1;
1356  task->td_flags.complete = 0;
1357  task->td_flags.freed = 0;
1358 #if OMPX_TASKGRAPH
1359  task->td_flags.onced = 0;
1360 #endif
1361 
1362  task->td_depnode = NULL;
1363  task->td_last_tied = task;
1364  task->td_allow_completion_event.type = KMP_EVENT_UNINITIALIZED;
1365 
1366  if (set_curr_task) { // only do this init first time thread is created
1367  KMP_ATOMIC_ST_REL(&task->td_incomplete_child_tasks, 0);
1368  // Not used: don't need to deallocate implicit task
1369  KMP_ATOMIC_ST_REL(&task->td_allocated_child_tasks, 0);
1370  task->td_taskgroup = NULL; // An implicit task does not have taskgroup
1371  task->td_dephash = NULL;
1372  __kmp_push_current_task_to_thread(this_thr, team, tid);
1373  } else {
1374  KMP_DEBUG_ASSERT(task->td_incomplete_child_tasks == 0);
1375  KMP_DEBUG_ASSERT(task->td_allocated_child_tasks == 0);
1376  }
1377 
1378 #if OMPT_SUPPORT
1379  if (UNLIKELY(ompt_enabled.enabled))
1380  __ompt_task_init(task, tid);
1381 #endif
1382 
1383  KF_TRACE(10, ("__kmp_init_implicit_task(exit): T#:%d team=%p task=%p\n", tid,
1384  team, task));
1385 }
1386 
1387 // __kmp_finish_implicit_task: Release resources associated with implicit tasks
1388 // at the end of parallel regions. Some resources are kept for reuse in the next
1389 // parallel region.
1390 //
1391 // thread: thread data structure corresponding to implicit task
1392 void __kmp_finish_implicit_task(kmp_info_t *thread) {
1393  kmp_taskdata_t *task = thread->th.th_current_task;
1394  if (task->td_dephash) {
1395  int children;
1396  task->td_flags.complete = 1;
1397 #if OMPX_TASKGRAPH
1398  task->td_flags.onced = 1;
1399 #endif
1400  children = KMP_ATOMIC_LD_ACQ(&task->td_incomplete_child_tasks);
1401  kmp_tasking_flags_t flags_old = task->td_flags;
1402  if (children == 0 && flags_old.complete == 1) {
1403  kmp_tasking_flags_t flags_new = flags_old;
1404  flags_new.complete = 0;
1405  if (KMP_COMPARE_AND_STORE_ACQ32(RCAST(kmp_int32 *, &task->td_flags),
1406  *RCAST(kmp_int32 *, &flags_old),
1407  *RCAST(kmp_int32 *, &flags_new))) {
1408  KA_TRACE(100, ("__kmp_finish_implicit_task: T#%d cleans "
1409  "dephash of implicit task %p\n",
1410  thread->th.th_info.ds.ds_gtid, task));
1411  __kmp_dephash_free_entries(thread, task->td_dephash);
1412  }
1413  }
1414  }
1415 }
1416 
1417 // __kmp_free_implicit_task: Release resources associated to implicit tasks
1418 // when these are destroyed
1419 //
1420 // thread: thread data structure corresponding to implicit task
1421 void __kmp_free_implicit_task(kmp_info_t *thread) {
1422  kmp_taskdata_t *task = thread->th.th_current_task;
1423  if (task && task->td_dephash) {
1424  __kmp_dephash_free(thread, task->td_dephash);
1425  task->td_dephash = NULL;
1426  }
1427 }
1428 
1429 // Round up a size to a multiple of val (a power of two): used to insert padding
1430 // between structures co-allocated using a single malloc() call
1431 static size_t __kmp_round_up_to_val(size_t size, size_t val) {
1432  if (size & (val - 1)) {
1433  size &= ~(val - 1);
1434  if (size <= KMP_SIZE_T_MAX - val) {
1435  size += val; // Round up if there is no overflow.
1436  }
1437  }
1438  return size;
1439 } // __kmp_round_up_to_val
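
// A worked example of the rounding above (illustrative comment only, not part
// of the runtime): with val == 8 == sizeof(void *),
//
//   __kmp_round_up_to_val(41, 8) == 48   // 41 & ~7 -> 40, then +8 -> 48
//   __kmp_round_up_to_val(48, 8) == 48   // already a multiple of 8, unchanged
//
// The KMP_SIZE_T_MAX check only skips the "+ val" step when it would overflow.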
1440 
1441 // __kmp_task_alloc: Allocate the taskdata and task data structures for a task
1442 //
1443 // loc_ref: source location information
1444 // gtid: global thread number.
1445 // flags: include tiedness & task type (explicit vs. implicit) of the ''new''
1446 // task encountered. Converted from kmp_int32 to kmp_tasking_flags_t in routine.
1447 // sizeof_kmp_task_t: Size in bytes of kmp_task_t data structure including
1448 // private vars accessed in task.
1449 // sizeof_shareds: Size in bytes of array of pointers to shared vars accessed
1450 // in task.
1451 // task_entry: Pointer to task code entry point generated by compiler.
1452 // returns: a pointer to the allocated kmp_task_t structure (task).
1453 kmp_task_t *__kmp_task_alloc(ident_t *loc_ref, kmp_int32 gtid,
1454  kmp_tasking_flags_t *flags,
1455  size_t sizeof_kmp_task_t, size_t sizeof_shareds,
1456  kmp_routine_entry_t task_entry) {
1457  kmp_task_t *task;
1458  kmp_taskdata_t *taskdata;
1459  kmp_info_t *thread = __kmp_threads[gtid];
1460  kmp_team_t *team = thread->th.th_team;
1461  kmp_taskdata_t *parent_task = thread->th.th_current_task;
1462  size_t shareds_offset;
1463 
1464  if (UNLIKELY(!TCR_4(__kmp_init_middle)))
1465  __kmp_middle_initialize();
1466 
1467  if (flags->hidden_helper) {
1468  if (__kmp_enable_hidden_helper) {
1469  if (!TCR_4(__kmp_init_hidden_helper))
1470  __kmp_hidden_helper_initialize();
1471  } else {
1472  // If the hidden helper task is not enabled, reset the flag to FALSE.
1473  flags->hidden_helper = FALSE;
1474  }
1475  }
1476 
1477  KA_TRACE(10, ("__kmp_task_alloc(enter): T#%d loc=%p, flags=(0x%x) "
1478  "sizeof_task=%ld sizeof_shared=%ld entry=%p\n",
1479  gtid, loc_ref, *((kmp_int32 *)flags), sizeof_kmp_task_t,
1480  sizeof_shareds, task_entry));
1481 
1482  KMP_DEBUG_ASSERT(parent_task);
1483  if (parent_task->td_flags.final) {
1484  if (flags->merged_if0) {
1485  }
1486  flags->final = 1;
1487  }
1488 
1489  if (flags->tiedness == TASK_UNTIED && !team->t.t_serialized) {
1490  // Untied task encountered causes the TSC algorithm to check entire deque of
1491  // the victim thread. If no untied task encountered, then checking the head
1492  // of the deque should be enough.
1493  KMP_CHECK_UPDATE(thread->th.th_task_team->tt.tt_untied_task_encountered, 1);
1494  }
1495 
1496  // Detachable tasks are not proxy tasks yet but could be in the future.
1497  // Doing the tasking setup when that happens is too late.
1499  if (UNLIKELY(flags->proxy == TASK_PROXY ||
1500  flags->detachable == TASK_DETACHABLE || flags->hidden_helper)) {
1501  if (flags->proxy == TASK_PROXY) {
1502  flags->tiedness = TASK_UNTIED;
1503  flags->merged_if0 = 1;
1504  }
1505  /* are we running in a sequential parallel or tskm_immediate_exec... we need
1506  tasking support enabled */
1507  if ((thread->th.th_task_team) == NULL) {
1508  /* This should only happen if the team is serialized
1509  setup a task team and propagate it to the thread */
1510  KMP_DEBUG_ASSERT(team->t.t_serialized);
1511  KA_TRACE(30,
1512  ("T#%d creating task team in __kmp_task_alloc for proxy task\n",
1513  gtid));
1514  // 1 indicates setup the current team regardless of nthreads
1515  __kmp_task_team_setup(thread, team, 1);
1516  thread->th.th_task_team = team->t.t_task_team[thread->th.th_task_state];
1517  }
1518  kmp_task_team_t *task_team = thread->th.th_task_team;
1519 
1520  /* tasking must be enabled now as the task might not be pushed */
1521  if (!KMP_TASKING_ENABLED(task_team)) {
1522  KA_TRACE(
1523  30,
1524  ("T#%d enabling tasking in __kmp_task_alloc for proxy task\n", gtid));
1525  __kmp_enable_tasking(task_team, thread);
1526  kmp_int32 tid = thread->th.th_info.ds.ds_tid;
1527  kmp_thread_data_t *thread_data = &task_team->tt.tt_threads_data[tid];
1528  // No lock needed since only owner can allocate
1529  if (thread_data->td.td_deque == NULL) {
1530  __kmp_alloc_task_deque(thread, thread_data);
1531  }
1532  }
1533 
1534  if ((flags->proxy == TASK_PROXY || flags->detachable == TASK_DETACHABLE) &&
1535  task_team->tt.tt_found_proxy_tasks == FALSE)
1536  TCW_4(task_team->tt.tt_found_proxy_tasks, TRUE);
1537  if (flags->hidden_helper &&
1538  task_team->tt.tt_hidden_helper_task_encountered == FALSE)
1539  TCW_4(task_team->tt.tt_hidden_helper_task_encountered, TRUE);
1540  }
1541 
1542  // Calculate shared structure offset including padding after kmp_task_t struct
1543  // to align pointers in shared struct
1544  shareds_offset = sizeof(kmp_taskdata_t) + sizeof_kmp_task_t;
1545  shareds_offset = __kmp_round_up_to_val(shareds_offset, sizeof(void *));
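  // Resulting single-allocation layout (illustrative diagram only):
  //
  //   taskdata --> [ kmp_taskdata_t ][ kmp_task_t + privates ][ pad ][ shareds ]
  //                                  ^                                ^
  //                                  task                             task->shareds
  //                |<--------------- shareds_offset ---------------->|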
1546 
1547  // Allocate a kmp_taskdata_t block and a kmp_task_t block.
1548  KA_TRACE(30, ("__kmp_task_alloc: T#%d First malloc size: %ld\n", gtid,
1549  shareds_offset));
1550  KA_TRACE(30, ("__kmp_task_alloc: T#%d Second malloc size: %ld\n", gtid,
1551  sizeof_shareds));
1552 
1553  // Avoid double allocation here by combining shareds with taskdata
1554 #if USE_FAST_MEMORY
1555  taskdata = (kmp_taskdata_t *)__kmp_fast_allocate(thread, shareds_offset +
1556  sizeof_shareds);
1557 #else /* ! USE_FAST_MEMORY */
1558  taskdata = (kmp_taskdata_t *)__kmp_thread_malloc(thread, shareds_offset +
1559  sizeof_shareds);
1560 #endif /* USE_FAST_MEMORY */
1561 
1562  task = KMP_TASKDATA_TO_TASK(taskdata);
1563 
1564 // Make sure task & taskdata are aligned appropriately
1565 #if KMP_ARCH_X86 || KMP_ARCH_PPC64 || KMP_ARCH_S390X || !KMP_HAVE_QUAD
1566  KMP_DEBUG_ASSERT((((kmp_uintptr_t)taskdata) & (sizeof(double) - 1)) == 0);
1567  KMP_DEBUG_ASSERT((((kmp_uintptr_t)task) & (sizeof(double) - 1)) == 0);
1568 #else
1569  KMP_DEBUG_ASSERT((((kmp_uintptr_t)taskdata) & (sizeof(_Quad) - 1)) == 0);
1570  KMP_DEBUG_ASSERT((((kmp_uintptr_t)task) & (sizeof(_Quad) - 1)) == 0);
1571 #endif
1572  if (sizeof_shareds > 0) {
1573  // Avoid double allocation here by combining shareds with taskdata
1574  task->shareds = &((char *)taskdata)[shareds_offset];
1575  // Make sure shareds struct is aligned to pointer size
1576  KMP_DEBUG_ASSERT((((kmp_uintptr_t)task->shareds) & (sizeof(void *) - 1)) ==
1577  0);
1578  } else {
1579  task->shareds = NULL;
1580  }
1581  task->routine = task_entry;
1582  task->part_id = 0; // AC: Always start with 0 part id
1583 
1584  taskdata->td_task_id = KMP_GEN_TASK_ID();
1585  taskdata->td_team = thread->th.th_team;
1586  taskdata->td_alloc_thread = thread;
1587  taskdata->td_parent = parent_task;
1588  taskdata->td_level = parent_task->td_level + 1; // increment nesting level
1589  KMP_ATOMIC_ST_RLX(&taskdata->td_untied_count, 0);
1590  taskdata->td_ident = loc_ref;
1591  taskdata->td_taskwait_ident = NULL;
1592  taskdata->td_taskwait_counter = 0;
1593  taskdata->td_taskwait_thread = 0;
1594  KMP_DEBUG_ASSERT(taskdata->td_parent != NULL);
1595  // avoid copying icvs for proxy tasks
1596  if (flags->proxy == TASK_FULL)
1597  copy_icvs(&taskdata->td_icvs, &taskdata->td_parent->td_icvs);
1598 
1599  taskdata->td_flags = *flags;
1600  taskdata->td_task_team = thread->th.th_task_team;
1601  taskdata->td_size_alloc = shareds_offset + sizeof_shareds;
1602  taskdata->td_flags.tasktype = TASK_EXPLICIT;
1603  // If it is hidden helper task, we need to set the team and task team
1604  // correspondingly.
1605  if (flags->hidden_helper) {
1606  kmp_info_t *shadow_thread = __kmp_threads[KMP_GTID_TO_SHADOW_GTID(gtid)];
1607  taskdata->td_team = shadow_thread->th.th_team;
1608  taskdata->td_task_team = shadow_thread->th.th_task_team;
1609  }
1610 
1611  // GEH - TODO: fix this to copy parent task's value of tasking_ser flag
1612  taskdata->td_flags.tasking_ser = (__kmp_tasking_mode == tskm_immediate_exec);
1613 
1614  // GEH - TODO: fix this to copy parent task's value of team_serial flag
1615  taskdata->td_flags.team_serial = (team->t.t_serialized) ? 1 : 0;
1616 
1617  // GEH - Note we serialize the task if the team is serialized to make sure
1618  // implicit parallel region tasks are not left until program termination to
1619  // execute. Also, it helps locality to execute immediately.
1620 
1621  taskdata->td_flags.task_serial =
1622  (parent_task->td_flags.final || taskdata->td_flags.team_serial ||
1623  taskdata->td_flags.tasking_ser || flags->merged_if0);
1624 
1625  taskdata->td_flags.started = 0;
1626  taskdata->td_flags.executing = 0;
1627  taskdata->td_flags.complete = 0;
1628  taskdata->td_flags.freed = 0;
1629 #if OMPX_TASKGRAPH
1630  taskdata->td_flags.onced = 0;
1631 #endif
1632  KMP_ATOMIC_ST_RLX(&taskdata->td_incomplete_child_tasks, 0);
1633  // start at one because it counts the current task and its children
1634  KMP_ATOMIC_ST_RLX(&taskdata->td_allocated_child_tasks, 1);
1635  taskdata->td_taskgroup =
1636  parent_task->td_taskgroup; // task inherits taskgroup from the parent task
1637  taskdata->td_dephash = NULL;
1638  taskdata->td_depnode = NULL;
1639  taskdata->td_target_data.async_handle = NULL;
1640  if (flags->tiedness == TASK_UNTIED)
1641  taskdata->td_last_tied = NULL; // will be set when the task is scheduled
1642  else
1643  taskdata->td_last_tied = taskdata;
1644  taskdata->td_allow_completion_event.type = KMP_EVENT_UNINITIALIZED;
1645 #if OMPT_SUPPORT
1646  if (UNLIKELY(ompt_enabled.enabled))
1647  __ompt_task_init(taskdata, gtid);
1648 #endif
1649  // TODO: What would be the balance between the conditions in the function and
1650  // an atomic operation?
1651  if (__kmp_track_children_task(taskdata)) {
1652  KMP_ATOMIC_INC(&parent_task->td_incomplete_child_tasks);
1653  if (parent_task->td_taskgroup)
1654  KMP_ATOMIC_INC(&parent_task->td_taskgroup->count);
1655  // Only need to keep track of allocated child tasks for explicit tasks since
1656  // implicit tasks are not deallocated
1657  if (taskdata->td_parent->td_flags.tasktype == TASK_EXPLICIT) {
1658  KMP_ATOMIC_INC(&taskdata->td_parent->td_allocated_child_tasks);
1659  }
1660  if (flags->hidden_helper) {
1661  taskdata->td_flags.task_serial = FALSE;
1662  // Increment the number of hidden helper tasks to be executed
1663  KMP_ATOMIC_INC(&__kmp_unexecuted_hidden_helper_tasks);
1664  }
1665  }
1666 
1667 #if OMPX_TASKGRAPH
1668  kmp_tdg_info_t *tdg = __kmp_find_tdg(__kmp_curr_tdg_idx);
1669  if (tdg && __kmp_tdg_is_recording(tdg->tdg_status) &&
1670  (task_entry != (kmp_routine_entry_t)__kmp_taskloop_task)) {
1671  taskdata->is_taskgraph = 1;
1672  taskdata->tdg = __kmp_global_tdgs[__kmp_curr_tdg_idx];
1673  taskdata->td_task_id = KMP_ATOMIC_INC(&__kmp_tdg_task_id);
1674  }
1675 #endif
1676  KA_TRACE(20, ("__kmp_task_alloc(exit): T#%d created task %p parent=%p\n",
1677  gtid, taskdata, taskdata->td_parent));
1678 
1679  return task;
1680 }
1681 
1682 kmp_task_t *__kmpc_omp_task_alloc(ident_t *loc_ref, kmp_int32 gtid,
1683  kmp_int32 flags, size_t sizeof_kmp_task_t,
1684  size_t sizeof_shareds,
1685  kmp_routine_entry_t task_entry) {
1686  kmp_task_t *retval;
1687  kmp_tasking_flags_t *input_flags = (kmp_tasking_flags_t *)&flags;
1688  __kmp_assert_valid_gtid(gtid);
1689  input_flags->native = FALSE;
1690  // __kmp_task_alloc() sets up all other runtime flags
1691  KA_TRACE(10, ("__kmpc_omp_task_alloc(enter): T#%d loc=%p, flags=(%s %s %s) "
1692  "sizeof_task=%ld sizeof_shared=%ld entry=%p\n",
1693  gtid, loc_ref, input_flags->tiedness ? "tied " : "untied",
1694  input_flags->proxy ? "proxy" : "",
1695  input_flags->detachable ? "detachable" : "", sizeof_kmp_task_t,
1696  sizeof_shareds, task_entry));
1697 
1698  retval = __kmp_task_alloc(loc_ref, gtid, input_flags, sizeof_kmp_task_t,
1699  sizeof_shareds, task_entry);
1700 
1701  KA_TRACE(20, ("__kmpc_omp_task_alloc(exit): T#%d retval %p\n", gtid, retval));
1702 
1703  return retval;
1704 }
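
// Hedged sketch of how a compiler front end might drive the entry point above
// for "#pragma omp task shared(x) x++;". The identifiers my_shareds/my_entry
// are invented for this comment; they are not produced verbatim by any
// compiler, and real lowerings also handle privates, if0, final, etc.
//
//   struct my_shareds { int *x; };                       // pointers to shared vars
//   kmp_int32 my_entry(kmp_int32 gtid, kmp_task_t *t) {  // outlined task body
//     struct my_shareds *s = (struct my_shareds *)t->shareds;
//     ++*(s->x);
//     return 0;
//   }
//   ...
//   kmp_task_t *t = __kmpc_omp_task_alloc(&loc, gtid, /*flags: tied*/ 1,
//                                         sizeof(kmp_task_t),
//                                         sizeof(struct my_shareds),
//                                         (kmp_routine_entry_t)my_entry);
//   ((struct my_shareds *)t->shareds)->x = &x;           // capture shared var
//   __kmpc_omp_task(&loc, gtid, t);                      // defer or run it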
1705 
1706 kmp_task_t *__kmpc_omp_target_task_alloc(ident_t *loc_ref, kmp_int32 gtid,
1707  kmp_int32 flags,
1708  size_t sizeof_kmp_task_t,
1709  size_t sizeof_shareds,
1710  kmp_routine_entry_t task_entry,
1711  kmp_int64 device_id) {
1712  auto &input_flags = reinterpret_cast<kmp_tasking_flags_t &>(flags);
1713  // target tasks are untied, as defined by the specification
1714  input_flags.tiedness = TASK_UNTIED;
1715 
1716  if (__kmp_enable_hidden_helper)
1717  input_flags.hidden_helper = TRUE;
1718 
1719  return __kmpc_omp_task_alloc(loc_ref, gtid, flags, sizeof_kmp_task_t,
1720  sizeof_shareds, task_entry);
1721 }
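
// Note (hedged, based only on the body above): device_id is not consumed by
// this wrapper itself; the wrapper only forces the untied and (optionally)
// hidden_helper flags before delegating to __kmpc_omp_task_alloc.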
1722 
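// __kmpc_omp_reg_task_with_affinity: register the "affinity" clause hints for
// a task. The runtime currently ignores the hints and simply reports success
// by returning 0.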
1736 kmp_int32
1737 __kmpc_omp_reg_task_with_affinity(ident_t *loc_ref, kmp_int32 gtid,
1738  kmp_task_t *new_task, kmp_int32 naffins,
1739  kmp_task_affinity_info_t *affin_list) {
1740  return 0;
1741 }
1742 
1743 // __kmp_invoke_task: invoke the specified task
1744 //
1745 // gtid: global thread ID of caller
1746 // task: the task to invoke
1747 // current_task: the task to resume after task invocation
1748 #ifdef __s390x__
1749 __attribute__((target("backchain")))
1750 #endif
1751 static void
1752 __kmp_invoke_task(kmp_int32 gtid, kmp_task_t *task,
1753  kmp_taskdata_t *current_task) {
1754  kmp_taskdata_t *taskdata = KMP_TASK_TO_TASKDATA(task);
1755  kmp_info_t *thread;
1756  int discard = 0 /* false */;
1757  KA_TRACE(
1758  30, ("__kmp_invoke_task(enter): T#%d invoking task %p, current_task=%p\n",
1759  gtid, taskdata, current_task));
1760  KMP_DEBUG_ASSERT(task);
1761  if (UNLIKELY(taskdata->td_flags.proxy == TASK_PROXY &&
1762  taskdata->td_flags.complete == 1)) {
1763  // This is a proxy task that was already completed but it needs to run
1764  // its bottom-half finish
1765  KA_TRACE(
1766  30,
1767  ("__kmp_invoke_task: T#%d running bottom finish for proxy task %p\n",
1768  gtid, taskdata));
1769 
1770  __kmp_bottom_half_finish_proxy(gtid, task);
1771 
1772  KA_TRACE(30, ("__kmp_invoke_task(exit): T#%d completed bottom finish for "
1773  "proxy task %p, resuming task %p\n",
1774  gtid, taskdata, current_task));
1775 
1776  return;
1777  }
1778 
1779 #if OMPT_SUPPORT
1780  // For untied tasks, the first task executed only calls __kmpc_omp_task and
1781  // does not execute code.
1782  ompt_thread_info_t oldInfo;
1783  if (UNLIKELY(ompt_enabled.enabled)) {
1784  // Store the thread's state and restore it after the task
1785  thread = __kmp_threads[gtid];
1786  oldInfo = thread->th.ompt_thread_info;
1787  thread->th.ompt_thread_info.wait_id = 0;
1788  thread->th.ompt_thread_info.state = (thread->th.th_team_serialized)
1789  ? ompt_state_work_serial
1790  : ompt_state_work_parallel;
1791  taskdata->ompt_task_info.frame.exit_frame.ptr = OMPT_GET_FRAME_ADDRESS(0);
1792  }
1793 #endif
1794 
1795  // Proxy tasks are not handled by the runtime
1796  if (taskdata->td_flags.proxy != TASK_PROXY) {
1797  __kmp_task_start(gtid, task, current_task); // OMPT only if not discarded
1798  }
1799 
1800  // TODO: cancel tasks if the parallel region has also been cancelled
1801  // TODO: check if this sequence can be hoisted above __kmp_task_start
1802  // if cancellation has been enabled for this run ...
1803  if (UNLIKELY(__kmp_omp_cancellation)) {
1804  thread = __kmp_threads[gtid];
1805  kmp_team_t *this_team = thread->th.th_team;
1806  kmp_taskgroup_t *taskgroup = taskdata->td_taskgroup;
1807  if ((taskgroup && taskgroup->cancel_request) ||
1808  (this_team->t.t_cancel_request == cancel_parallel)) {
1809 #if OMPT_SUPPORT && OMPT_OPTIONAL
1810  ompt_data_t *task_data;
1811  if (UNLIKELY(ompt_enabled.ompt_callback_cancel)) {
1812  __ompt_get_task_info_internal(0, NULL, &task_data, NULL, NULL, NULL);
1813  ompt_callbacks.ompt_callback(ompt_callback_cancel)(
1814  task_data,
1815  ((taskgroup && taskgroup->cancel_request) ? ompt_cancel_taskgroup
1816  : ompt_cancel_parallel) |
1817  ompt_cancel_discarded_task,
1818  NULL);
1819  }
1820 #endif
1821  KMP_COUNT_BLOCK(TASK_cancelled);
1822  // this task belongs to a task group and we need to cancel it
1823  discard = 1 /* true */;
1824  }
1825  }
1826 
1827  // Invoke the task routine and pass in relevant data.
1828  // Thunks generated by gcc take a different argument list.
1829  if (!discard) {
1830  if (taskdata->td_flags.tiedness == TASK_UNTIED) {
1831  taskdata->td_last_tied = current_task->td_last_tied;
1832  KMP_DEBUG_ASSERT(taskdata->td_last_tied);
1833  }
1834 #if KMP_STATS_ENABLED
1835  KMP_COUNT_BLOCK(TASK_executed);
1836  switch (KMP_GET_THREAD_STATE()) {
1837  case FORK_JOIN_BARRIER:
1838  KMP_PUSH_PARTITIONED_TIMER(OMP_task_join_bar);
1839  break;
1840  case PLAIN_BARRIER:
1841  KMP_PUSH_PARTITIONED_TIMER(OMP_task_plain_bar);
1842  break;
1843  case TASKYIELD:
1844  KMP_PUSH_PARTITIONED_TIMER(OMP_task_taskyield);
1845  break;
1846  case TASKWAIT:
1847  KMP_PUSH_PARTITIONED_TIMER(OMP_task_taskwait);
1848  break;
1849  case TASKGROUP:
1850  KMP_PUSH_PARTITIONED_TIMER(OMP_task_taskgroup);
1851  break;
1852  default:
1853  KMP_PUSH_PARTITIONED_TIMER(OMP_task_immediate);
1854  break;
1855  }
1856 #endif // KMP_STATS_ENABLED
1857 
1858 // OMPT task begin
1859 #if OMPT_SUPPORT
1860  if (UNLIKELY(ompt_enabled.enabled))
1861  __ompt_task_start(task, current_task, gtid);
1862 #endif
1863 #if OMPT_SUPPORT && OMPT_OPTIONAL
1864  if (UNLIKELY(ompt_enabled.ompt_callback_dispatch &&
1865  taskdata->ompt_task_info.dispatch_chunk.iterations > 0)) {
1866  ompt_data_t instance = ompt_data_none;
1867  instance.ptr = &(taskdata->ompt_task_info.dispatch_chunk);
1868  ompt_team_info_t *team_info = __ompt_get_teaminfo(0, NULL);
1869  ompt_callbacks.ompt_callback(ompt_callback_dispatch)(
1870  &(team_info->parallel_data), &(taskdata->ompt_task_info.task_data),
1871  ompt_dispatch_taskloop_chunk, instance);
1872  taskdata->ompt_task_info.dispatch_chunk = {0, 0};
1873  }
1874 #endif // OMPT_SUPPORT && OMPT_OPTIONAL
1875 
1876 #if OMPD_SUPPORT
1877  if (ompd_state & OMPD_ENABLE_BP)
1878  ompd_bp_task_begin();
1879 #endif
1880 
1881 #if USE_ITT_BUILD && USE_ITT_NOTIFY
1882  kmp_uint64 cur_time;
1883  kmp_int32 kmp_itt_count_task =
1884  __kmp_forkjoin_frames_mode == 3 && !taskdata->td_flags.task_serial &&
1885  current_task->td_flags.tasktype == TASK_IMPLICIT;
1886  if (kmp_itt_count_task) {
1887  thread = __kmp_threads[gtid];
1888  // Time outer level explicit task on barrier for adjusting imbalance time
1889  if (thread->th.th_bar_arrive_time)
1890  cur_time = __itt_get_timestamp();
1891  else
1892  kmp_itt_count_task = 0; // thread is not on a barrier - skip timing
1893  }
1894  KMP_FSYNC_ACQUIRED(taskdata); // acquired self (new task)
1895 #endif
1896 
1897 #if ENABLE_LIBOMPTARGET
1898  if (taskdata->td_target_data.async_handle != NULL) {
1899  // If we have a valid target async handle, that means that we have already
1900  // executed the task routine once. We must query for the handle completion
1901  // instead of re-executing the routine.
1902  KMP_ASSERT(tgt_target_nowait_query);
1903  tgt_target_nowait_query(&taskdata->td_target_data.async_handle);
1904  } else
1905 #endif
1906  if (task->routine != NULL) {
1907 #ifdef KMP_GOMP_COMPAT
1908  if (taskdata->td_flags.native) {
1909  ((void (*)(void *))(*(task->routine)))(task->shareds);
1910  } else
1911 #endif /* KMP_GOMP_COMPAT */
1912  {
1913  (*(task->routine))(gtid, task);
1914  }
1915  }
1916  KMP_POP_PARTITIONED_TIMER();
1917 
1918 #if USE_ITT_BUILD && USE_ITT_NOTIFY
1919  if (kmp_itt_count_task) {
1920  // Barrier imbalance - adjust arrive time with the task duration
1921  thread->th.th_bar_arrive_time += (__itt_get_timestamp() - cur_time);
1922  }
1923  KMP_FSYNC_CANCEL(taskdata); // destroy self (just executed)
1924  KMP_FSYNC_RELEASING(taskdata->td_parent); // releasing parent
1925 #endif
1926  }
1927 
1928 #if OMPD_SUPPORT
1929  if (ompd_state & OMPD_ENABLE_BP)
1930  ompd_bp_task_end();
1931 #endif
1932 
1933  // Proxy tasks are not handled by the runtime
1934  if (taskdata->td_flags.proxy != TASK_PROXY) {
1935 #if OMPT_SUPPORT
1936  if (UNLIKELY(ompt_enabled.enabled)) {
1937  thread->th.ompt_thread_info = oldInfo;
1938  if (taskdata->td_flags.tiedness == TASK_TIED) {
1939  taskdata->ompt_task_info.frame.exit_frame = ompt_data_none;
1940  }
1941  __kmp_task_finish<true>(gtid, task, current_task);
1942  } else
1943 #endif
1944  __kmp_task_finish<false>(gtid, task, current_task);
1945  }
1946 
1947  KA_TRACE(
1948  30,
1949  ("__kmp_invoke_task(exit): T#%d completed task %p, resuming task %p\n",
1950  gtid, taskdata, current_task));
1951  return;
1952 }
1953 
1954 // __kmpc_omp_task_parts: Schedule a thread-switchable task for execution
1955 //
1956 // loc_ref: location of original task pragma (ignored)
1957 // gtid: Global Thread ID of encountering thread
1958 // new_task: task thunk allocated by __kmp_omp_task_alloc() for the ''new task''
1959 // Returns:
1960 // TASK_CURRENT_NOT_QUEUED (0) if did not suspend and queue current task to
1961 // be resumed later.
1962 // TASK_CURRENT_QUEUED (1) if suspended and queued the current task to be
1963 // resumed later.
1964 kmp_int32 __kmpc_omp_task_parts(ident_t *loc_ref, kmp_int32 gtid,
1965  kmp_task_t *new_task) {
1966  kmp_taskdata_t *new_taskdata = KMP_TASK_TO_TASKDATA(new_task);
1967 
1968  KA_TRACE(10, ("__kmpc_omp_task_parts(enter): T#%d loc=%p task=%p\n", gtid,
1969  loc_ref, new_taskdata));
1970 
1971 #if OMPT_SUPPORT
1972  kmp_taskdata_t *parent;
1973  if (UNLIKELY(ompt_enabled.enabled)) {
1974  parent = new_taskdata->td_parent;
1975  if (ompt_enabled.ompt_callback_task_create) {
1976  ompt_callbacks.ompt_callback(ompt_callback_task_create)(
1977  &(parent->ompt_task_info.task_data), &(parent->ompt_task_info.frame),
1978  &(new_taskdata->ompt_task_info.task_data), ompt_task_explicit, 0,
1979  OMPT_GET_RETURN_ADDRESS(0));
1980  }
1981  }
1982 #endif
1983 
1984  /* Should we execute the new task or queue it? For now, let's just always try
1985  to queue it. If the queue fills up, then we'll execute it. */
1986 
1987  if (__kmp_push_task(gtid, new_task) == TASK_NOT_PUSHED) // if cannot defer
1988  { // Execute this task immediately
1989  kmp_taskdata_t *current_task = __kmp_threads[gtid]->th.th_current_task;
1990  new_taskdata->td_flags.task_serial = 1;
1991  __kmp_invoke_task(gtid, new_task, current_task);
1992  }
1993 
1994  KA_TRACE(
1995  10,
1996  ("__kmpc_omp_task_parts(exit): T#%d returning TASK_CURRENT_NOT_QUEUED: "
1997  "loc=%p task=%p\n",
1998  gtid, loc_ref, new_taskdata));
1999 
2000 #if OMPT_SUPPORT
2001  if (UNLIKELY(ompt_enabled.enabled)) {
2002  parent->ompt_task_info.frame.enter_frame = ompt_data_none;
2003  }
2004 #endif
2005  return TASK_CURRENT_NOT_QUEUED;
2006 }
2007 
2008 // __kmp_omp_task: Schedule a non-thread-switchable task for execution
2009 //
2010 // gtid: Global Thread ID of encountering thread
2011 // new_task: non-thread-switchable task thunk allocated by __kmp_omp_task_alloc()
2012 // serialize_immediate: if TRUE then if the task is executed immediately its
2013 // execution will be serialized
2014 // Returns:
2015 // TASK_CURRENT_NOT_QUEUED (0) if did not suspend and queue current task to
2016 // be resumed later.
2017 // TASK_CURRENT_QUEUED (1) if suspended and queued the current task to be
2018 // resumed later.
2019 kmp_int32 __kmp_omp_task(kmp_int32 gtid, kmp_task_t *new_task,
2020  bool serialize_immediate) {
2021  kmp_taskdata_t *new_taskdata = KMP_TASK_TO_TASKDATA(new_task);
2022 
2023 #if OMPX_TASKGRAPH
2024  if (new_taskdata->is_taskgraph &&
2025  __kmp_tdg_is_recording(new_taskdata->tdg->tdg_status)) {
2026  kmp_tdg_info_t *tdg = new_taskdata->tdg;
2027  // extend the record_map if needed
2028  if (new_taskdata->td_task_id >= new_taskdata->tdg->map_size) {
2029  __kmp_acquire_bootstrap_lock(&tdg->graph_lock);
2030  // map_size could have been updated by another thread if recursive
2031  // taskloop
2032  if (new_taskdata->td_task_id >= tdg->map_size) {
2033  kmp_uint old_size = tdg->map_size;
2034  kmp_uint new_size = old_size * 2;
2035  kmp_node_info_t *old_record = tdg->record_map;
2036  kmp_node_info_t *new_record = (kmp_node_info_t *)__kmp_allocate(
2037  new_size * sizeof(kmp_node_info_t));
2038 
2039  KMP_MEMCPY(new_record, old_record, old_size * sizeof(kmp_node_info_t));
2040  tdg->record_map = new_record;
2041 
2042  __kmp_free(old_record);
2043 
2044  for (kmp_int i = old_size; i < new_size; i++) {
2045  kmp_int32 *successorsList = (kmp_int32 *)__kmp_allocate(
2046  __kmp_successors_size * sizeof(kmp_int32));
2047  new_record[i].task = nullptr;
2048  new_record[i].successors = successorsList;
2049  new_record[i].nsuccessors = 0;
2050  new_record[i].npredecessors = 0;
2051  new_record[i].successors_size = __kmp_successors_size;
2052  KMP_ATOMIC_ST_REL(&new_record[i].npredecessors_counter, 0);
2053  }
2054  // update the size at the end, to avoid other threads using old_record
2055  // while map_size has already been updated
2056  tdg->map_size = new_size;
2057  }
2058  __kmp_release_bootstrap_lock(&tdg->graph_lock);
2059  }
2060  // record a task
2061  if (tdg->record_map[new_taskdata->td_task_id].task == nullptr) {
2062  tdg->record_map[new_taskdata->td_task_id].task = new_task;
2063  tdg->record_map[new_taskdata->td_task_id].parent_task =
2064  new_taskdata->td_parent;
2065  KMP_ATOMIC_INC(&tdg->num_tasks);
2066  }
2067  }
2068 #endif
2069 
2070  /* Should we execute the new task or queue it? For now, let's just always try
2071  to queue it. If the queue fills up, then we'll execute it. */
2072  if (new_taskdata->td_flags.proxy == TASK_PROXY ||
2073  __kmp_push_task(gtid, new_task) == TASK_NOT_PUSHED) // if cannot defer
2074  { // Execute this task immediately
2075  kmp_taskdata_t *current_task = __kmp_threads[gtid]->th.th_current_task;
2076  if (serialize_immediate)
2077  new_taskdata->td_flags.task_serial = 1;
2078  __kmp_invoke_task(gtid, new_task, current_task);
2079  } else if (__kmp_dflt_blocktime != KMP_MAX_BLOCKTIME &&
2080  __kmp_wpolicy_passive) {
2081  kmp_info_t *this_thr = __kmp_threads[gtid];
2082  kmp_team_t *team = this_thr->th.th_team;
2083  kmp_int32 nthreads = this_thr->th.th_team_nproc;
2084  for (int i = 0; i < nthreads; ++i) {
2085  kmp_info_t *thread = team->t.t_threads[i];
2086  if (thread == this_thr)
2087  continue;
2088  if (thread->th.th_sleep_loc != NULL) {
2089  __kmp_null_resume_wrapper(thread);
2090  break; // awake one thread at a time
2091  }
2092  }
2093  }
2094  return TASK_CURRENT_NOT_QUEUED;
2095 }
2096 
2097 // __kmpc_omp_task: Wrapper around __kmp_omp_task to schedule a
2098 // non-thread-switchable task from the parent thread only!
2099 //
2100 // loc_ref: location of original task pragma (ignored)
2101 // gtid: Global Thread ID of encountering thread
2102 // new_task: non-thread-switchable task thunk allocated by
2103 // __kmp_omp_task_alloc()
2104 // Returns:
2105 // TASK_CURRENT_NOT_QUEUED (0) if did not suspend and queue current task to
2106 // be resumed later.
2107 // TASK_CURRENT_QUEUED (1) if suspended and queued the current task to be
2108 // resumed later.
2109 kmp_int32 __kmpc_omp_task(ident_t *loc_ref, kmp_int32 gtid,
2110  kmp_task_t *new_task) {
2111  kmp_int32 res;
2112  KMP_SET_THREAD_STATE_BLOCK(EXPLICIT_TASK);
2113 
2114 #if KMP_DEBUG || OMPT_SUPPORT
2115  kmp_taskdata_t *new_taskdata = KMP_TASK_TO_TASKDATA(new_task);
2116 #endif
2117  KA_TRACE(10, ("__kmpc_omp_task(enter): T#%d loc=%p task=%p\n", gtid, loc_ref,
2118  new_taskdata));
2119  __kmp_assert_valid_gtid(gtid);
2120 
2121 #if OMPT_SUPPORT
2122  kmp_taskdata_t *parent = NULL;
2123  if (UNLIKELY(ompt_enabled.enabled)) {
2124  if (!new_taskdata->td_flags.started) {
2125  OMPT_STORE_RETURN_ADDRESS(gtid);
2126  parent = new_taskdata->td_parent;
2127  if (!parent->ompt_task_info.frame.enter_frame.ptr) {
2128  parent->ompt_task_info.frame.enter_frame.ptr =
2129  OMPT_GET_FRAME_ADDRESS(0);
2130  }
2131  if (ompt_enabled.ompt_callback_task_create) {
2132  ompt_callbacks.ompt_callback(ompt_callback_task_create)(
2133  &(parent->ompt_task_info.task_data),
2134  &(parent->ompt_task_info.frame),
2135  &(new_taskdata->ompt_task_info.task_data),
2136  ompt_task_explicit | TASK_TYPE_DETAILS_FORMAT(new_taskdata), 0,
2137  OMPT_LOAD_RETURN_ADDRESS(gtid));
2138  }
2139  } else {
2140  // We are scheduling the continuation of an UNTIED task.
2141  // Scheduling back to the parent task.
2142  __ompt_task_finish(new_task,
2143  new_taskdata->ompt_task_info.scheduling_parent,
2144  ompt_task_switch);
2145  new_taskdata->ompt_task_info.frame.exit_frame = ompt_data_none;
2146  }
2147  }
2148 #endif
2149 
2150  res = __kmp_omp_task(gtid, new_task, true);
2151 
2152  KA_TRACE(10, ("__kmpc_omp_task(exit): T#%d returning "
2153  "TASK_CURRENT_NOT_QUEUED: loc=%p task=%p\n",
2154  gtid, loc_ref, new_taskdata));
2155 #if OMPT_SUPPORT
2156  if (UNLIKELY(ompt_enabled.enabled && parent != NULL)) {
2157  parent->ompt_task_info.frame.enter_frame = ompt_data_none;
2158  }
2159 #endif
2160  return res;
2161 }
2162 
2163 // __kmp_omp_taskloop_task: Wrapper around __kmp_omp_task to schedule
2164 // a taskloop task with the correct OMPT return address
2165 //
2166 // loc_ref: location of original task pragma (ignored)
2167 // gtid: Global Thread ID of encountering thread
2168 // new_task: non-thread-switchable task thunk allocated by
2169 // __kmp_omp_task_alloc()
2170 // codeptr_ra: return address for OMPT callback
2171 // Returns:
2172 // TASK_CURRENT_NOT_QUEUED (0) if did not suspend and queue current task to
2173 // be resumed later.
2174 // TASK_CURRENT_QUEUED (1) if suspended and queued the current task to be
2175 // resumed later.
2176 kmp_int32 __kmp_omp_taskloop_task(ident_t *loc_ref, kmp_int32 gtid,
2177  kmp_task_t *new_task, void *codeptr_ra) {
2178  kmp_int32 res;
2179  KMP_SET_THREAD_STATE_BLOCK(EXPLICIT_TASK);
2180 
2181 #if KMP_DEBUG || OMPT_SUPPORT
2182  kmp_taskdata_t *new_taskdata = KMP_TASK_TO_TASKDATA(new_task);
2183 #endif
2184  KA_TRACE(10, ("__kmp_omp_taskloop_task(enter): T#%d loc=%p task=%p\n", gtid, loc_ref,
2185  new_taskdata));
2186 
2187 #if OMPT_SUPPORT
2188  kmp_taskdata_t *parent = NULL;
2189  if (UNLIKELY(ompt_enabled.enabled && !new_taskdata->td_flags.started)) {
2190  parent = new_taskdata->td_parent;
2191  if (!parent->ompt_task_info.frame.enter_frame.ptr)
2192  parent->ompt_task_info.frame.enter_frame.ptr = OMPT_GET_FRAME_ADDRESS(0);
2193  if (ompt_enabled.ompt_callback_task_create) {
2194  ompt_callbacks.ompt_callback(ompt_callback_task_create)(
2195  &(parent->ompt_task_info.task_data), &(parent->ompt_task_info.frame),
2196  &(new_taskdata->ompt_task_info.task_data),
2197  ompt_task_explicit | TASK_TYPE_DETAILS_FORMAT(new_taskdata), 0,
2198  codeptr_ra);
2199  }
2200  }
2201 #endif
2202 
2203  res = __kmp_omp_task(gtid, new_task, true);
2204 
2205  KA_TRACE(10, ("__kmp_omp_taskloop_task(exit): T#%d returning "
2206  "TASK_CURRENT_NOT_QUEUED: loc=%p task=%p\n",
2207  gtid, loc_ref, new_taskdata));
2208 #if OMPT_SUPPORT
2209  if (UNLIKELY(ompt_enabled.enabled && parent != NULL)) {
2210  parent->ompt_task_info.frame.enter_frame = ompt_data_none;
2211  }
2212 #endif
2213  return res;
2214 }
2215 
2216 template <bool ompt>
2217 static kmp_int32 __kmpc_omp_taskwait_template(ident_t *loc_ref, kmp_int32 gtid,
2218  void *frame_address,
2219  void *return_address) {
2220  kmp_taskdata_t *taskdata = nullptr;
2221  kmp_info_t *thread;
2222  int thread_finished = FALSE;
2223  KMP_SET_THREAD_STATE_BLOCK(TASKWAIT);
2224 
2225  KA_TRACE(10, ("__kmpc_omp_taskwait(enter): T#%d loc=%p\n", gtid, loc_ref));
2226  KMP_DEBUG_ASSERT(gtid >= 0);
2227 
2228  if (__kmp_tasking_mode != tskm_immediate_exec) {
2229  thread = __kmp_threads[gtid];
2230  taskdata = thread->th.th_current_task;
2231 
2232 #if OMPT_SUPPORT && OMPT_OPTIONAL
2233  ompt_data_t *my_task_data;
2234  ompt_data_t *my_parallel_data;
2235 
2236  if (ompt) {
2237  my_task_data = &(taskdata->ompt_task_info.task_data);
2238  my_parallel_data = OMPT_CUR_TEAM_DATA(thread);
2239 
2240  taskdata->ompt_task_info.frame.enter_frame.ptr = frame_address;
2241 
2242  if (ompt_enabled.ompt_callback_sync_region) {
2243  ompt_callbacks.ompt_callback(ompt_callback_sync_region)(
2244  ompt_sync_region_taskwait, ompt_scope_begin, my_parallel_data,
2245  my_task_data, return_address);
2246  }
2247 
2248  if (ompt_enabled.ompt_callback_sync_region_wait) {
2249  ompt_callbacks.ompt_callback(ompt_callback_sync_region_wait)(
2250  ompt_sync_region_taskwait, ompt_scope_begin, my_parallel_data,
2251  my_task_data, return_address);
2252  }
2253  }
2254 #endif // OMPT_SUPPORT && OMPT_OPTIONAL
2255 
2256 // Debugger: The taskwait is active. Store the location and the thread that
2257 // encountered the taskwait.
2258 #if USE_ITT_BUILD
2259 // Note: These values are used by ITT events as well.
2260 #endif /* USE_ITT_BUILD */
2261  taskdata->td_taskwait_counter += 1;
2262  taskdata->td_taskwait_ident = loc_ref;
2263  taskdata->td_taskwait_thread = gtid + 1;
2264 
2265 #if USE_ITT_BUILD
2266  void *itt_sync_obj = NULL;
2267 #if USE_ITT_NOTIFY
2268  KMP_ITT_TASKWAIT_STARTING(itt_sync_obj);
2269 #endif /* USE_ITT_NOTIFY */
2270 #endif /* USE_ITT_BUILD */
2271 
2272  bool must_wait =
2273  !taskdata->td_flags.team_serial && !taskdata->td_flags.final;
2274 
2275  must_wait = must_wait || (thread->th.th_task_team != NULL &&
2276  thread->th.th_task_team->tt.tt_found_proxy_tasks);
2277  // If a hidden helper task has been encountered, we must wait here.
2278  must_wait =
2279  must_wait ||
2280  (__kmp_enable_hidden_helper && thread->th.th_task_team != NULL &&
2281  thread->th.th_task_team->tt.tt_hidden_helper_task_encountered);
2282 
2283  if (must_wait) {
2284  kmp_flag_32<false, false> flag(
2285  RCAST(std::atomic<kmp_uint32> *,
2286  &(taskdata->td_incomplete_child_tasks)),
2287  0U);
2288  while (KMP_ATOMIC_LD_ACQ(&taskdata->td_incomplete_child_tasks) != 0) {
2289  flag.execute_tasks(thread, gtid, FALSE,
2290  &thread_finished USE_ITT_BUILD_ARG(itt_sync_obj),
2291  __kmp_task_stealing_constraint);
2292  }
2293  }
2294 #if USE_ITT_BUILD
2295  KMP_ITT_TASKWAIT_FINISHED(itt_sync_obj);
2296  KMP_FSYNC_ACQUIRED(taskdata); // acquire self - sync with children
2297 #endif /* USE_ITT_BUILD */
2298 
2299  // Debugger: The taskwait is completed. Location remains, but thread is
2300  // negated.
2301  taskdata->td_taskwait_thread = -taskdata->td_taskwait_thread;
2302 
2303 #if OMPT_SUPPORT && OMPT_OPTIONAL
2304  if (ompt) {
2305  if (ompt_enabled.ompt_callback_sync_region_wait) {
2306  ompt_callbacks.ompt_callback(ompt_callback_sync_region_wait)(
2307  ompt_sync_region_taskwait, ompt_scope_end, my_parallel_data,
2308  my_task_data, return_address);
2309  }
2310  if (ompt_enabled.ompt_callback_sync_region) {
2311  ompt_callbacks.ompt_callback(ompt_callback_sync_region)(
2312  ompt_sync_region_taskwait, ompt_scope_end, my_parallel_data,
2313  my_task_data, return_address);
2314  }
2315  taskdata->ompt_task_info.frame.enter_frame = ompt_data_none;
2316  }
2317 #endif // OMPT_SUPPORT && OMPT_OPTIONAL
2318  }
2319 
2320  KA_TRACE(10, ("__kmpc_omp_taskwait(exit): T#%d task %p finished waiting, "
2321  "returning TASK_CURRENT_NOT_QUEUED\n",
2322  gtid, taskdata));
2323 
2324  return TASK_CURRENT_NOT_QUEUED;
2325 }
2326 
2327 #if OMPT_SUPPORT && OMPT_OPTIONAL
2328 OMPT_NOINLINE
2329 static kmp_int32 __kmpc_omp_taskwait_ompt(ident_t *loc_ref, kmp_int32 gtid,
2330  void *frame_address,
2331  void *return_address) {
2332  return __kmpc_omp_taskwait_template<true>(loc_ref, gtid, frame_address,
2333  return_address);
2334 }
2335 #endif // OMPT_SUPPORT && OMPT_OPTIONAL
2336 
2337 // __kmpc_omp_taskwait: Wait until all tasks generated by the current task are
2338 // complete
2339 kmp_int32 __kmpc_omp_taskwait(ident_t *loc_ref, kmp_int32 gtid) {
2340 #if OMPT_SUPPORT && OMPT_OPTIONAL
2341  if (UNLIKELY(ompt_enabled.enabled)) {
2342  OMPT_STORE_RETURN_ADDRESS(gtid);
2343  return __kmpc_omp_taskwait_ompt(loc_ref, gtid, OMPT_GET_FRAME_ADDRESS(0),
2344  OMPT_LOAD_RETURN_ADDRESS(gtid));
2345  }
2346 #endif
2347  return __kmpc_omp_taskwait_template<false>(loc_ref, gtid, NULL, NULL);
2348 }
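
// Hedged usage sketch (not verbatim output of any particular compiler): a
// "#pragma omp taskwait" in user code becomes a single runtime call such as
//
//   __kmpc_omp_taskwait(&loc, __kmpc_global_thread_num(&loc));
//
// which, per the template above, executes other tasks until the current task's
// td_incomplete_child_tasks count drops to zero.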
2349 
2350 // __kmpc_omp_taskyield: switch to a different task
2351 kmp_int32 __kmpc_omp_taskyield(ident_t *loc_ref, kmp_int32 gtid, int end_part) {
2352  kmp_taskdata_t *taskdata = NULL;
2353  kmp_info_t *thread;
2354  int thread_finished = FALSE;
2355 
2356  KMP_COUNT_BLOCK(OMP_TASKYIELD);
2357  KMP_SET_THREAD_STATE_BLOCK(TASKYIELD);
2358 
2359  KA_TRACE(10, ("__kmpc_omp_taskyield(enter): T#%d loc=%p end_part = %d\n",
2360  gtid, loc_ref, end_part));
2361  __kmp_assert_valid_gtid(gtid);
2362 
2363  if (__kmp_tasking_mode != tskm_immediate_exec && __kmp_init_parallel) {
2364  thread = __kmp_threads[gtid];
2365  taskdata = thread->th.th_current_task;
2366 // Should we model this as a task wait or not?
2367 // Debugger: The taskwait is active. Store the location and the thread that
2368 // encountered the taskwait.
2369 #if USE_ITT_BUILD
2370 // Note: These values are used by ITT events as well.
2371 #endif /* USE_ITT_BUILD */
2372  taskdata->td_taskwait_counter += 1;
2373  taskdata->td_taskwait_ident = loc_ref;
2374  taskdata->td_taskwait_thread = gtid + 1;
2375 
2376 #if USE_ITT_BUILD
2377  void *itt_sync_obj = NULL;
2378 #if USE_ITT_NOTIFY
2379  KMP_ITT_TASKWAIT_STARTING(itt_sync_obj);
2380 #endif /* USE_ITT_NOTIFY */
2381 #endif /* USE_ITT_BUILD */
2382  if (!taskdata->td_flags.team_serial) {
2383  kmp_task_team_t *task_team = thread->th.th_task_team;
2384  if (task_team != NULL) {
2385  if (KMP_TASKING_ENABLED(task_team)) {
2386 #if OMPT_SUPPORT
2387  if (UNLIKELY(ompt_enabled.enabled))
2388  thread->th.ompt_thread_info.ompt_task_yielded = 1;
2389 #endif
2390  __kmp_execute_tasks_32(
2391  thread, gtid, (kmp_flag_32<> *)NULL, FALSE,
2392  &thread_finished USE_ITT_BUILD_ARG(itt_sync_obj),
2393  __kmp_task_stealing_constraint);
2394 #if OMPT_SUPPORT
2395  if (UNLIKELY(ompt_enabled.enabled))
2396  thread->th.ompt_thread_info.ompt_task_yielded = 0;
2397 #endif
2398  }
2399  }
2400  }
2401 #if USE_ITT_BUILD
2402  KMP_ITT_TASKWAIT_FINISHED(itt_sync_obj);
2403 #endif /* USE_ITT_BUILD */
2404 
2405  // Debugger: The taskwait is completed. Location remains, but thread is
2406  // negated.
2407  taskdata->td_taskwait_thread = -taskdata->td_taskwait_thread;
2408  }
2409 
2410  KA_TRACE(10, ("__kmpc_omp_taskyield(exit): T#%d task %p resuming, "
2411  "returning TASK_CURRENT_NOT_QUEUED\n",
2412  gtid, taskdata));
2413 
2414  return TASK_CURRENT_NOT_QUEUED;
2415 }
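
// Hedged sketch: "#pragma omp taskyield" lowers to a call such as
//   __kmpc_omp_taskyield(&loc, __kmpc_global_thread_num(&loc), /*end_part=*/0);
// As the body above shows, end_part is currently only used in the trace output.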
2416 
2417 // Task Reduction implementation
2418 //
2419 // Note: initial implementation didn't take into account the possibility
2420 // to specify omp_orig for initializer of the UDR (user defined reduction).
2421 // Corrected implementation takes into account the omp_orig object.
2422 // Compiler is free to use old implementation if omp_orig is not specified.
2423 
2432 typedef struct kmp_taskred_flags {
2434  unsigned lazy_priv : 1;
2435  unsigned reserved31 : 31;
2436 } kmp_taskred_flags_t;
2437 
2441 typedef struct kmp_task_red_input {
2442  void *reduce_shar;
2443  size_t reduce_size;
2444  // three compiler-generated routines (init, fini are optional):
2445  void *reduce_init;
2446  void *reduce_fini;
2447  void *reduce_comb;
2448  kmp_taskred_flags_t flags;
2449 } kmp_task_red_input_t;
2450 
2454 typedef struct kmp_taskred_data {
2455  void *reduce_shar;
2456  size_t reduce_size;
2457  kmp_taskred_flags_t flags;
2458  void *reduce_priv;
2459  void *reduce_pend;
2460  // three compiler-generated routines (init, fini are optional):
2461  void *reduce_comb;
2462  void *reduce_init;
2463  void *reduce_fini;
2464  void *reduce_orig;
2465 } kmp_taskred_data_t;
2466 
2472 typedef struct kmp_taskred_input {
2473  void *reduce_shar;
2474  void *reduce_orig;
2475  size_t reduce_size;
2476  // three compiler-generated routines (init, fini are optional):
2477  void *reduce_init;
2478  void *reduce_fini;
2479  void *reduce_comb;
2480  kmp_taskred_flags_t flags;
2481 } kmp_taskred_input_t;
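
// Hedged sketch of how a compiler might fill the new-interface descriptor
// above for "#pragma omp taskgroup task_reduction(+ : sum)" with a double sum.
// The routine names red_init/red_comb are invented for this comment:
//
//   void red_init(void *priv, void *orig) { *(double *)priv = 0.0; }
//   void red_comb(void *shar, void *priv) { *(double *)shar += *(double *)priv; }
//
//   kmp_taskred_input_t in = {};
//   in.reduce_shar = &sum;                // shared item reduced into
//   in.reduce_orig = &sum;                // omp_orig object for the initializer
//   in.reduce_size = sizeof(sum);
//   in.reduce_init = (void *)red_init;
//   in.reduce_fini = NULL;                // no finalizer needed for a double
//   in.reduce_comb = (void *)red_comb;
//   in.flags.lazy_priv = 0;               // eagerly allocate per-thread copies
//   __kmpc_taskred_init(gtid, /*num=*/1, &in);  // after __kmpc_taskgroup()
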
2486 template <typename T> void __kmp_assign_orig(kmp_taskred_data_t &item, T &src);
2487 template <>
2488 void __kmp_assign_orig<kmp_task_red_input_t>(kmp_taskred_data_t &item,
2489  kmp_task_red_input_t &src) {
2490  item.reduce_orig = NULL;
2491 }
2492 template <>
2493 void __kmp_assign_orig<kmp_taskred_input_t>(kmp_taskred_data_t &item,
2494  kmp_taskred_input_t &src) {
2495  if (src.reduce_orig != NULL) {
2496  item.reduce_orig = src.reduce_orig;
2497  } else {
2498  item.reduce_orig = src.reduce_shar;
2499  } // non-NULL reduce_orig means new interface used
2500 }
2501 
2502 template <typename T> void __kmp_call_init(kmp_taskred_data_t &item, size_t j);
2503 template <>
2504 void __kmp_call_init<kmp_task_red_input_t>(kmp_taskred_data_t &item,
2505  size_t offset) {
2506  ((void (*)(void *))item.reduce_init)((char *)(item.reduce_priv) + offset);
2507 }
2508 template <>
2509 void __kmp_call_init<kmp_taskred_input_t>(kmp_taskred_data_t &item,
2510  size_t offset) {
2511  ((void (*)(void *, void *))item.reduce_init)(
2512  (char *)(item.reduce_priv) + offset, item.reduce_orig);
2513 }
2514 
2515 template <typename T>
2516 void *__kmp_task_reduction_init(int gtid, int num, T *data) {
2517  __kmp_assert_valid_gtid(gtid);
2518  kmp_info_t *thread = __kmp_threads[gtid];
2519  kmp_taskgroup_t *tg = thread->th.th_current_task->td_taskgroup;
2520  kmp_uint32 nth = thread->th.th_team_nproc;
2521  kmp_taskred_data_t *arr;
2522 
2523  // check input data just in case
2524  KMP_ASSERT(tg != NULL);
2525  KMP_ASSERT(data != NULL);
2526  KMP_ASSERT(num > 0);
2527  if (nth == 1 && !__kmp_enable_hidden_helper) {
2528  KA_TRACE(10, ("__kmpc_task_reduction_init: T#%d, tg %p, exiting nth=1\n",
2529  gtid, tg));
2530  return (void *)tg;
2531  }
2532  KA_TRACE(10, ("__kmpc_task_reduction_init: T#%d, taskgroup %p, #items %d\n",
2533  gtid, tg, num));
2534  arr = (kmp_taskred_data_t *)__kmp_thread_malloc(
2535  thread, num * sizeof(kmp_taskred_data_t));
2536  for (int i = 0; i < num; ++i) {
2537  size_t size = data[i].reduce_size - 1;
2538  // round the size up to cache line per thread-specific item
2539  size += CACHE_LINE - size % CACHE_LINE;
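    // e.g. with CACHE_LINE == 64: reduce_size 8  -> 64, reduce_size 64 -> 64,
    // reduce_size 65 -> 128 (the "- 1" above keeps exact multiples of
    // CACHE_LINE from being bumped to the next line).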
2540  KMP_ASSERT(data[i].reduce_comb != NULL); // combiner is mandatory
2541  arr[i].reduce_shar = data[i].reduce_shar;
2542  arr[i].reduce_size = size;
2543  arr[i].flags = data[i].flags;
2544  arr[i].reduce_comb = data[i].reduce_comb;
2545  arr[i].reduce_init = data[i].reduce_init;
2546  arr[i].reduce_fini = data[i].reduce_fini;
2547  __kmp_assign_orig<T>(arr[i], data[i]);
2548  if (!arr[i].flags.lazy_priv) {
2549  // allocate cache-line aligned block and fill it with zeros
2550  arr[i].reduce_priv = __kmp_allocate(nth * size);
2551  arr[i].reduce_pend = (char *)(arr[i].reduce_priv) + nth * size;
2552  if (arr[i].reduce_init != NULL) {
2553  // initialize all thread-specific items
2554  for (size_t j = 0; j < nth; ++j) {
2555  __kmp_call_init<T>(arr[i], j * size);
2556  }
2557  }
2558  } else {
2559  // only allocate space for pointers now,
2560  // objects will be lazily allocated/initialized if/when requested
2561  // note that __kmp_allocate zeroes the allocated memory
2562  arr[i].reduce_priv = __kmp_allocate(nth * sizeof(void *));
2563  }
2564  }
2565  tg->reduce_data = (void *)arr;
2566  tg->reduce_num_data = num;
2567  return (void *)tg;
2568 }
2569 
2584 void *__kmpc_task_reduction_init(int gtid, int num, void *data) {
2585 #if OMPX_TASKGRAPH
2586  kmp_tdg_info_t *tdg = __kmp_find_tdg(__kmp_curr_tdg_idx);
2587  if (tdg && __kmp_tdg_is_recording(tdg->tdg_status)) {
2588  kmp_tdg_info_t *this_tdg = __kmp_global_tdgs[__kmp_curr_tdg_idx];
2589  this_tdg->rec_taskred_data =
2590  __kmp_allocate(sizeof(kmp_task_red_input_t) * num);
2591  this_tdg->rec_num_taskred = num;
2592  KMP_MEMCPY(this_tdg->rec_taskred_data, data,
2593  sizeof(kmp_task_red_input_t) * num);
2594  }
2595 #endif
2596  return __kmp_task_reduction_init(gtid, num, (kmp_task_red_input_t *)data);
2597 }
2598 
2611 void *__kmpc_taskred_init(int gtid, int num, void *data) {
2612 #if OMPX_TASKGRAPH
2613  kmp_tdg_info_t *tdg = __kmp_find_tdg(__kmp_curr_tdg_idx);
2614  if (tdg && __kmp_tdg_is_recording(tdg->tdg_status)) {
2615  kmp_tdg_info_t *this_tdg = __kmp_global_tdgs[__kmp_curr_tdg_idx];
2616  this_tdg->rec_taskred_data =
2617  __kmp_allocate(sizeof(kmp_task_red_input_t) * num);
2618  this_tdg->rec_num_taskred = num;
2619  KMP_MEMCPY(this_tdg->rec_taskred_data, data,
2620  sizeof(kmp_task_red_input_t) * num);
2621  }
2622 #endif
2623  return __kmp_task_reduction_init(gtid, num, (kmp_taskred_input_t *)data);
2624 }
2625 
2626 // Copy task reduction data (except for shared pointers).
2627 template <typename T>
2628 void __kmp_task_reduction_init_copy(kmp_info_t *thr, int num, T *data,
2629  kmp_taskgroup_t *tg, void *reduce_data) {
2630  kmp_taskred_data_t *arr;
2631  KA_TRACE(20, ("__kmp_task_reduction_init_copy: Th %p, init taskgroup %p,"
2632  " from data %p\n",
2633  thr, tg, reduce_data));
2634  arr = (kmp_taskred_data_t *)__kmp_thread_malloc(
2635  thr, num * sizeof(kmp_taskred_data_t));
2636  // threads will share private copies, thunk routines, sizes, flags, etc.:
2637  KMP_MEMCPY(arr, reduce_data, num * sizeof(kmp_taskred_data_t));
2638  for (int i = 0; i < num; ++i) {
2639  arr[i].reduce_shar = data[i].reduce_shar; // init unique shared pointers
2640  }
2641  tg->reduce_data = (void *)arr;
2642  tg->reduce_num_data = num;
2643 }
2644 
2654 void *__kmpc_task_reduction_get_th_data(int gtid, void *tskgrp, void *data) {
2655  __kmp_assert_valid_gtid(gtid);
2656  kmp_info_t *thread = __kmp_threads[gtid];
2657  kmp_int32 nth = thread->th.th_team_nproc;
2658  if (nth == 1)
2659  return data; // nothing to do
2660 
2661  kmp_taskgroup_t *tg = (kmp_taskgroup_t *)tskgrp;
2662  if (tg == NULL)
2663  tg = thread->th.th_current_task->td_taskgroup;
2664  KMP_ASSERT(tg != NULL);
2665  kmp_taskred_data_t *arr = (kmp_taskred_data_t *)(tg->reduce_data);
2666  kmp_int32 num = tg->reduce_num_data;
2667  kmp_int32 tid = thread->th.th_info.ds.ds_tid;
2668 
2669 #if OMPX_TASKGRAPH
2670  if ((thread->th.th_current_task->is_taskgraph) &&
2671  (!__kmp_tdg_is_recording(
2672  __kmp_global_tdgs[__kmp_curr_tdg_idx]->tdg_status))) {
2673  tg = thread->th.th_current_task->td_taskgroup;
2674  KMP_ASSERT(tg != NULL);
2675  KMP_ASSERT(tg->reduce_data != NULL);
2676  arr = (kmp_taskred_data_t *)(tg->reduce_data);
2677  num = tg->reduce_num_data;
2678  }
2679 #endif
2680 
2681  KMP_ASSERT(data != NULL);
2682  while (tg != NULL) {
2683  for (int i = 0; i < num; ++i) {
2684  if (!arr[i].flags.lazy_priv) {
2685  if (data == arr[i].reduce_shar ||
2686  (data >= arr[i].reduce_priv && data < arr[i].reduce_pend))
2687  return (char *)(arr[i].reduce_priv) + tid * arr[i].reduce_size;
2688  } else {
2689  // check shared location first
2690  void **p_priv = (void **)(arr[i].reduce_priv);
2691  if (data == arr[i].reduce_shar)
2692  goto found;
2693  // check if we get some thread specific location as parameter
2694  for (int j = 0; j < nth; ++j)
2695  if (data == p_priv[j])
2696  goto found;
2697  continue; // not found, continue search
2698  found:
2699  if (p_priv[tid] == NULL) {
2700  // allocate thread specific object lazily
2701  p_priv[tid] = __kmp_allocate(arr[i].reduce_size);
2702  if (arr[i].reduce_init != NULL) {
2703  if (arr[i].reduce_orig != NULL) { // new interface
2704  ((void (*)(void *, void *))arr[i].reduce_init)(
2705  p_priv[tid], arr[i].reduce_orig);
2706  } else { // old interface (single parameter)
2707  ((void (*)(void *))arr[i].reduce_init)(p_priv[tid]);
2708  }
2709  }
2710  }
2711  return p_priv[tid];
2712  }
2713  }
2714  KMP_ASSERT(tg->parent);
2715  tg = tg->parent;
2716  arr = (kmp_taskred_data_t *)(tg->reduce_data);
2717  num = tg->reduce_num_data;
2718  }
2719  KMP_ASSERT2(0, "Unknown task reduction item");
2720  return NULL; // ERROR, this line never executed
2721 }
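
// Hedged usage sketch (invented variable names, not verbatim compiler output):
// inside the outlined body of a participating task, every access to the
// reduction variable is redirected through this lookup, e.g. for a double sum:
//
//   double *p = (double *)__kmpc_task_reduction_get_th_data(
//       gtid, /*tskgrp=*/NULL, /*data=*/&sum);   // NULL -> innermost taskgroup
//   *p += partial_result;                        // update the private copy
//
// The per-thread copies are later combined into the shared item by
// __kmp_task_reduction_fini() below.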
2722 
2723 // Finalize task reduction.
2724 // Called from __kmpc_end_taskgroup()
2725 static void __kmp_task_reduction_fini(kmp_info_t *th, kmp_taskgroup_t *tg) {
2726  kmp_int32 nth = th->th.th_team_nproc;
2727  KMP_DEBUG_ASSERT(
2728  nth > 1 ||
2729  __kmp_enable_hidden_helper); // should not be called if nth == 1 unless we
2730  // are using hidden helper threads
2731  kmp_taskred_data_t *arr = (kmp_taskred_data_t *)tg->reduce_data;
2732  kmp_int32 num = tg->reduce_num_data;
2733  for (int i = 0; i < num; ++i) {
2734  void *sh_data = arr[i].reduce_shar;
2735  void (*f_fini)(void *) = (void (*)(void *))(arr[i].reduce_fini);
2736  void (*f_comb)(void *, void *) =
2737  (void (*)(void *, void *))(arr[i].reduce_comb);
2738  if (!arr[i].flags.lazy_priv) {
2739  void *pr_data = arr[i].reduce_priv;
2740  size_t size = arr[i].reduce_size;
2741  for (int j = 0; j < nth; ++j) {
2742  void *priv_data = (char *)pr_data + j * size;
2743  f_comb(sh_data, priv_data); // combine results
2744  if (f_fini)
2745  f_fini(priv_data); // finalize if needed
2746  }
2747  } else {
2748  void **pr_data = (void **)(arr[i].reduce_priv);
2749  for (int j = 0; j < nth; ++j) {
2750  if (pr_data[j] != NULL) {
2751  f_comb(sh_data, pr_data[j]); // combine results
2752  if (f_fini)
2753  f_fini(pr_data[j]); // finalize if needed
2754  __kmp_free(pr_data[j]);
2755  }
2756  }
2757  }
2758  __kmp_free(arr[i].reduce_priv);
2759  }
2760  __kmp_thread_free(th, arr);
2761  tg->reduce_data = NULL;
2762  tg->reduce_num_data = 0;
2763 }
2764 
2765 // Cleanup task reduction data for parallel or worksharing,
2766 // do not touch task private data other threads still working with.
2767 // Called from __kmpc_end_taskgroup()
2768 static void __kmp_task_reduction_clean(kmp_info_t *th, kmp_taskgroup_t *tg) {
2769  __kmp_thread_free(th, tg->reduce_data);
2770  tg->reduce_data = NULL;
2771  tg->reduce_num_data = 0;
2772 }
2773 
2774 template <typename T>
2775 void *__kmp_task_reduction_modifier_init(ident_t *loc, int gtid, int is_ws,
2776  int num, T *data) {
2777  __kmp_assert_valid_gtid(gtid);
2778  kmp_info_t *thr = __kmp_threads[gtid];
2779  kmp_int32 nth = thr->th.th_team_nproc;
2780  __kmpc_taskgroup(loc, gtid); // form new taskgroup first
2781  if (nth == 1) {
2782  KA_TRACE(10,
2783  ("__kmpc_reduction_modifier_init: T#%d, tg %p, exiting nth=1\n",
2784  gtid, thr->th.th_current_task->td_taskgroup));
2785  return (void *)thr->th.th_current_task->td_taskgroup;
2786  }
2787  kmp_team_t *team = thr->th.th_team;
2788  void *reduce_data;
2789  kmp_taskgroup_t *tg;
2790  reduce_data = KMP_ATOMIC_LD_RLX(&team->t.t_tg_reduce_data[is_ws]);
2791  if (reduce_data == NULL &&
2792  __kmp_atomic_compare_store(&team->t.t_tg_reduce_data[is_ws], reduce_data,
2793  (void *)1)) {
2794  // single thread enters this block to initialize common reduction data
2795  KMP_DEBUG_ASSERT(reduce_data == NULL);
2796  // first initialize own data, then make a copy other threads can use
2797  tg = (kmp_taskgroup_t *)__kmp_task_reduction_init<T>(gtid, num, data);
2798  reduce_data = __kmp_thread_malloc(thr, num * sizeof(kmp_taskred_data_t));
2799  KMP_MEMCPY(reduce_data, tg->reduce_data, num * sizeof(kmp_taskred_data_t));
2800  // fini counters should be 0 at this point
2801  KMP_DEBUG_ASSERT(KMP_ATOMIC_LD_RLX(&team->t.t_tg_fini_counter[0]) == 0);
2802  KMP_DEBUG_ASSERT(KMP_ATOMIC_LD_RLX(&team->t.t_tg_fini_counter[1]) == 0);
2803  KMP_ATOMIC_ST_REL(&team->t.t_tg_reduce_data[is_ws], reduce_data);
2804  } else {
2805  while (
2806  (reduce_data = KMP_ATOMIC_LD_ACQ(&team->t.t_tg_reduce_data[is_ws])) ==
2807  (void *)1) { // wait for task reduction initialization
2808  KMP_CPU_PAUSE();
2809  }
2810  KMP_DEBUG_ASSERT(reduce_data > (void *)1); // should be valid pointer here
2811  tg = thr->th.th_current_task->td_taskgroup;
2812  __kmp_task_reduction_init_copy<T>(thr, num, data, tg, reduce_data);
2813  }
2814  return tg;
2815 }
2816 
2833 void *__kmpc_task_reduction_modifier_init(ident_t *loc, int gtid, int is_ws,
2834  int num, void *data) {
2835  return __kmp_task_reduction_modifier_init(loc, gtid, is_ws, num,
2836  (kmp_task_red_input_t *)data);
2837 }
2838 
2853 void *__kmpc_taskred_modifier_init(ident_t *loc, int gtid, int is_ws, int num,
2854  void *data) {
2855  return __kmp_task_reduction_modifier_init(loc, gtid, is_ws, num,
2856  (kmp_taskred_input_t *)data);
2857 }
2858 
2867 void __kmpc_task_reduction_modifier_fini(ident_t *loc, int gtid, int is_ws) {
2868  __kmpc_end_taskgroup(loc, gtid);
2869 }
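
// Hedged note: for a reduction with the task modifier, e.g.
// "#pragma omp parallel reduction(task, + : sum)", each thread is expected to
// call one of the *_modifier_init entries on entry to the region and
// __kmpc_task_reduction_modifier_fini() on exit; the first thread to arrive
// publishes the shared descriptor through team->t.t_tg_reduce_data (see
// __kmp_task_reduction_modifier_init above).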
2870 
2871 // __kmpc_taskgroup: Start a new taskgroup
2872 void __kmpc_taskgroup(ident_t *loc, int gtid) {
2873  __kmp_assert_valid_gtid(gtid);
2874  kmp_info_t *thread = __kmp_threads[gtid];
2875  kmp_taskdata_t *taskdata = thread->th.th_current_task;
2876  kmp_taskgroup_t *tg_new =
2877  (kmp_taskgroup_t *)__kmp_thread_malloc(thread, sizeof(kmp_taskgroup_t));
2878  KA_TRACE(10, ("__kmpc_taskgroup: T#%d loc=%p group=%p\n", gtid, loc, tg_new));
2879  KMP_ATOMIC_ST_RLX(&tg_new->count, 0);
2880  KMP_ATOMIC_ST_RLX(&tg_new->cancel_request, cancel_noreq);
2881  tg_new->parent = taskdata->td_taskgroup;
2882  tg_new->reduce_data = NULL;
2883  tg_new->reduce_num_data = 0;
2884  tg_new->gomp_data = NULL;
2885  taskdata->td_taskgroup = tg_new;
2886 
2887 #if OMPT_SUPPORT && OMPT_OPTIONAL
2888  if (UNLIKELY(ompt_enabled.ompt_callback_sync_region)) {
2889  void *codeptr = OMPT_LOAD_RETURN_ADDRESS(gtid);
2890  if (!codeptr)
2891  codeptr = OMPT_GET_RETURN_ADDRESS(0);
2892  kmp_team_t *team = thread->th.th_team;
2893  ompt_data_t my_task_data = taskdata->ompt_task_info.task_data;
2894  // FIXME: I think this is wrong for lwt!
2895  ompt_data_t my_parallel_data = team->t.ompt_team_info.parallel_data;
2896 
2897  ompt_callbacks.ompt_callback(ompt_callback_sync_region)(
2898  ompt_sync_region_taskgroup, ompt_scope_begin, &(my_parallel_data),
2899  &(my_task_data), codeptr);
2900  }
2901 #endif
2902 }
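
// Hedged sketch of the bracketing a compiler emits for "#pragma omp taskgroup"
// (simplified; the exact sequence is compiler-dependent):
//
//   __kmpc_taskgroup(&loc, gtid);
//   ... create child tasks via __kmpc_omp_task_alloc()/__kmpc_omp_task() ...
//   __kmpc_end_taskgroup(&loc, gtid);  // waits for taskgroup->count == 0
//
// Tasks created in between inherit td_taskgroup from their parent (see
// __kmp_task_alloc above), so their completion decrements the group's count.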
2903 
2904 // __kmpc_end_taskgroup: Wait until all tasks generated by the current task
2905 // and its descendants are complete
2906 void __kmpc_end_taskgroup(ident_t *loc, int gtid) {
2907  __kmp_assert_valid_gtid(gtid);
2908  kmp_info_t *thread = __kmp_threads[gtid];
2909  kmp_taskdata_t *taskdata = thread->th.th_current_task;
2910  kmp_taskgroup_t *taskgroup = taskdata->td_taskgroup;
2911  int thread_finished = FALSE;
2912 
2913 #if OMPT_SUPPORT && OMPT_OPTIONAL
2914  kmp_team_t *team;
2915  ompt_data_t my_task_data;
2916  ompt_data_t my_parallel_data;
2917  void *codeptr = nullptr;
2918  if (UNLIKELY(ompt_enabled.enabled)) {
2919  team = thread->th.th_team;
2920  my_task_data = taskdata->ompt_task_info.task_data;
2921  // FIXME: I think this is wrong for lwt!
2922  my_parallel_data = team->t.ompt_team_info.parallel_data;
2923  codeptr = OMPT_LOAD_RETURN_ADDRESS(gtid);
2924  if (!codeptr)
2925  codeptr = OMPT_GET_RETURN_ADDRESS(0);
2926  }
2927 #endif
2928 
2929  KA_TRACE(10, ("__kmpc_end_taskgroup(enter): T#%d loc=%p\n", gtid, loc));
2930  KMP_DEBUG_ASSERT(taskgroup != NULL);
2931  KMP_SET_THREAD_STATE_BLOCK(TASKGROUP);
2932 
2933  if (__kmp_tasking_mode != tskm_immediate_exec) {
2934  // mark task as waiting not on a barrier
2935  taskdata->td_taskwait_counter += 1;
2936  taskdata->td_taskwait_ident = loc;
2937  taskdata->td_taskwait_thread = gtid + 1;
2938 #if USE_ITT_BUILD
2939  // For ITT the taskgroup wait is similar to taskwait until we need to
2940  // distinguish them
2941  void *itt_sync_obj = NULL;
2942 #if USE_ITT_NOTIFY
2943  KMP_ITT_TASKWAIT_STARTING(itt_sync_obj);
2944 #endif /* USE_ITT_NOTIFY */
2945 #endif /* USE_ITT_BUILD */
2946 
2947 #if OMPT_SUPPORT && OMPT_OPTIONAL
2948  if (UNLIKELY(ompt_enabled.ompt_callback_sync_region_wait)) {
2949  ompt_callbacks.ompt_callback(ompt_callback_sync_region_wait)(
2950  ompt_sync_region_taskgroup, ompt_scope_begin, &(my_parallel_data),
2951  &(my_task_data), codeptr);
2952  }
2953 #endif
2954 
2955  if (!taskdata->td_flags.team_serial ||
2956  (thread->th.th_task_team != NULL &&
2957  (thread->th.th_task_team->tt.tt_found_proxy_tasks ||
2958  thread->th.th_task_team->tt.tt_hidden_helper_task_encountered))) {
2959  kmp_flag_32<false, false> flag(
2960  RCAST(std::atomic<kmp_uint32> *, &(taskgroup->count)), 0U);
2961  while (KMP_ATOMIC_LD_ACQ(&taskgroup->count) != 0) {
2962  flag.execute_tasks(thread, gtid, FALSE,
2963  &thread_finished USE_ITT_BUILD_ARG(itt_sync_obj),
2964  __kmp_task_stealing_constraint);
2965  }
2966  }
2967  taskdata->td_taskwait_thread = -taskdata->td_taskwait_thread; // end waiting
2968 
2969 #if OMPT_SUPPORT && OMPT_OPTIONAL
2970  if (UNLIKELY(ompt_enabled.ompt_callback_sync_region_wait)) {
2971  ompt_callbacks.ompt_callback(ompt_callback_sync_region_wait)(
2972  ompt_sync_region_taskgroup, ompt_scope_end, &(my_parallel_data),
2973  &(my_task_data), codeptr);
2974  }
2975 #endif
2976 
2977 #if USE_ITT_BUILD
2978  KMP_ITT_TASKWAIT_FINISHED(itt_sync_obj);
2979  KMP_FSYNC_ACQUIRED(taskdata); // acquire self - sync with descendants
2980 #endif /* USE_ITT_BUILD */
2981  }
2982  KMP_DEBUG_ASSERT(taskgroup->count == 0);
2983 
2984  if (taskgroup->reduce_data != NULL &&
2985  !taskgroup->gomp_data) { // need to reduce?
2986  int cnt;
2987  void *reduce_data;
2988  kmp_team_t *t = thread->th.th_team;
2989  kmp_taskred_data_t *arr = (kmp_taskred_data_t *)taskgroup->reduce_data;
2990  // check if <priv> data of the first reduction variable is shared for the team
2991  void *priv0 = arr[0].reduce_priv;
2992  if ((reduce_data = KMP_ATOMIC_LD_ACQ(&t->t.t_tg_reduce_data[0])) != NULL &&
2993  ((kmp_taskred_data_t *)reduce_data)[0].reduce_priv == priv0) {
2994  // finishing task reduction on parallel
2995  cnt = KMP_ATOMIC_INC(&t->t.t_tg_fini_counter[0]);
2996  if (cnt == thread->th.th_team_nproc - 1) {
2997  // we are the last thread passing __kmpc_reduction_modifier_fini()
2998  // finalize task reduction:
2999  __kmp_task_reduction_fini(thread, taskgroup);
3000  // cleanup fields in the team structure:
3001  // TODO: is relaxed store enough here (whole barrier should follow)?
3002  __kmp_thread_free(thread, reduce_data);
3003  KMP_ATOMIC_ST_REL(&t->t.t_tg_reduce_data[0], NULL);
3004  KMP_ATOMIC_ST_REL(&t->t.t_tg_fini_counter[0], 0);
3005  } else {
3006  // we are not the last thread passing __kmpc_reduction_modifier_fini(),
3007  // so do not finalize the reduction, just clean up our own copy of the data
3008  __kmp_task_reduction_clean(thread, taskgroup);
3009  }
3010  } else if ((reduce_data = KMP_ATOMIC_LD_ACQ(&t->t.t_tg_reduce_data[1])) !=
3011  NULL &&
3012  ((kmp_taskred_data_t *)reduce_data)[0].reduce_priv == priv0) {
3013  // finishing task reduction on worksharing
3014  cnt = KMP_ATOMIC_INC(&t->t.t_tg_fini_counter[1]);
3015  if (cnt == thread->th.th_team_nproc - 1) {
3016  // we are the last thread passing __kmpc_reduction_modifier_fini()
3017  __kmp_task_reduction_fini(thread, taskgroup);
3018  // cleanup fields in team structure:
3019  // TODO: is relaxed store enough here (whole barrier should follow)?
3020  __kmp_thread_free(thread, reduce_data);
3021  KMP_ATOMIC_ST_REL(&t->t.t_tg_reduce_data[1], NULL);
3022  KMP_ATOMIC_ST_REL(&t->t.t_tg_fini_counter[1], 0);
3023  } else {
3024  // we are not the last thread passing __kmpc_reduction_modifier_fini(),
3025  // so do not finalize the reduction, just clean up our own copy of the data
3026  __kmp_task_reduction_clean(thread, taskgroup);
3027  }
3028  } else {
3029  // finishing task reduction on taskgroup
3030  __kmp_task_reduction_fini(thread, taskgroup);
3031  }
3032  }
3033  // Restore parent taskgroup for the current task
3034  taskdata->td_taskgroup = taskgroup->parent;
3035  __kmp_thread_free(thread, taskgroup);
3036 
3037  KA_TRACE(10, ("__kmpc_end_taskgroup(exit): T#%d task %p finished waiting\n",
3038  gtid, taskdata));
3039 
3040 #if OMPT_SUPPORT && OMPT_OPTIONAL
3041  if (UNLIKELY(ompt_enabled.ompt_callback_sync_region)) {
3042  ompt_callbacks.ompt_callback(ompt_callback_sync_region)(
3043  ompt_sync_region_taskgroup, ompt_scope_end, &(my_parallel_data),
3044  &(my_task_data), codeptr);
3045  }
3046 #endif
3047 }
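// A minimal sketch (hypothetical user code, not part of this file) of the
// construct the two entry points above implement. The compiler brackets the
// taskgroup body with the paired calls, so every task generated inside is
// counted in the group's count and waited for in __kmpc_end_taskgroup:
//
//   #pragma omp taskgroup          // -> __kmpc_taskgroup(loc, gtid)
//   {
//     for (int i = 0; i < n; ++i) {
//       #pragma omp task firstprivate(i)
//       do_work(i);
//     }
//   }                              // -> __kmpc_end_taskgroup(loc, gtid)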
3048 
3049 static kmp_task_t *__kmp_get_priority_task(kmp_int32 gtid,
3050  kmp_task_team_t *task_team,
3051  kmp_int32 is_constrained) {
3052  kmp_task_t *task = NULL;
3053  kmp_taskdata_t *taskdata;
3054  kmp_taskdata_t *current;
3055  kmp_thread_data_t *thread_data;
3056  int ntasks = task_team->tt.tt_num_task_pri;
3057  if (ntasks == 0) {
3058  KA_TRACE(
3059  20, ("__kmp_get_priority_task(exit #1): T#%d No tasks to get\n", gtid));
3060  return NULL;
3061  }
3062  do {
3063  // decrement num_tasks to "reserve" one task for execution
3064  if (__kmp_atomic_compare_store(&task_team->tt.tt_num_task_pri, ntasks,
3065  ntasks - 1))
3066  break;
3067  ntasks = task_team->tt.tt_num_task_pri;
3068  } while (ntasks > 0);
3069  if (ntasks == 0) {
3070  KA_TRACE(20, ("__kmp_get_priority_task(exit #2): T#%d No tasks to get\n",
3071  __kmp_get_gtid()));
3072  return NULL;
3073  }
3074  // We got a "ticket" to get a "reserved" priority task
3075  int deque_ntasks;
3076  kmp_task_pri_t *list = task_team->tt.tt_task_pri_list;
3077  do {
3078  KMP_ASSERT(list != NULL);
3079  thread_data = &list->td;
3080  __kmp_acquire_bootstrap_lock(&thread_data->td.td_deque_lock);
3081  deque_ntasks = thread_data->td.td_deque_ntasks;
3082  if (deque_ntasks == 0) {
3083  __kmp_release_bootstrap_lock(&thread_data->td.td_deque_lock);
3084  KA_TRACE(20, ("__kmp_get_priority_task: T#%d No tasks to get from %p\n",
3085  __kmp_get_gtid(), thread_data));
3086  list = list->next;
3087  }
3088  } while (deque_ntasks == 0);
3089  KMP_DEBUG_ASSERT(deque_ntasks);
3090  int target = thread_data->td.td_deque_head;
3091  current = __kmp_threads[gtid]->th.th_current_task;
3092  taskdata = thread_data->td.td_deque[target];
3093  if (__kmp_task_is_allowed(gtid, is_constrained, taskdata, current)) {
3094  // Bump the head pointer and wrap.
3095  thread_data->td.td_deque_head =
3096  (target + 1) & TASK_DEQUE_MASK(thread_data->td);
3097  } else {
3098  if (!task_team->tt.tt_untied_task_encountered) {
3099  // The TSC does not allow stealing the victim task
3100  __kmp_release_bootstrap_lock(&thread_data->td.td_deque_lock);
3101  KA_TRACE(20, ("__kmp_get_priority_task(exit #3): T#%d could not get task "
3102  "from %p: task_team=%p ntasks=%d head=%u tail=%u\n",
3103  gtid, thread_data, task_team, deque_ntasks, target,
3104  thread_data->td.td_deque_tail));
3105  task_team->tt.tt_num_task_pri++; // atomic inc, restore value
3106  return NULL;
3107  }
3108  int i;
3109  // walk through the deque trying to steal any task
3110  taskdata = NULL;
3111  for (i = 1; i < deque_ntasks; ++i) {
3112  target = (target + 1) & TASK_DEQUE_MASK(thread_data->td);
3113  taskdata = thread_data->td.td_deque[target];
3114  if (__kmp_task_is_allowed(gtid, is_constrained, taskdata, current)) {
3115  break; // found task to execute
3116  } else {
3117  taskdata = NULL;
3118  }
3119  }
3120  if (taskdata == NULL) {
3121  // No appropriate candidate found to execute
3122  __kmp_release_bootstrap_lock(&thread_data->td.td_deque_lock);
3123  KA_TRACE(
3124  10, ("__kmp_get_priority_task(exit #4): T#%d could not get task from "
3125  "%p: task_team=%p ntasks=%d head=%u tail=%u\n",
3126  gtid, thread_data, task_team, deque_ntasks,
3127  thread_data->td.td_deque_head, thread_data->td.td_deque_tail));
3128  task_team->tt.tt_num_task_pri++; // atomic inc, restore value
3129  return NULL;
3130  }
3131  int prev = target;
3132  for (i = i + 1; i < deque_ntasks; ++i) {
3133  // shift remaining tasks in the deque left by 1
3134  target = (target + 1) & TASK_DEQUE_MASK(thread_data->td);
3135  thread_data->td.td_deque[prev] = thread_data->td.td_deque[target];
3136  prev = target;
3137  }
3138  KMP_DEBUG_ASSERT(
3139  thread_data->td.td_deque_tail ==
3140  (kmp_uint32)((target + 1) & TASK_DEQUE_MASK(thread_data->td)));
3141  thread_data->td.td_deque_tail = target; // tail -= 1 (wrapped)
3142  }
3143  thread_data->td.td_deque_ntasks = deque_ntasks - 1;
3144  __kmp_release_bootstrap_lock(&thread_data->td.td_deque_lock);
3145  task = KMP_TASKDATA_TO_TASK(taskdata);
3146  return task;
3147 }
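// The middle-removal above can be distilled into a hypothetical helper (a
// sketch only, assuming td_deque_lock is held and slot indexes the task
// being taken; the runtime performs this inline):
//
//   static void ring_remove(kmp_taskdata_t **dq, kmp_uint32 mask,
//                           kmp_uint32 slot, kmp_uint32 *tail) {
//     kmp_uint32 prev = slot, next = (slot + 1) & mask;
//     while (next != *tail) { // shift the surviving entries left by one
//       dq[prev] = dq[next];
//       prev = next;
//       next = (next + 1) & mask;
//     }
//     *tail = prev; // tail -= 1 (wrapped)
//   }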
3148 
3149 // __kmp_remove_my_task: remove a task from my own deque
3150 static kmp_task_t *__kmp_remove_my_task(kmp_info_t *thread, kmp_int32 gtid,
3151  kmp_task_team_t *task_team,
3152  kmp_int32 is_constrained) {
3153  kmp_task_t *task;
3154  kmp_taskdata_t *taskdata;
3155  kmp_thread_data_t *thread_data;
3156  kmp_uint32 tail;
3157 
3158  KMP_DEBUG_ASSERT(__kmp_tasking_mode != tskm_immediate_exec);
3159  KMP_DEBUG_ASSERT(task_team->tt.tt_threads_data !=
3160  NULL); // Caller should check this condition
3161 
3162  thread_data = &task_team->tt.tt_threads_data[__kmp_tid_from_gtid(gtid)];
3163 
3164  KA_TRACE(10, ("__kmp_remove_my_task(enter): T#%d ntasks=%d head=%u tail=%u\n",
3165  gtid, thread_data->td.td_deque_ntasks,
3166  thread_data->td.td_deque_head, thread_data->td.td_deque_tail));
3167 
3168  if (TCR_4(thread_data->td.td_deque_ntasks) == 0) {
3169  KA_TRACE(10,
3170  ("__kmp_remove_my_task(exit #1): T#%d No tasks to remove: "
3171  "ntasks=%d head=%u tail=%u\n",
3172  gtid, thread_data->td.td_deque_ntasks,
3173  thread_data->td.td_deque_head, thread_data->td.td_deque_tail));
3174  return NULL;
3175  }
3176 
3177  __kmp_acquire_bootstrap_lock(&thread_data->td.td_deque_lock);
3178 
3179  if (TCR_4(thread_data->td.td_deque_ntasks) == 0) {
3180  __kmp_release_bootstrap_lock(&thread_data->td.td_deque_lock);
3181  KA_TRACE(10,
3182  ("__kmp_remove_my_task(exit #2): T#%d No tasks to remove: "
3183  "ntasks=%d head=%u tail=%u\n",
3184  gtid, thread_data->td.td_deque_ntasks,
3185  thread_data->td.td_deque_head, thread_data->td.td_deque_tail));
3186  return NULL;
3187  }
3188 
3189  tail = (thread_data->td.td_deque_tail - 1) &
3190  TASK_DEQUE_MASK(thread_data->td); // Wrap index.
3191  taskdata = thread_data->td.td_deque[tail];
3192 
3193  if (!__kmp_task_is_allowed(gtid, is_constrained, taskdata,
3194  thread->th.th_current_task)) {
3195  // The TSC does not allow taking the tail task
3196  __kmp_release_bootstrap_lock(&thread_data->td.td_deque_lock);
3197  KA_TRACE(10,
3198  ("__kmp_remove_my_task(exit #3): T#%d TSC blocks tail task: "
3199  "ntasks=%d head=%u tail=%u\n",
3200  gtid, thread_data->td.td_deque_ntasks,
3201  thread_data->td.td_deque_head, thread_data->td.td_deque_tail));
3202  return NULL;
3203  }
3204 
3205  thread_data->td.td_deque_tail = tail;
3206  TCW_4(thread_data->td.td_deque_ntasks, thread_data->td.td_deque_ntasks - 1);
3207 
3208  __kmp_release_bootstrap_lock(&thread_data->td.td_deque_lock);
3209 
3210  KA_TRACE(10, ("__kmp_remove_my_task(exit #4): T#%d task %p removed: "
3211  "ntasks=%d head=%u tail=%u\n",
3212  gtid, taskdata, thread_data->td.td_deque_ntasks,
3213  thread_data->td.td_deque_head, thread_data->td.td_deque_tail));
3214 
3215  task = KMP_TASKDATA_TO_TASK(taskdata);
3216  return task;
3217 }
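// Note the asymmetry with __kmp_steal_task below: the owner pops LIFO from
// the tail of its own deque, while thieves take FIFO from the head, so
// recently pushed (cache-warm) tasks run locally and older tasks are offered
// to thieves. The index arithmetic, assuming a power-of-two deque size:
//
//   tail = (tail - 1) & TASK_DEQUE_MASK(td); // pop own task (this routine)
//   head = (head + 1) & TASK_DEQUE_MASK(td); // steal (__kmp_steal_task)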
3218 
3219 // __kmp_steal_task: remove a task from another thread's deque
3220 // Assumes that the calling thread has already checked that the task_team's
3221 // thread_data exists before calling this routine.
3222 static kmp_task_t *__kmp_steal_task(kmp_info_t *victim_thr, kmp_int32 gtid,
3223  kmp_task_team_t *task_team,
3224  std::atomic<kmp_int32> *unfinished_threads,
3225  int *thread_finished,
3226  kmp_int32 is_constrained) {
3227  kmp_task_t *task;
3228  kmp_taskdata_t *taskdata;
3229  kmp_taskdata_t *current;
3230  kmp_thread_data_t *victim_td, *threads_data;
3231  kmp_int32 target;
3232  kmp_int32 victim_tid;
3233 
3234  KMP_DEBUG_ASSERT(__kmp_tasking_mode != tskm_immediate_exec);
3235 
3236  threads_data = task_team->tt.tt_threads_data;
3237  KMP_DEBUG_ASSERT(threads_data != NULL); // Caller should check this condition
3238 
3239  victim_tid = victim_thr->th.th_info.ds.ds_tid;
3240  victim_td = &threads_data[victim_tid];
3241 
3242  KA_TRACE(10, ("__kmp_steal_task(enter): T#%d try to steal from T#%d: "
3243  "task_team=%p ntasks=%d head=%u tail=%u\n",
3244  gtid, __kmp_gtid_from_thread(victim_thr), task_team,
3245  victim_td->td.td_deque_ntasks, victim_td->td.td_deque_head,
3246  victim_td->td.td_deque_tail));
3247 
3248  if (TCR_4(victim_td->td.td_deque_ntasks) == 0) {
3249  KA_TRACE(10, ("__kmp_steal_task(exit #1): T#%d could not steal from T#%d: "
3250  "task_team=%p ntasks=%d head=%u tail=%u\n",
3251  gtid, __kmp_gtid_from_thread(victim_thr), task_team,
3252  victim_td->td.td_deque_ntasks, victim_td->td.td_deque_head,
3253  victim_td->td.td_deque_tail));
3254  return NULL;
3255  }
3256 
3257  __kmp_acquire_bootstrap_lock(&victim_td->td.td_deque_lock);
3258 
3259  int ntasks = TCR_4(victim_td->td.td_deque_ntasks);
3260  // Check again after we acquire the lock
3261  if (ntasks == 0) {
3262  __kmp_release_bootstrap_lock(&victim_td->td.td_deque_lock);
3263  KA_TRACE(10, ("__kmp_steal_task(exit #2): T#%d could not steal from T#%d: "
3264  "task_team=%p ntasks=%d head=%u tail=%u\n",
3265  gtid, __kmp_gtid_from_thread(victim_thr), task_team, ntasks,
3266  victim_td->td.td_deque_head, victim_td->td.td_deque_tail));
3267  return NULL;
3268  }
3269 
3270  KMP_DEBUG_ASSERT(victim_td->td.td_deque != NULL);
3271  current = __kmp_threads[gtid]->th.th_current_task;
3272  taskdata = victim_td->td.td_deque[victim_td->td.td_deque_head];
3273  if (__kmp_task_is_allowed(gtid, is_constrained, taskdata, current)) {
3274  // Bump the head pointer and wrap.
3275  victim_td->td.td_deque_head =
3276  (victim_td->td.td_deque_head + 1) & TASK_DEQUE_MASK(victim_td->td);
3277  } else {
3278  if (!task_team->tt.tt_untied_task_encountered) {
3279  // The TSC does not allow stealing the victim task
3280  __kmp_release_bootstrap_lock(&victim_td->td.td_deque_lock);
3281  KA_TRACE(10, ("__kmp_steal_task(exit #3): T#%d could not steal from "
3282  "T#%d: task_team=%p ntasks=%d head=%u tail=%u\n",
3283  gtid, __kmp_gtid_from_thread(victim_thr), task_team, ntasks,
3284  victim_td->td.td_deque_head, victim_td->td.td_deque_tail));
3285  return NULL;
3286  }
3287  int i;
3288  // walk through victim's deque trying to steal any task
3289  target = victim_td->td.td_deque_head;
3290  taskdata = NULL;
3291  for (i = 1; i < ntasks; ++i) {
3292  target = (target + 1) & TASK_DEQUE_MASK(victim_td->td);
3293  taskdata = victim_td->td.td_deque[target];
3294  if (__kmp_task_is_allowed(gtid, is_constrained, taskdata, current)) {
3295  break; // found victim task
3296  } else {
3297  taskdata = NULL;
3298  }
3299  }
3300  if (taskdata == NULL) {
3301  // No appropriate candidate to steal found
3302  __kmp_release_bootstrap_lock(&victim_td->td.td_deque_lock);
3303  KA_TRACE(10, ("__kmp_steal_task(exit #4): T#%d could not steal from "
3304  "T#%d: task_team=%p ntasks=%d head=%u tail=%u\n",
3305  gtid, __kmp_gtid_from_thread(victim_thr), task_team, ntasks,
3306  victim_td->td.td_deque_head, victim_td->td.td_deque_tail));
3307  return NULL;
3308  }
3309  int prev = target;
3310  for (i = i + 1; i < ntasks; ++i) {
3311  // shift remaining tasks in the deque left by 1
3312  target = (target + 1) & TASK_DEQUE_MASK(victim_td->td);
3313  victim_td->td.td_deque[prev] = victim_td->td.td_deque[target];
3314  prev = target;
3315  }
3316  KMP_DEBUG_ASSERT(
3317  victim_td->td.td_deque_tail ==
3318  (kmp_uint32)((target + 1) & TASK_DEQUE_MASK(victim_td->td)));
3319  victim_td->td.td_deque_tail = target; // tail -= 1 (wrapped)
3320  }
3321  if (*thread_finished) {
3322  // We need to un-mark this victim as a finished victim. This must be done
3323  // before releasing the lock, or else other threads (starting with the
3324  // primary thread victim) might be prematurely released from the barrier!!!
3325 #if KMP_DEBUG
3326  kmp_int32 count =
3327 #endif
3328  KMP_ATOMIC_INC(unfinished_threads);
3329  KA_TRACE(
3330  20,
3331  ("__kmp_steal_task: T#%d inc unfinished_threads to %d: task_team=%p\n",
3332  gtid, count + 1, task_team));
3333  *thread_finished = FALSE;
3334  }
3335  TCW_4(victim_td->td.td_deque_ntasks, ntasks - 1);
3336 
3337  __kmp_release_bootstrap_lock(&victim_td->td.td_deque_lock);
3338 
3339  KMP_COUNT_BLOCK(TASK_stolen);
3340  KA_TRACE(10,
3341  ("__kmp_steal_task(exit #5): T#%d stole task %p from T#%d: "
3342  "task_team=%p ntasks=%d head=%u tail=%u\n",
3343  gtid, taskdata, __kmp_gtid_from_thread(victim_thr), task_team,
3344  ntasks, victim_td->td.td_deque_head, victim_td->td.td_deque_tail));
3345 
3346  task = KMP_TASKDATA_TO_TASK(taskdata);
3347  return task;
3348 }
3349 
3350 // __kmp_execute_tasks_template: Choose and execute tasks until either the
3351 // condition is satisfied (return true) or there are none left (return false).
3352 //
3353 // final_spin is TRUE if this is the spin at the release barrier.
3354 // thread_finished indicates whether the thread is finished executing all
3355 // the tasks it has on its deque, and is at the release barrier.
3356 // spinner is the location on which to spin.
3357 // spinner == NULL means only execute a single task and return.
3358 // checker is the value to check to terminate the spin.
3359 template <class C>
3360 static inline int __kmp_execute_tasks_template(
3361  kmp_info_t *thread, kmp_int32 gtid, C *flag, int final_spin,
3362  int *thread_finished USE_ITT_BUILD_ARG(void *itt_sync_obj),
3363  kmp_int32 is_constrained) {
3364  kmp_task_team_t *task_team = thread->th.th_task_team;
3365  kmp_thread_data_t *threads_data;
3366  kmp_task_t *task;
3367  kmp_info_t *other_thread;
3368  kmp_taskdata_t *current_task = thread->th.th_current_task;
3369  std::atomic<kmp_int32> *unfinished_threads;
3370  kmp_int32 nthreads, victim_tid = -2, use_own_tasks = 1, new_victim = 0,
3371  tid = thread->th.th_info.ds.ds_tid;
3372 
3373  KMP_DEBUG_ASSERT(__kmp_tasking_mode != tskm_immediate_exec);
3374  KMP_DEBUG_ASSERT(thread == __kmp_threads[gtid]);
3375 
3376  if (task_team == NULL || current_task == NULL)
3377  return FALSE;
3378 
3379  KA_TRACE(15, ("__kmp_execute_tasks_template(enter): T#%d final_spin=%d "
3380  "*thread_finished=%d\n",
3381  gtid, final_spin, *thread_finished));
3382 
3383  thread->th.th_reap_state = KMP_NOT_SAFE_TO_REAP;
3384  threads_data = (kmp_thread_data_t *)TCR_PTR(task_team->tt.tt_threads_data);
3385 
3386  KMP_DEBUG_ASSERT(threads_data != NULL);
3387 
3388  nthreads = task_team->tt.tt_nproc;
3389  unfinished_threads = &(task_team->tt.tt_unfinished_threads);
3390  KMP_DEBUG_ASSERT(nthreads > 1 || task_team->tt.tt_found_proxy_tasks ||
3391  task_team->tt.tt_hidden_helper_task_encountered);
3392  KMP_DEBUG_ASSERT(*unfinished_threads >= 0);
3393 
3394  while (1) { // Outer loop keeps trying to find tasks in case of single thread
3395  // getting tasks from target constructs
3396  while (1) { // Inner loop to find a task and execute it
3397  task = NULL;
3398  if (task_team->tt.tt_num_task_pri) { // get priority task first
3399  task = __kmp_get_priority_task(gtid, task_team, is_constrained);
3400  }
3401  if (task == NULL && use_own_tasks) { // check own queue next
3402  task = __kmp_remove_my_task(thread, gtid, task_team, is_constrained);
3403  }
3404  if ((task == NULL) && (nthreads > 1)) { // Steal a task finally
3405  int asleep = 1;
3406  use_own_tasks = 0;
3407  // Try to steal from the last place I stole from successfully.
3408  if (victim_tid == -2) { // haven't stolen anything yet
3409  victim_tid = threads_data[tid].td.td_deque_last_stolen;
3410  if (victim_tid !=
3411  -1) // if we have a last stolen from victim, get the thread
3412  other_thread = threads_data[victim_tid].td.td_thr;
3413  }
3414  if (victim_tid != -1) { // found last victim
3415  asleep = 0;
3416  } else if (!new_victim) { // no recent steals and we haven't already
3417  // used a new victim; select a random thread
3418  do { // Find a different thread to steal work from.
3419  // Pick a random thread. Initial plan was to cycle through all the
3420  // threads, and only return if we tried to steal from every thread,
3421  // and failed. Arch says that's not such a great idea.
3422  victim_tid = __kmp_get_random(thread) % (nthreads - 1);
3423  if (victim_tid >= tid) {
3424  ++victim_tid; // Adjusts random distribution to exclude self
3425  }
3426  // Found a potential victim
3427  other_thread = threads_data[victim_tid].td.td_thr;
3428  // There is a slight chance that __kmp_enable_tasking() did not wake
3429  // up all threads waiting at the barrier. If victim is sleeping,
3430  // then wake it up. Since we were going to pay the cache miss
3431  // penalty for referencing another thread's kmp_info_t struct
3432  // anyway, the check shouldn't cost too much performance at this
3433  // point. In
3434  // extra barrier mode, tasks do not sleep at the separate tasking
3435  // barrier, so this isn't a problem.
3436  asleep = 0;
3437  if ((__kmp_tasking_mode == tskm_task_teams) &&
3438  (__kmp_dflt_blocktime != KMP_MAX_BLOCKTIME) &&
3439  (TCR_PTR(CCAST(void *, other_thread->th.th_sleep_loc)) !=
3440  NULL)) {
3441  asleep = 1;
3442  __kmp_null_resume_wrapper(other_thread);
3443  // A sleeping thread should not have any tasks on its queue.
3444  // There is a slight possibility that it resumes, steals a task
3445  // from another thread, which spawns more tasks, all in the time
3446  // that it takes this thread to check => don't write an assertion
3447  // that the victim's queue is empty. Try stealing from a
3448  // different thread.
3449  }
3450  } while (asleep);
3451  }
3452 
3453  if (!asleep) {
3454  // We have a victim to try to steal from
3455  task = __kmp_steal_task(other_thread, gtid, task_team,
3456  unfinished_threads, thread_finished,
3457  is_constrained);
3458  }
3459  if (task != NULL) { // set last stolen to victim
3460  if (threads_data[tid].td.td_deque_last_stolen != victim_tid) {
3461  threads_data[tid].td.td_deque_last_stolen = victim_tid;
3462  // The pre-refactored code did not try more than 1 successful new
3463  // victim, unless the last one generated more local tasks;
3464  // new_victim keeps track of this
3465  new_victim = 1;
3466  }
3467  } else { // No tasks found; unset last_stolen
3468  KMP_CHECK_UPDATE(threads_data[tid].td.td_deque_last_stolen, -1);
3469  victim_tid = -2; // no successful victim found
3470  }
3471  }
3472 
3473  if (task == NULL)
3474  break; // break out of tasking loop
3475 
3476 // Found a task; execute it
3477 #if USE_ITT_BUILD && USE_ITT_NOTIFY
3478  if (__itt_sync_create_ptr || KMP_ITT_DEBUG) {
3479  if (itt_sync_obj == NULL) { // we are at fork barrier where we could not
3480  // get the object reliably
3481  itt_sync_obj = __kmp_itt_barrier_object(gtid, bs_forkjoin_barrier);
3482  }
3483  __kmp_itt_task_starting(itt_sync_obj);
3484  }
3485 #endif /* USE_ITT_BUILD && USE_ITT_NOTIFY */
3486  __kmp_invoke_task(gtid, task, current_task);
3487 #if USE_ITT_BUILD
3488  if (itt_sync_obj != NULL)
3489  __kmp_itt_task_finished(itt_sync_obj);
3490 #endif /* USE_ITT_BUILD */
3491  // If this thread is only partway through the barrier and the condition is
3492  // met, then return now, so that the barrier gather/release pattern can
3493  // proceed. If this thread is in the last spin loop in the barrier,
3494  // waiting to be released, we know that the termination condition will not
3495  // be satisfied, so don't waste any cycles checking it.
3496  if (flag == NULL || (!final_spin && flag->done_check())) {
3497  KA_TRACE(
3498  15,
3499  ("__kmp_execute_tasks_template: T#%d spin condition satisfied\n",
3500  gtid));
3501  return TRUE;
3502  }
3503  if (thread->th.th_task_team == NULL) {
3504  break;
3505  }
3506  KMP_YIELD(__kmp_library == library_throughput); // Yield before next task
3507  // If execution of a stolen task results in more tasks being placed on our
3508  // run queue, reset use_own_tasks
3509  if (!use_own_tasks && TCR_4(threads_data[tid].td.td_deque_ntasks) != 0) {
3510  KA_TRACE(20, ("__kmp_execute_tasks_template: T#%d stolen task spawned "
3511  "other tasks, restart\n",
3512  gtid));
3513  use_own_tasks = 1;
3514  new_victim = 0;
3515  }
3516  }
3517 
3518  // The task source has been exhausted. If in final spin loop of barrier,
3519  // check if termination condition is satisfied. The work queue may be empty
3520  // but there might be proxy tasks still executing.
3521  if (final_spin &&
3522  KMP_ATOMIC_LD_ACQ(&current_task->td_incomplete_child_tasks) == 0) {
3523  // First, decrement the #unfinished threads, if that has not already been
3524  // done. This decrement might be to the spin location, and result in the
3525  // termination condition being satisfied.
3526  if (!*thread_finished) {
3527 #if KMP_DEBUG
3528  kmp_int32 count = -1 +
3529 #endif
3530  KMP_ATOMIC_DEC(unfinished_threads);
3531  KA_TRACE(20, ("__kmp_execute_tasks_template: T#%d dec "
3532  "unfinished_threads to %d task_team=%p\n",
3533  gtid, count, task_team));
3534  *thread_finished = TRUE;
3535  }
3536 
3537  // It is now unsafe to reference thread->th.th_team !!!
3538  // Decrementing task_team->tt.tt_unfinished_threads can allow the primary
3539  // thread to pass through the barrier, where it might reset each thread's
3540  // th.th_team field for the next parallel region. If we can steal more
3541  // work, we know that this has not happened yet.
3542  if (flag != NULL && flag->done_check()) {
3543  KA_TRACE(
3544  15,
3545  ("__kmp_execute_tasks_template: T#%d spin condition satisfied\n",
3546  gtid));
3547  return TRUE;
3548  }
3549  }
3550 
3551  // If this thread's task team is NULL, primary thread has recognized that
3552  // there are no more tasks; bail out
3553  if (thread->th.th_task_team == NULL) {
3554  KA_TRACE(15,
3555  ("__kmp_execute_tasks_template: T#%d no more tasks\n", gtid));
3556  return FALSE;
3557  }
3558 
3559  // Check the flag again to see if it is already done, so that we are not
3560  // trapped in an infinite loop when an if0 task depends on a hidden helper task
3561  // outside any parallel region. Detached tasks are not impacted in this case
3562  // because the only thread executing this function has to execute the proxy
3563  // task so it is in another code path that has the same check.
3564  if (flag == NULL || (!final_spin && flag->done_check())) {
3565  KA_TRACE(15,
3566  ("__kmp_execute_tasks_template: T#%d spin condition satisfied\n",
3567  gtid));
3568  return TRUE;
3569  }
3570 
3571  // We could be getting tasks from target constructs; if this is the only
3572  // thread, keep trying to execute tasks from own queue
3573  if (nthreads == 1 &&
3574  KMP_ATOMIC_LD_ACQ(&current_task->td_incomplete_child_tasks))
3575  use_own_tasks = 1;
3576  else {
3577  KA_TRACE(15,
3578  ("__kmp_execute_tasks_template: T#%d can't find work\n", gtid));
3579  return FALSE;
3580  }
3581  }
3582 }
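// A minimal sketch (hypothetical caller, with thread/gtid/counter assumed in
// scope) of how a waiting thread drives the template above through a flag
// object, mirroring the loop in __kmpc_end_taskgroup earlier in this file:
// execute tasks until the spin condition is satisfied or the source dries up.
//
//   kmp_flag_32<false, false> flag(
//       RCAST(std::atomic<kmp_uint32> *, &counter), 0U);
//   int thread_finished = FALSE;
//   while (KMP_ATOMIC_LD_ACQ(&counter) != 0) {
//     flag.execute_tasks(thread, gtid, FALSE,
//                        &thread_finished USE_ITT_BUILD_ARG(NULL),
//                        __kmp_task_stealing_constraint);
//   }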
3583 
3584 template <bool C, bool S>
3585 int __kmp_execute_tasks_32(
3586  kmp_info_t *thread, kmp_int32 gtid, kmp_flag_32<C, S> *flag, int final_spin,
3587  int *thread_finished USE_ITT_BUILD_ARG(void *itt_sync_obj),
3588  kmp_int32 is_constrained) {
3589  return __kmp_execute_tasks_template(
3590  thread, gtid, flag, final_spin,
3591  thread_finished USE_ITT_BUILD_ARG(itt_sync_obj), is_constrained);
3592 }
3593 
3594 template <bool C, bool S>
3595 int __kmp_execute_tasks_64(
3596  kmp_info_t *thread, kmp_int32 gtid, kmp_flag_64<C, S> *flag, int final_spin,
3597  int *thread_finished USE_ITT_BUILD_ARG(void *itt_sync_obj),
3598  kmp_int32 is_constrained) {
3599  return __kmp_execute_tasks_template(
3600  thread, gtid, flag, final_spin,
3601  thread_finished USE_ITT_BUILD_ARG(itt_sync_obj), is_constrained);
3602 }
3603 
3604 template <bool C, bool S>
3605 int __kmp_atomic_execute_tasks_64(
3606  kmp_info_t *thread, kmp_int32 gtid, kmp_atomic_flag_64<C, S> *flag,
3607  int final_spin, int *thread_finished USE_ITT_BUILD_ARG(void *itt_sync_obj),
3608  kmp_int32 is_constrained) {
3609  return __kmp_execute_tasks_template(
3610  thread, gtid, flag, final_spin,
3611  thread_finished USE_ITT_BUILD_ARG(itt_sync_obj), is_constrained);
3612 }
3613 
3614 int __kmp_execute_tasks_oncore(
3615  kmp_info_t *thread, kmp_int32 gtid, kmp_flag_oncore *flag, int final_spin,
3616  int *thread_finished USE_ITT_BUILD_ARG(void *itt_sync_obj),
3617  kmp_int32 is_constrained) {
3618  return __kmp_execute_tasks_template(
3619  thread, gtid, flag, final_spin,
3620  thread_finished USE_ITT_BUILD_ARG(itt_sync_obj), is_constrained);
3621 }
3622 
3623 template int
3624 __kmp_execute_tasks_32<false, false>(kmp_info_t *, kmp_int32,
3625  kmp_flag_32<false, false> *, int,
3626  int *USE_ITT_BUILD_ARG(void *), kmp_int32);
3627 
3628 template int __kmp_execute_tasks_64<false, true>(kmp_info_t *, kmp_int32,
3629  kmp_flag_64<false, true> *,
3630  int,
3631  int *USE_ITT_BUILD_ARG(void *),
3632  kmp_int32);
3633 
3634 template int __kmp_execute_tasks_64<true, false>(kmp_info_t *, kmp_int32,
3635  kmp_flag_64<true, false> *,
3636  int,
3637  int *USE_ITT_BUILD_ARG(void *),
3638  kmp_int32);
3639 
3640 template int __kmp_atomic_execute_tasks_64<false, true>(
3641  kmp_info_t *, kmp_int32, kmp_atomic_flag_64<false, true> *, int,
3642  int *USE_ITT_BUILD_ARG(void *), kmp_int32);
3643 
3644 template int __kmp_atomic_execute_tasks_64<true, false>(
3645  kmp_info_t *, kmp_int32, kmp_atomic_flag_64<true, false> *, int,
3646  int *USE_ITT_BUILD_ARG(void *), kmp_int32);
3647 
3648 // __kmp_enable_tasking: Set up a task team's threads_data array and resume
3649 // threads sleeping at the next barrier so they can assist in executing
3650 // enqueued tasks. The first thread in performs the setup atomically.
3651 static void __kmp_enable_tasking(kmp_task_team_t *task_team,
3652  kmp_info_t *this_thr) {
3653  kmp_thread_data_t *threads_data;
3654  int nthreads, i, is_init_thread;
3655 
3656  KA_TRACE(10, ("__kmp_enable_tasking(enter): T#%d\n",
3657  __kmp_gtid_from_thread(this_thr)));
3658 
3659  KMP_DEBUG_ASSERT(task_team != NULL);
3660  KMP_DEBUG_ASSERT(this_thr->th.th_team != NULL);
3661 
3662  nthreads = task_team->tt.tt_nproc;
3663  KMP_DEBUG_ASSERT(nthreads > 0);
3664  KMP_DEBUG_ASSERT(nthreads == this_thr->th.th_team->t.t_nproc);
3665 
3666  // Allocate or increase the size of threads_data if necessary
3667  is_init_thread = __kmp_realloc_task_threads_data(this_thr, task_team);
3668 
3669  if (!is_init_thread) {
3670  // Some other thread already set up the array.
3671  KA_TRACE(
3672  20,
3673  ("__kmp_enable_tasking(exit): T#%d: threads array already set up.\n",
3674  __kmp_gtid_from_thread(this_thr)));
3675  return;
3676  }
3677  threads_data = (kmp_thread_data_t *)TCR_PTR(task_team->tt.tt_threads_data);
3678  KMP_DEBUG_ASSERT(threads_data != NULL);
3679 
3680  if (__kmp_tasking_mode == tskm_task_teams &&
3681  (__kmp_dflt_blocktime != KMP_MAX_BLOCKTIME)) {
3682  // Release any threads sleeping at the barrier, so that they can steal
3683  // tasks and execute them. In extra barrier mode, tasks do not sleep
3684  // at the separate tasking barrier, so this isn't a problem.
3685  for (i = 0; i < nthreads; i++) {
3686  void *sleep_loc;
3687  kmp_info_t *thread = threads_data[i].td.td_thr;
3688 
3689  if (i == this_thr->th.th_info.ds.ds_tid) {
3690  continue;
3691  }
3692  // Since we haven't locked the thread's suspend mutex lock at this
3693  // point, there is a small window where a thread might be putting
3694  // itself to sleep, but hasn't set the th_sleep_loc field yet.
3695  // To work around this, __kmp_execute_tasks_template() periodically checks
3696  // to see if other threads are sleeping (using the same random mechanism that
3697  // is used for task stealing) and awakens them if they are.
3698  if ((sleep_loc = TCR_PTR(CCAST(void *, thread->th.th_sleep_loc))) !=
3699  NULL) {
3700  KF_TRACE(50, ("__kmp_enable_tasking: T#%d waking up thread T#%d\n",
3701  __kmp_gtid_from_thread(this_thr),
3702  __kmp_gtid_from_thread(thread)));
3703  __kmp_null_resume_wrapper(thread);
3704  } else {
3705  KF_TRACE(50, ("__kmp_enable_tasking: T#%d don't wake up thread T#%d\n",
3706  __kmp_gtid_from_thread(this_thr),
3707  __kmp_gtid_from_thread(thread)));
3708  }
3709  }
3710  }
3711 
3712  KA_TRACE(10, ("__kmp_enable_tasking(exit): T#%d\n",
3713  __kmp_gtid_from_thread(this_thr)));
3714 }
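// The race window described above, as an interleaving sketch (hypothetical
// schedule; W is a worker, T is the thread running this routine):
//
//   W: decides to sleep                T: reads W's th_sleep_loc == NULL
//   W: sets th_sleep_loc, suspends     T: skips the wakeup
//   (W stays asleep until the random victim check in
//    __kmp_execute_tasks_template notices th_sleep_loc and resumes it)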
3715 
3716 /* // TODO: Check the comment consistency
3717  * Utility routines for "task teams". A task team (kmp_task_team_t) is kind of
3718  * like a shadow of the kmp_team_t data struct, with a different lifetime.
3719  * After a child thread checks into a barrier and calls __kmp_release() from
3720  * the particular variant of __kmp_<barrier_kind>_barrier_gather(), it can no
3721  * longer assume that the kmp_team_t structure is intact (at any moment, the
3722  * primary thread may exit the barrier code and free the team data structure,
3723  * and return the threads to the thread pool).
3724  *
3725  * This does not work with the tasking code, as the thread is still
3726  * expected to participate in the execution of any tasks that may have been
3727  * spawned by a member of the team, and the thread still needs access
3728  * to each thread in the team, so that it can steal work from it.
3729  *
3730  * Enter the existence of the kmp_task_team_t struct. It employs a reference
3731  * counting mechanism, and is allocated by the primary thread before calling
3732  * __kmp_<barrier_kind>_release, and then is released by the last thread to
3733  * exit __kmp_<barrier_kind>_release at the next barrier. I.e. the lifetimes
3734  * of the kmp_task_team_t structs for consecutive barriers can overlap
3735  * (and will, unless the primary thread is the last thread to exit the barrier
3736  * release phase, which is not typical). The existence of such a struct could
3737  * also prove useful outside the context of tasking.
3738  *
3739  * We currently use the existence of the threads array as an indicator that
3740  * tasks were spawned since the last barrier. If the structure is to be
3741  * useful outside the context of tasking, then this will have to change, but
3742  * not setting the field minimizes the performance impact of tasking on
3743  * barriers, when no explicit tasks were spawned (pushed, actually).
3744  */
3745 
3746 static kmp_task_team_t *__kmp_free_task_teams =
3747  NULL; // Free list for task_team data structures
3748 // Lock for task team data structures
3749 kmp_bootstrap_lock_t __kmp_task_team_lock =
3750  KMP_BOOTSTRAP_LOCK_INITIALIZER(__kmp_task_team_lock);
3751 
3752 // __kmp_alloc_task_deque:
3753 // Allocates a task deque for a particular thread, and initializes the necessary
3754 // data structures relating to the deque. This only happens once per thread
3755 // per task team since task teams are recycled. No lock is needed during
3756 // allocation since each thread allocates its own deque.
3757 static void __kmp_alloc_task_deque(kmp_info_t *thread,
3758  kmp_thread_data_t *thread_data) {
3759  __kmp_init_bootstrap_lock(&thread_data->td.td_deque_lock);
3760  KMP_DEBUG_ASSERT(thread_data->td.td_deque == NULL);
3761 
3762  // Initialize last stolen task field to "none"
3763  thread_data->td.td_deque_last_stolen = -1;
3764 
3765  KMP_DEBUG_ASSERT(TCR_4(thread_data->td.td_deque_ntasks) == 0);
3766  KMP_DEBUG_ASSERT(thread_data->td.td_deque_head == 0);
3767  KMP_DEBUG_ASSERT(thread_data->td.td_deque_tail == 0);
3768 
3769  KE_TRACE(
3770  10,
3771  ("__kmp_alloc_task_deque: T#%d allocating deque[%d] for thread_data %p\n",
3772  __kmp_gtid_from_thread(thread), INITIAL_TASK_DEQUE_SIZE, thread_data));
3773  // Allocate space for task deque, and zero the deque
3774  // Cannot use __kmp_thread_calloc() because threads not around for
3775  // kmp_reap_task_team( ).
3776  thread_data->td.td_deque = (kmp_taskdata_t **)__kmp_allocate(
3777  INITIAL_TASK_DEQUE_SIZE * sizeof(kmp_taskdata_t *));
3778  thread_data->td.td_deque_size = INITIAL_TASK_DEQUE_SIZE;
3779 }
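// INITIAL_TASK_DEQUE_SIZE is a power of two, so TASK_DEQUE_MASK(td) --
// assumed to be td_deque_size - 1 -- can wrap head/tail indices with a
// single AND instead of a modulo, as relied upon throughout this file:
//
//   KMP_DEBUG_ASSERT((td.td_deque_size & (td.td_deque_size - 1)) == 0);
//   next_tail = (td.td_deque_tail + 1) & TASK_DEQUE_MASK(td); // == % size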
3780 
3781 // __kmp_free_task_deque:
3782 // Deallocates a task deque for a particular thread. Happens at library
3783 // deallocation so don't need to reset all thread data fields.
3784 static void __kmp_free_task_deque(kmp_thread_data_t *thread_data) {
3785  if (thread_data->td.td_deque != NULL) {
3786  __kmp_acquire_bootstrap_lock(&thread_data->td.td_deque_lock);
3787  TCW_4(thread_data->td.td_deque_ntasks, 0);
3788  __kmp_free(thread_data->td.td_deque);
3789  thread_data->td.td_deque = NULL;
3790  __kmp_release_bootstrap_lock(&thread_data->td.td_deque_lock);
3791  }
3792 
3793 #ifdef BUILD_TIED_TASK_STACK
3794  // GEH: Figure out what to do here for td_susp_tied_tasks
3795  if (thread_data->td.td_susp_tied_tasks.ts_entries != TASK_STACK_EMPTY) {
3796  __kmp_free_task_stack(__kmp_thread_from_gtid(gtid), thread_data);
3797  }
3798 #endif // BUILD_TIED_TASK_STACK
3799 }
3800 
3801 // __kmp_realloc_task_threads_data:
3802 // Allocates a threads_data array for a task team, either by allocating an
3803 // initial array or enlarging an existing array. Only the first thread to get
3804 // the lock allocs or enlarges the array and re-initializes the array elements.
3805 // That thread returns "TRUE", the rest return "FALSE".
3806 // Assumes that the new array size is given by task_team -> tt.tt_nproc.
3807 // The current size is given by task_team -> tt.tt_max_threads.
3808 static int __kmp_realloc_task_threads_data(kmp_info_t *thread,
3809  kmp_task_team_t *task_team) {
3810  kmp_thread_data_t **threads_data_p;
3811  kmp_int32 nthreads, maxthreads;
3812  int is_init_thread = FALSE;
3813 
3814  if (TCR_4(task_team->tt.tt_found_tasks)) {
3815  // Already reallocated and initialized.
3816  return FALSE;
3817  }
3818 
3819  threads_data_p = &task_team->tt.tt_threads_data;
3820  nthreads = task_team->tt.tt_nproc;
3821  maxthreads = task_team->tt.tt_max_threads;
3822 
3823  // All threads must lock when they encounter the first task of the implicit
3824  // task region to make sure threads_data fields are (re)initialized before
3825  // they are used.
3826  __kmp_acquire_bootstrap_lock(&task_team->tt.tt_threads_lock);
3827 
3828  if (!TCR_4(task_team->tt.tt_found_tasks)) {
3829  // first thread to enable tasking
3830  kmp_team_t *team = thread->th.th_team;
3831  int i;
3832 
3833  is_init_thread = TRUE;
3834  if (maxthreads < nthreads) {
3835 
3836  if (*threads_data_p != NULL) {
3837  kmp_thread_data_t *old_data = *threads_data_p;
3838  kmp_thread_data_t *new_data = NULL;
3839 
3840  KE_TRACE(
3841  10,
3842  ("__kmp_realloc_task_threads_data: T#%d reallocating "
3843  "threads data for task_team %p, new_size = %d, old_size = %d\n",
3844  __kmp_gtid_from_thread(thread), task_team, nthreads, maxthreads));
3845  // Reallocate threads_data to have more elements than current array
3846  // Cannot use __kmp_thread_realloc() because threads not around for
3847  // kmp_reap_task_team( ). Note all new array entries are initialized
3848  // to zero by __kmp_allocate().
3849  new_data = (kmp_thread_data_t *)__kmp_allocate(
3850  nthreads * sizeof(kmp_thread_data_t));
3851  // copy old data to new data
3852  KMP_MEMCPY_S((void *)new_data, nthreads * sizeof(kmp_thread_data_t),
3853  (void *)old_data, maxthreads * sizeof(kmp_thread_data_t));
3854 
3855 #ifdef BUILD_TIED_TASK_STACK
3856  // GEH: Figure out if this is the right thing to do
3857  for (i = maxthreads; i < nthreads; i++) {
3858  kmp_thread_data_t *thread_data = &(*threads_data_p)[i];
3859  __kmp_init_task_stack(__kmp_gtid_from_thread(thread), thread_data);
3860  }
3861 #endif // BUILD_TIED_TASK_STACK
3862  // Install the new data and free the old data
3863  (*threads_data_p) = new_data;
3864  __kmp_free(old_data);
3865  } else {
3866  KE_TRACE(10, ("__kmp_realloc_task_threads_data: T#%d allocating "
3867  "threads data for task_team %p, size = %d\n",
3868  __kmp_gtid_from_thread(thread), task_team, nthreads));
3869  // Make the initial allocate for threads_data array, and zero entries
3870  // Cannot use __kmp_thread_calloc() because threads not around for
3871  // kmp_reap_task_team( ).
3872  *threads_data_p = (kmp_thread_data_t *)__kmp_allocate(
3873  nthreads * sizeof(kmp_thread_data_t));
3874 #ifdef BUILD_TIED_TASK_STACK
3875  // GEH: Figure out if this is the right thing to do
3876  for (i = 0; i < nthreads; i++) {
3877  kmp_thread_data_t *thread_data = &(*threads_data_p)[i];
3878  __kmp_init_task_stack(__kmp_gtid_from_thread(thread), thread_data);
3879  }
3880 #endif // BUILD_TIED_TASK_STACK
3881  }
3882  task_team->tt.tt_max_threads = nthreads;
3883  } else {
3884  // If array has (more than) enough elements, go ahead and use it
3885  KMP_DEBUG_ASSERT(*threads_data_p != NULL);
3886  }
3887 
3888  // initialize threads_data pointers back to thread_info structures
3889  for (i = 0; i < nthreads; i++) {
3890  kmp_thread_data_t *thread_data = &(*threads_data_p)[i];
3891  thread_data->td.td_thr = team->t.t_threads[i];
3892 
3893  if (thread_data->td.td_deque_last_stolen >= nthreads) {
3894  // The last stolen field survives across teams / barrier, and the number
3895  // of threads may have changed. It's possible (likely?) that a new
3896  // parallel region will exhibit the same behavior as the previous region.
3897  thread_data->td.td_deque_last_stolen = -1;
3898  }
3899  }
3900 
3901  KMP_MB();
3902  TCW_SYNC_4(task_team->tt.tt_found_tasks, TRUE);
3903  }
3904 
3905  __kmp_release_bootstrap_lock(&task_team->tt.tt_threads_lock);
3906  return is_init_thread;
3907 }
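// The check/lock/re-check sequence above is the classic double-checked
// locking pattern, distilled here to its shape (a sketch, with flag standing
// in for tt_found_tasks and lock for tt_threads_lock):
//
//   if (TCR_4(flag))
//     return FALSE; // fast path: already initialized, no lock taken
//   __kmp_acquire_bootstrap_lock(&lock);
//   if (!TCR_4(flag)) { // re-check under the lock
//     /* ... allocate and initialize the shared data ... */
//     KMP_MB(); // publish the data before the flag becomes visible
//     TCW_SYNC_4(flag, TRUE);
//   }
//   __kmp_release_bootstrap_lock(&lock);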
3908 
3909 // __kmp_free_task_threads_data:
3910 // Deallocates a threads_data array for a task team, including any attached
3911 // tasking deques. Only occurs at library shutdown.
3912 static void __kmp_free_task_threads_data(kmp_task_team_t *task_team) {
3913  __kmp_acquire_bootstrap_lock(&task_team->tt.tt_threads_lock);
3914  if (task_team->tt.tt_threads_data != NULL) {
3915  int i;
3916  for (i = 0; i < task_team->tt.tt_max_threads; i++) {
3917  __kmp_free_task_deque(&task_team->tt.tt_threads_data[i]);
3918  }
3919  __kmp_free(task_team->tt.tt_threads_data);
3920  task_team->tt.tt_threads_data = NULL;
3921  }
3922  __kmp_release_bootstrap_lock(&task_team->tt.tt_threads_lock);
3923 }
3924 
3925 // __kmp_free_task_pri_list:
3926 // Deallocates tasking deques used for priority tasks.
3927 // Only occurs at library shutdown.
3928 static void __kmp_free_task_pri_list(kmp_task_team_t *task_team) {
3929  __kmp_acquire_bootstrap_lock(&task_team->tt.tt_task_pri_lock);
3930  if (task_team->tt.tt_task_pri_list != NULL) {
3931  kmp_task_pri_t *list = task_team->tt.tt_task_pri_list;
3932  while (list != NULL) {
3933  kmp_task_pri_t *next = list->next;
3934  __kmp_free_task_deque(&list->td);
3935  __kmp_free(list);
3936  list = next;
3937  }
3938  task_team->tt.tt_task_pri_list = NULL;
3939  }
3940  __kmp_release_bootstrap_lock(&task_team->tt.tt_task_pri_lock);
3941 }
3942 
3943 // __kmp_allocate_task_team:
3944 // Allocates a task team associated with a specific team, taking it from
3945 // the global task team free list if possible. Also initializes data
3946 // structures.
3947 static kmp_task_team_t *__kmp_allocate_task_team(kmp_info_t *thread,
3948  kmp_team_t *team) {
3949  kmp_task_team_t *task_team = NULL;
3950  int nthreads;
3951 
3952  KA_TRACE(20, ("__kmp_allocate_task_team: T#%d entering; team = %p\n",
3953  (thread ? __kmp_gtid_from_thread(thread) : -1), team));
3954 
3955  if (TCR_PTR(__kmp_free_task_teams) != NULL) {
3956  // Take a task team from the task team pool
3957  __kmp_acquire_bootstrap_lock(&__kmp_task_team_lock);
3958  if (__kmp_free_task_teams != NULL) {
3959  task_team = __kmp_free_task_teams;
3960  TCW_PTR(__kmp_free_task_teams, task_team->tt.tt_next);
3961  task_team->tt.tt_next = NULL;
3962  }
3963  __kmp_release_bootstrap_lock(&__kmp_task_team_lock);
3964  }
3965 
3966  if (task_team == NULL) {
3967  KE_TRACE(10, ("__kmp_allocate_task_team: T#%d allocating "
3968  "task team for team %p\n",
3969  __kmp_gtid_from_thread(thread), team));
3970  // Allocate a new task team if one is not available. Cannot use
3971  // __kmp_thread_malloc because threads not around for kmp_reap_task_team.
3972  task_team = (kmp_task_team_t *)__kmp_allocate(sizeof(kmp_task_team_t));
3973  __kmp_init_bootstrap_lock(&task_team->tt.tt_threads_lock);
3974  __kmp_init_bootstrap_lock(&task_team->tt.tt_task_pri_lock);
3975 #if USE_ITT_BUILD && USE_ITT_NOTIFY && KMP_DEBUG
3976  // suppress detection of race conditions on synchronization flags in debug
3977  // mode; this helps to analyze library internals by eliminating false positives
3978  __itt_suppress_mark_range(
3979  __itt_suppress_range, __itt_suppress_threading_errors,
3980  &task_team->tt.tt_found_tasks, sizeof(task_team->tt.tt_found_tasks));
3981  __itt_suppress_mark_range(__itt_suppress_range,
3982  __itt_suppress_threading_errors,
3983  CCAST(kmp_uint32 *, &task_team->tt.tt_active),
3984  sizeof(task_team->tt.tt_active));
3985 #endif /* USE_ITT_BUILD && USE_ITT_NOTIFY && KMP_DEBUG */
3986  // Note: __kmp_allocate zeroes returned memory, otherwise we would need:
3987  // task_team->tt.tt_threads_data = NULL;
3988  // task_team->tt.tt_max_threads = 0;
3989  // task_team->tt.tt_next = NULL;
3990  }
3991 
3992  TCW_4(task_team->tt.tt_found_tasks, FALSE);
3993  TCW_4(task_team->tt.tt_found_proxy_tasks, FALSE);
3994  TCW_4(task_team->tt.tt_hidden_helper_task_encountered, FALSE);
3995  task_team->tt.tt_nproc = nthreads = team->t.t_nproc;
3996 
3997  KMP_ATOMIC_ST_REL(&task_team->tt.tt_unfinished_threads, nthreads);
3999  TCW_4(task_team->tt.tt_active, TRUE);
4000 
4001  KA_TRACE(20, ("__kmp_allocate_task_team: T#%d exiting; task_team = %p "
4002  "unfinished_threads init'd to %d\n",
4003  (thread ? __kmp_gtid_from_thread(thread) : -1), task_team,
4004  KMP_ATOMIC_LD_RLX(&task_team->tt.tt_unfinished_threads)));
4005  return task_team;
4006 }
4007 
4008 // __kmp_free_task_team:
4009 // Frees the task team associated with a specific thread, and adds it
4010 // to the global task team free list.
4011 void __kmp_free_task_team(kmp_info_t *thread, kmp_task_team_t *task_team) {
4012  KA_TRACE(20, ("__kmp_free_task_team: T#%d task_team = %p\n",
4013  thread ? __kmp_gtid_from_thread(thread) : -1, task_team));
4014 
4015  // Put task team back on free list
4016  __kmp_acquire_bootstrap_lock(&__kmp_task_team_lock);
4017 
4018  KMP_DEBUG_ASSERT(task_team->tt.tt_next == NULL);
4019  task_team->tt.tt_next = __kmp_free_task_teams;
4020  TCW_PTR(__kmp_free_task_teams, task_team);
4021 
4022  __kmp_release_bootstrap_lock(&__kmp_task_team_lock);
4023 }
4024 
4025 // __kmp_reap_task_teams:
4026 // Free all the task teams on the task team free list.
4027 // Should only be done during library shutdown.
4028 // Cannot do anything that needs a thread structure or gtid since they are
4029 // already gone.
4030 void __kmp_reap_task_teams(void) {
4031  kmp_task_team_t *task_team;
4032 
4033  if (TCR_PTR(__kmp_free_task_teams) != NULL) {
4034  // Free all task_teams on the free list
4035  __kmp_acquire_bootstrap_lock(&__kmp_task_team_lock);
4036  while ((task_team = __kmp_free_task_teams) != NULL) {
4037  __kmp_free_task_teams = task_team->tt.tt_next;
4038  task_team->tt.tt_next = NULL;
4039 
4040  // Free threads_data if necessary
4041  if (task_team->tt.tt_threads_data != NULL) {
4042  __kmp_free_task_threads_data(task_team);
4043  }
4044  if (task_team->tt.tt_task_pri_list != NULL) {
4045  __kmp_free_task_pri_list(task_team);
4046  }
4047  __kmp_free(task_team);
4048  }
4049  __kmp_release_bootstrap_lock(&__kmp_task_team_lock);
4050  }
4051 }
4052 
4053 // __kmp_wait_to_unref_task_teams:
4054 // Some threads could still be in the fork barrier release code, possibly
4055 // trying to steal tasks. Wait for each thread to unreference its task team.
4056 void __kmp_wait_to_unref_task_teams(void) {
4057  kmp_info_t *thread;
4058  kmp_uint32 spins;
4059  kmp_uint64 time;
4060  int done;
4061 
4062  KMP_INIT_YIELD(spins);
4063  KMP_INIT_BACKOFF(time);
4064 
4065  for (;;) {
4066  done = TRUE;
4067 
4068  // TODO: GEH - this may be wrong because some sync would be necessary
4069  // in case threads are added to the pool during the traversal. Need to
4070  // verify that lock for thread pool is held when calling this routine.
4071  for (thread = CCAST(kmp_info_t *, __kmp_thread_pool); thread != NULL;
4072  thread = thread->th.th_next_pool) {
4073 #if KMP_OS_WINDOWS
4074  DWORD exit_val;
4075 #endif
4076  if (TCR_PTR(thread->th.th_task_team) == NULL) {
4077  KA_TRACE(10, ("__kmp_wait_to_unref_task_team: T#%d task_team == NULL\n",
4078  __kmp_gtid_from_thread(thread)));
4079  continue;
4080  }
4081 #if KMP_OS_WINDOWS
4082  // TODO: GEH - add this check for Linux* OS / OS X* as well?
4083  if (!__kmp_is_thread_alive(thread, &exit_val)) {
4084  thread->th.th_task_team = NULL;
4085  continue;
4086  }
4087 #endif
4088 
4089  done = FALSE; // Because th_task_team pointer is not NULL for this thread
4090 
4091  KA_TRACE(10, ("__kmp_wait_to_unref_task_team: Waiting for T#%d to "
4092  "unreference task_team\n",
4093  __kmp_gtid_from_thread(thread)));
4094 
4095  if (__kmp_dflt_blocktime != KMP_MAX_BLOCKTIME) {
4096  void *sleep_loc;
4097  // If the thread is sleeping, awaken it.
4098  if ((sleep_loc = TCR_PTR(CCAST(void *, thread->th.th_sleep_loc))) !=
4099  NULL) {
4100  KA_TRACE(
4101  10,
4102  ("__kmp_wait_to_unref_task_team: T#%d waking up thread T#%d\n",
4103  __kmp_gtid_from_thread(thread), __kmp_gtid_from_thread(thread)));
4104  __kmp_null_resume_wrapper(thread);
4105  }
4106  }
4107  }
4108  if (done) {
4109  break;
4110  }
4111 
4112  // If oversubscribed or have waited a bit, yield.
4113  KMP_YIELD_OVERSUB_ELSE_SPIN(spins, time);
4114  }
4115 }
4116 
4117 void __kmp_shift_task_state_stack(kmp_info_t *this_thr, kmp_uint8 value) {
4118  // Shift values from th_task_state_top+1 to task_state_stack_sz
4119  if (this_thr->th.th_task_state_top + 1 >=
4120  this_thr->th.th_task_state_stack_sz) { // increase size
4121  kmp_uint32 new_size = 2 * this_thr->th.th_task_state_stack_sz;
4122  kmp_uint8 *old_stack, *new_stack;
4123  kmp_uint32 i;
4124  new_stack = (kmp_uint8 *)__kmp_allocate(new_size);
4125  for (i = 0; i <= this_thr->th.th_task_state_top; ++i) {
4126  new_stack[i] = this_thr->th.th_task_state_memo_stack[i];
4127  }
4128  // If we need to reallocate, do the shift at the same time.
4129  for (; i < this_thr->th.th_task_state_stack_sz; ++i) {
4130  new_stack[i + 1] = this_thr->th.th_task_state_memo_stack[i];
4131  }
4132  for (i = this_thr->th.th_task_state_stack_sz; i < new_size;
4133  ++i) { // zero-init rest of stack
4134  new_stack[i] = 0;
4135  }
4136  old_stack = this_thr->th.th_task_state_memo_stack;
4137  this_thr->th.th_task_state_memo_stack = new_stack;
4138  this_thr->th.th_task_state_stack_sz = new_size;
4139  __kmp_free(old_stack);
4140  } else {
4141  kmp_uint8 *end;
4142  kmp_uint32 i;
4143 
4144  end = &this_thr->th.th_task_state_memo_stack
4145  [this_thr->th.th_task_state_stack_sz - 1]; // last entry; keep end[0] in bounds
4146 
4147  for (i = this_thr->th.th_task_state_stack_sz - 1;
4148  i > this_thr->th.th_task_state_top; i--, end--)
4149  end[0] = end[-1];
4150  }
4151  this_thr->th.th_task_state_memo_stack[this_thr->th.th_task_state_top + 1] =
4152  value;
4153 }
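// Worked example (hypothetical contents): with th_task_state_top == 1 and a
// memo stack of {s0, s1, s2, s3, ...}, shifting in value v yields
// {s0, s1, v, s2, s3, ...} -- entries above the top slide up one slot so the
// new value lands at index th_task_state_top + 1.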
4154 
4155 // __kmp_task_team_setup: Create a task_team for the current team, but use
4156 // an already created, unused one if it already exists.
4157 void __kmp_task_team_setup(kmp_info_t *this_thr, kmp_team_t *team, int always) {
4158  KMP_DEBUG_ASSERT(__kmp_tasking_mode != tskm_immediate_exec);
4159 
4160  // If this task_team hasn't been created yet, allocate it. It will be used in
4161  // the region after the next.
4162  // If it exists, it is the current task team and shouldn't be touched yet as
4163  // it may still be in use.
4164  if (team->t.t_task_team[this_thr->th.th_task_state] == NULL &&
4165  (always || team->t.t_nproc > 1)) {
4166  team->t.t_task_team[this_thr->th.th_task_state] =
4167  __kmp_allocate_task_team(this_thr, team);
4168  KA_TRACE(20, ("__kmp_task_team_setup: Primary T#%d created new task_team %p"
4169  " for team %d at parity=%d\n",
4170  __kmp_gtid_from_thread(this_thr),
4171  team->t.t_task_team[this_thr->th.th_task_state], team->t.t_id,
4172  this_thr->th.th_task_state));
4173  }
4174  if (this_thr->th.th_task_state == 1 && always && team->t.t_nproc == 1) {
4175  // fix task state stack to adjust for proxy and helper tasks
4176  KA_TRACE(20, ("__kmp_task_team_setup: Primary T#%d needs to shift stack"
4177  " for team %d at parity=%d\n",
4178  __kmp_gtid_from_thread(this_thr), team->t.t_id,
4179  this_thr->th.th_task_state));
4180  __kmp_shift_task_state_stack(this_thr, this_thr->th.th_task_state);
4181  }
4182 
4183  // After threads exit the release, they will call sync, and then point to this
4184  // other task_team; make sure it is allocated and properly initialized. As
4185  // threads spin in the barrier release phase, they will continue to use the
4186  // previous task_team struct(above), until they receive the signal to stop
4187  // checking for tasks (they can't safely reference the kmp_team_t struct,
4188  // which could be reallocated by the primary thread). No task teams are formed
4189  // for serialized teams.
4190  if (team->t.t_nproc > 1) {
4191  int other_team = 1 - this_thr->th.th_task_state;
4192  KMP_DEBUG_ASSERT(other_team >= 0 && other_team < 2);
4193  if (team->t.t_task_team[other_team] == NULL) { // setup other team as well
4194  team->t.t_task_team[other_team] =
4195  __kmp_allocate_task_team(this_thr, team);
4196  KA_TRACE(20, ("__kmp_task_team_setup: Primary T#%d created second new "
4197  "task_team %p for team %d at parity=%d\n",
4198  __kmp_gtid_from_thread(this_thr),
4199  team->t.t_task_team[other_team], team->t.t_id, other_team));
4200  } else { // Leave the old task team struct in place for the upcoming region;
4201  // adjust as needed
4202  kmp_task_team_t *task_team = team->t.t_task_team[other_team];
4203  if (!task_team->tt.tt_active ||
4204  team->t.t_nproc != task_team->tt.tt_nproc) {
4205  TCW_4(task_team->tt.tt_nproc, team->t.t_nproc);
4206  TCW_4(task_team->tt.tt_found_tasks, FALSE);
4207  TCW_4(task_team->tt.tt_found_proxy_tasks, FALSE);
4208  TCW_4(task_team->tt.tt_hidden_helper_task_encountered, FALSE);
4209  KMP_ATOMIC_ST_REL(&task_team->tt.tt_unfinished_threads,
4210  team->t.t_nproc);
4211  TCW_4(task_team->tt.tt_active, TRUE);
4212  }
4213  // if team size has changed, the first thread to enable tasking will
4214  // realloc threads_data if necessary
4215  KA_TRACE(20, ("__kmp_task_team_setup: Primary T#%d reset next task_team "
4216  "%p for team %d at parity=%d\n",
4217  __kmp_gtid_from_thread(this_thr),
4218  team->t.t_task_team[other_team], team->t.t_id, other_team));
4219  }
4220  }
4221 
4222 // For a regular thread, task enabling should be called when the task is going
4223 // to be pushed to a deque. However, for the hidden helper thread, we need
4224  // it ahead of time so that some operations can be performed without race
4225  // condition.
4226  if (this_thr == __kmp_hidden_helper_main_thread) {
4227  for (int i = 0; i < 2; ++i) {
4228  kmp_task_team_t *task_team = team->t.t_task_team[i];
4229  if (KMP_TASKING_ENABLED(task_team)) {
4230  continue;
4231  }
4232  __kmp_enable_tasking(task_team, this_thr);
4233  for (int j = 0; j < task_team->tt.tt_nproc; ++j) {
4234  kmp_thread_data_t *thread_data = &task_team->tt.tt_threads_data[j];
4235  if (thread_data->td.td_deque == NULL) {
4236  __kmp_alloc_task_deque(__kmp_hidden_helper_threads[j], thread_data);
4237  }
4238  }
4239  }
4240  }
4241 }
4242 
4243 // __kmp_task_team_sync: Propagation of task team data from team to threads
4244 // which happens just after the release phase of a team barrier. This may be
4245 // called by any thread, but only for teams with # threads > 1.
4246 void __kmp_task_team_sync(kmp_info_t *this_thr, kmp_team_t *team) {
4247  KMP_DEBUG_ASSERT(__kmp_tasking_mode != tskm_immediate_exec);
4248 
4249  // Toggle the th_task_state field, to switch which task_team this thread
4250  // refers to
4251  this_thr->th.th_task_state = (kmp_uint8)(1 - this_thr->th.th_task_state);
4252 
4253  // It is now safe to propagate the task team pointer from the team struct to
4254  // the current thread.
4255  TCW_PTR(this_thr->th.th_task_team,
4256  team->t.t_task_team[this_thr->th.th_task_state]);
4257  KA_TRACE(20,
4258  ("__kmp_task_team_sync: Thread T#%d task team switched to task_team "
4259  "%p from Team #%d (parity=%d)\n",
4260  __kmp_gtid_from_thread(this_thr), this_thr->th.th_task_team,
4261  team->t.t_id, this_thr->th.th_task_state));
4262 }
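// A sketch of the parity scheme implemented by __kmp_task_team_setup and
// __kmp_task_team_sync (assumed per-thread timeline): t_task_team[] holds two
// slots whose roles alternate across consecutive barriers, so the next
// region's task team can be prepared while late spinners still drain the
// current one.
//
//   th_task_state == 0: work via t_task_team[0]; primary prepares [1]
//   ---- barrier release: th_task_state = 1 - th_task_state ----
//   th_task_state == 1: work via t_task_team[1]; [0] drains, then recycled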
4263 
4264 // __kmp_task_team_wait: Primary thread waits for outstanding tasks after the
4265 // barrier gather phase. Only called by primary thread if #threads in team > 1
4266 // or if proxy tasks were created.
4267 //
4268 // wait is a flag that defaults to 1 (see kmp.h), but waiting can be turned off
4269 // by passing in 0 optionally as the last argument. When wait is zero, primary
4270 // thread does not wait for unfinished_threads to reach 0.
4271 void __kmp_task_team_wait(
4272  kmp_info_t *this_thr,
4273  kmp_team_t *team USE_ITT_BUILD_ARG(void *itt_sync_obj), int wait) {
4274  kmp_task_team_t *task_team = team->t.t_task_team[this_thr->th.th_task_state];
4275 
4276  KMP_DEBUG_ASSERT(__kmp_tasking_mode != tskm_immediate_exec);
4277  KMP_DEBUG_ASSERT(task_team == this_thr->th.th_task_team);
4278 
4279  if ((task_team != NULL) && KMP_TASKING_ENABLED(task_team)) {
4280  if (wait) {
4281  KA_TRACE(20, ("__kmp_task_team_wait: Primary T#%d waiting for all tasks "
4282  "(for unfinished_threads to reach 0) on task_team = %p\n",
4283  __kmp_gtid_from_thread(this_thr), task_team));
4284  // Worker threads may have dropped through to release phase, but could
4285  // still be executing tasks. Wait here for tasks to complete. To avoid
4286  // memory contention, only primary thread checks termination condition.
4287  kmp_flag_32<false, false> flag(
4288  RCAST(std::atomic<kmp_uint32> *,
4289  &task_team->tt.tt_unfinished_threads),
4290  0U);
4291  flag.wait(this_thr, TRUE USE_ITT_BUILD_ARG(itt_sync_obj));
4292  }
4293  // Deactivate the old task team, so that the worker threads will stop
4294  // referencing it while spinning.
4295  KA_TRACE(
4296  20,
4297  ("__kmp_task_team_wait: Primary T#%d deactivating task_team %p: "
4298  "setting active to false, setting local and team's pointer to NULL\n",
4299  __kmp_gtid_from_thread(this_thr), task_team));
4300  KMP_DEBUG_ASSERT(task_team->tt.tt_nproc > 1 ||
4301  task_team->tt.tt_found_proxy_tasks == TRUE ||
4302  task_team->tt.tt_hidden_helper_task_encountered == TRUE);
4303  TCW_SYNC_4(task_team->tt.tt_found_proxy_tasks, FALSE);
4304  TCW_SYNC_4(task_team->tt.tt_hidden_helper_task_encountered, FALSE);
4305  KMP_CHECK_UPDATE(task_team->tt.tt_untied_task_encountered, 0);
4306  TCW_SYNC_4(task_team->tt.tt_active, FALSE);
4307  KMP_MB();
4308 
4309  TCW_PTR(this_thr->th.th_task_team, NULL);
4310  }
4311 }
4312 
4313 // __kmp_tasking_barrier:
4314 // This routine is called only when __kmp_tasking_mode == tskm_extra_barrier.
4315 // Internal function to execute all tasks prior to a regular barrier or a join
4316 // barrier. It is a full barrier itself, which unfortunately turns regular
4317 // barriers into double barriers and join barriers into 1 1/2 barriers.
4318 void __kmp_tasking_barrier(kmp_team_t *team, kmp_info_t *thread, int gtid) {
4319  std::atomic<kmp_uint32> *spin = RCAST(
4320  std::atomic<kmp_uint32> *,
4321  &team->t.t_task_team[thread->th.th_task_state]->tt.tt_unfinished_threads);
4322  int flag = FALSE;
4323  KMP_DEBUG_ASSERT(__kmp_tasking_mode == tskm_extra_barrier);
4324 
4325 #if USE_ITT_BUILD
4326  KMP_FSYNC_SPIN_INIT(spin, NULL);
4327 #endif /* USE_ITT_BUILD */
4328  kmp_flag_32<false, false> spin_flag(spin, 0U);
4329  while (!spin_flag.execute_tasks(thread, gtid, TRUE,
4330  &flag USE_ITT_BUILD_ARG(NULL), 0)) {
4331 #if USE_ITT_BUILD
4332  // TODO: What about itt_sync_obj??
4333  KMP_FSYNC_SPIN_PREPARE(RCAST(void *, spin));
4334 #endif /* USE_ITT_BUILD */
4335 
4336  if (TCR_4(__kmp_global.g.g_done)) {
4337  if (__kmp_global.g.g_abort)
4338  __kmp_abort_thread();
4339  break;
4340  }
4341  KMP_YIELD(TRUE);
4342  }
4343 #if USE_ITT_BUILD
4344  KMP_FSYNC_SPIN_ACQUIRED(RCAST(void *, spin));
4345 #endif /* USE_ITT_BUILD */
4346 }
4347 
4348 // __kmp_give_task puts a task into a given thread queue if:
4349 // - the queue for that thread was created
4350 // - there's space in that queue
4351 // Because of this, __kmp_push_task needs to check if there's space after
4352 // getting the lock
4353 static bool __kmp_give_task(kmp_info_t *thread, kmp_int32 tid, kmp_task_t *task,
4354  kmp_int32 pass) {
4355  kmp_taskdata_t *taskdata = KMP_TASK_TO_TASKDATA(task);
4356  kmp_task_team_t *task_team = taskdata->td_task_team;
4357 
4358  KA_TRACE(20, ("__kmp_give_task: trying to give task %p to thread %d.\n",
4359  taskdata, tid));
4360 
4361  // If task_team is NULL, something has gone badly wrong...
4362  KMP_DEBUG_ASSERT(task_team != NULL);
4363 
4364  bool result = false;
4365  kmp_thread_data_t *thread_data = &task_team->tt.tt_threads_data[tid];
4366 
4367  if (thread_data->td.td_deque == NULL) {
4368  // There's no queue in this thread, go find another one
4369  // We're guaranteed that at least one thread has a queue
4370  KA_TRACE(30,
4371  ("__kmp_give_task: thread %d has no queue while giving task %p.\n",
4372  tid, taskdata));
4373  return result;
4374  }
4375 
4376  if (TCR_4(thread_data->td.td_deque_ntasks) >=
4377  TASK_DEQUE_SIZE(thread_data->td)) {
4378  KA_TRACE(
4379  30,
4380  ("__kmp_give_task: queue is full while giving task %p to thread %d.\n",
4381  taskdata, tid));
4382 
4383  // if this deque has grown beyond the pass ratio, give another thread a
4384  // chance
4385  if (TASK_DEQUE_SIZE(thread_data->td) / INITIAL_TASK_DEQUE_SIZE >= pass)
4386  return result;
4387 
4388  __kmp_acquire_bootstrap_lock(&thread_data->td.td_deque_lock);
4389  if (TCR_4(thread_data->td.td_deque_ntasks) >=
4390  TASK_DEQUE_SIZE(thread_data->td)) {
4391  // expand deque to push the task which is not allowed to execute
4392  __kmp_realloc_task_deque(thread, thread_data);
4393  }
4394 
4395  } else {
4396 
4397  __kmp_acquire_bootstrap_lock(&thread_data->td.td_deque_lock);
4398 
4399  if (TCR_4(thread_data->td.td_deque_ntasks) >=
4400  TASK_DEQUE_SIZE(thread_data->td)) {
4401  KA_TRACE(30, ("__kmp_give_task: queue is full while giving task %p to "
4402  "thread %d.\n",
4403  taskdata, tid));
4404 
4405  // if this deque has grown beyond the pass ratio, give another thread a
4406  // chance
4407  if (TASK_DEQUE_SIZE(thread_data->td) / INITIAL_TASK_DEQUE_SIZE >= pass)
4408  goto release_and_exit;
4409 
4410  __kmp_realloc_task_deque(thread, thread_data);
4411  }
4412  }
4413 
4414  // lock is held here, and there is space in the deque
4415 
4416  thread_data->td.td_deque[thread_data->td.td_deque_tail] = taskdata;
4417  // Wrap index.
4418  thread_data->td.td_deque_tail =
4419  (thread_data->td.td_deque_tail + 1) & TASK_DEQUE_MASK(thread_data->td);
4420  TCW_4(thread_data->td.td_deque_ntasks,
4421  TCR_4(thread_data->td.td_deque_ntasks) + 1);
4422 
4423  result = true;
4424  KA_TRACE(30, ("__kmp_give_task: successfully gave task %p to thread %d.\n",
4425  taskdata, tid));
4426 
4427 release_and_exit:
4428  __kmp_release_bootstrap_lock(&thread_data->td.td_deque_lock);
4429 
4430  return result;
4431 }
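// Worked example of the pass-ratio throttling above (illustrative; assumes
// INITIAL_TASK_DEQUE_SIZE == 256 as defined in kmp.h):
//
//   pass == 1: a deque at initial capacity (ratio 256/256 = 1 >= 1) is
//              skipped and the task is offered to the next thread
//   pass == 2: a 256-entry deque is now grown in place (ratio 1 < 2), while
//              a 512-entry deque (ratio 2 >= 2) is still skipped
//   pass == 4: only deques already at >= 1024 entries are skipped
//
// Since __kmpc_give_task doubles `pass` after each full sweep of the team,
// every thread is offered the task before any single deque is grown again.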
4432 
4433 #define PROXY_TASK_FLAG 0x40000000
4434 /* The finish of a proxy task is divided into two pieces:
4435  - the top half is the part that can be done from a thread outside the team
4436  - the bottom half must be run from a thread within the team
4437 
4438  In order to run the bottom half, the task gets queued back into one of the
4439  threads of the team. Once the td_incomplete_child_tasks counter of the parent
4440  is decremented, the threads can leave the barriers. So, the bottom half needs
4441  to be queued before the counter is decremented. The top half is therefore
4442  divided into two parts:
4443  - things that can be run before queuing the bottom half
4444  - things that must be run after queuing the bottom half
4445 
4446  This creates a second race, as the bottom half can free the task before the
4447  second top half is executed. To avoid this, we use the
4448  td_incomplete_child_tasks counter of the proxy task to synchronize the top
4449  and bottom halves. */
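// Sketch of the out-of-team completion path described above (illustrative,
// mirrors __kmpc_proxy_task_completed_ooo below):
//
//   external thread                          some team thread
//   ---------------                          ----------------
//   __kmp_first_top_half_finish_proxy(td)
//     complete = 1, taskgroup count--,
//     set PROXY_TASK_FLAG ("imaginary child")
//   __kmpc_give_task(ptask)  ------------->  __kmp_bottom_half_finish_proxy:
//   __kmp_second_top_half_finish_proxy(td)     spin until PROXY_TASK_FLAG is
//     parent child count--,                    cleared by the second top half,
//     clear PROXY_TASK_FLAG                    then release deps and free task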
4450 static void __kmp_first_top_half_finish_proxy(kmp_taskdata_t *taskdata) {
4451  KMP_DEBUG_ASSERT(taskdata->td_flags.tasktype == TASK_EXPLICIT);
4452  KMP_DEBUG_ASSERT(taskdata->td_flags.proxy == TASK_PROXY);
4453  KMP_DEBUG_ASSERT(taskdata->td_flags.complete == 0);
4454  KMP_DEBUG_ASSERT(taskdata->td_flags.freed == 0);
4455 
4456  taskdata->td_flags.complete = 1; // mark the task as completed
4457 #if OMPX_TASKGRAPH
4458  taskdata->td_flags.onced = 1;
4459 #endif
4460 
4461  if (taskdata->td_taskgroup)
4462  KMP_ATOMIC_DEC(&taskdata->td_taskgroup->count);
4463 
4464  // Create an imaginary child for this task so the bottom half cannot
4465  // release the task before we have completed the second top half
4466  KMP_ATOMIC_OR(&taskdata->td_incomplete_child_tasks, PROXY_TASK_FLAG);
4467 }
4468 
4469 static void __kmp_second_top_half_finish_proxy(kmp_taskdata_t *taskdata) {
4470 #if KMP_DEBUG
4471  kmp_int32 children = 0;
4472  // Predecrement simulated by "- 1" calculation
4473  children = -1 +
4474 #endif
4475  KMP_ATOMIC_DEC(&taskdata->td_parent->td_incomplete_child_tasks);
4476  KMP_DEBUG_ASSERT(children >= 0);
4477 
4478  // Remove the imaginary children
4479  KMP_ATOMIC_AND(&taskdata->td_incomplete_child_tasks, ~PROXY_TASK_FLAG);
4480 }
4481 
4482 static void __kmp_bottom_half_finish_proxy(kmp_int32 gtid, kmp_task_t *ptask) {
4483  kmp_taskdata_t *taskdata = KMP_TASK_TO_TASKDATA(ptask);
4484  kmp_info_t *thread = __kmp_threads[gtid];
4485 
4486  KMP_DEBUG_ASSERT(taskdata->td_flags.proxy == TASK_PROXY);
4487  KMP_DEBUG_ASSERT(taskdata->td_flags.complete ==
4488  1); // top half must run before bottom half
4489 
4490  // We need to wait to make sure the top half is finished
4491  // Spinning here should be ok as this should happen quickly
4492  while ((KMP_ATOMIC_LD_ACQ(&taskdata->td_incomplete_child_tasks) &
4493  PROXY_TASK_FLAG) > 0)
4494  ;
4495 
4496  __kmp_release_deps(gtid, taskdata);
4497  __kmp_free_task_and_ancestors(gtid, taskdata, thread);
4498 }
4499 
4508 void __kmpc_proxy_task_completed(kmp_int32 gtid, kmp_task_t *ptask) {
4509  KMP_DEBUG_ASSERT(ptask != NULL);
4510  kmp_taskdata_t *taskdata = KMP_TASK_TO_TASKDATA(ptask);
4511  KA_TRACE(
4512  10, ("__kmp_proxy_task_completed(enter): T#%d proxy task %p completing\n",
4513  gtid, taskdata));
4514  __kmp_assert_valid_gtid(gtid);
4515  KMP_DEBUG_ASSERT(taskdata->td_flags.proxy == TASK_PROXY);
4516 
4517  __kmp_first_top_half_finish_proxy(taskdata);
4518  __kmp_second_top_half_finish_proxy(taskdata);
4519  __kmp_bottom_half_finish_proxy(gtid, ptask);
4520 
4521  KA_TRACE(10,
4522  ("__kmp_proxy_task_completed(exit): T#%d proxy task %p completing\n",
4523  gtid, taskdata));
4524 }
4525 
4526 void __kmpc_give_task(kmp_task_t *ptask, kmp_int32 start = 0) {
4527  KMP_DEBUG_ASSERT(ptask != NULL);
4528  kmp_taskdata_t *taskdata = KMP_TASK_TO_TASKDATA(ptask);
4529 
4530  // Enqueue task to complete bottom half completion from a thread within the
4531  // corresponding team
4532  kmp_team_t *team = taskdata->td_team;
4533  kmp_int32 nthreads = team->t.t_nproc;
4534  kmp_info_t *thread;
4535 
4536  // This should be similar to start_k = __kmp_get_random( thread ) % nthreads
4537  // but we cannot use __kmp_get_random here
4538  kmp_int32 start_k = start % nthreads;
4539  kmp_int32 pass = 1;
4540  kmp_int32 k = start_k;
4541 
4542  do {
4543  // For now we're just linearly trying to find a thread
4544  thread = team->t.t_threads[k];
4545  k = (k + 1) % nthreads;
4546 
4547  // we did a full pass through all the threads
4548  if (k == start_k)
4549  pass = pass << 1;
4550 
4551  } while (!__kmp_give_task(thread, k, ptask, pass));
4552 
4553  if (__kmp_dflt_blocktime != KMP_MAX_BLOCKTIME && __kmp_wpolicy_passive) {
4554  // wake at least one thread to execute the given task
4555  for (int i = 0; i < nthreads; ++i) {
4556  thread = team->t.t_threads[i];
4557  if (thread->th.th_sleep_loc != NULL) {
4558  __kmp_null_resume_wrapper(thread);
4559  break;
4560  }
4561  }
4562  }
4563 }
4564 
4572 void __kmpc_proxy_task_completed_ooo(kmp_task_t *ptask) {
4573  KMP_DEBUG_ASSERT(ptask != NULL);
4574  kmp_taskdata_t *taskdata = KMP_TASK_TO_TASKDATA(ptask);
4575 
4576  KA_TRACE(
4577  10,
4578  ("__kmp_proxy_task_completed_ooo(enter): proxy task completing ooo %p\n",
4579  taskdata));
4580 
4581  KMP_DEBUG_ASSERT(taskdata->td_flags.proxy == TASK_PROXY);
4582 
4583  __kmp_first_top_half_finish_proxy(taskdata);
4584 
4585  __kmpc_give_task(ptask);
4586 
4587  __kmp_second_top_half_finish_proxy(taskdata);
4588 
4589  KA_TRACE(
4590  10,
4591  ("__kmp_proxy_task_completed_ooo(exit): proxy task completing ooo %p\n",
4592  taskdata));
4593 }
4594 
4595 kmp_event_t *__kmpc_task_allow_completion_event(ident_t *loc_ref, int gtid,
4596  kmp_task_t *task) {
4597  kmp_taskdata_t *td = KMP_TASK_TO_TASKDATA(task);
4598  if (td->td_allow_completion_event.type == KMP_EVENT_UNINITIALIZED) {
4599  td->td_allow_completion_event.type = KMP_EVENT_ALLOW_COMPLETION;
4600  td->td_allow_completion_event.ed.task = task;
4601  __kmp_init_tas_lock(&td->td_allow_completion_event.lock);
4602  }
4603  return &td->td_allow_completion_event;
4604 }
4605 
4606 void __kmp_fulfill_event(kmp_event_t *event) {
4607  if (event->type == KMP_EVENT_ALLOW_COMPLETION) {
4608  kmp_task_t *ptask = event->ed.task;
4609  kmp_taskdata_t *taskdata = KMP_TASK_TO_TASKDATA(ptask);
4610  bool detached = false;
4611  int gtid = __kmp_get_gtid();
4612 
4613  // The associated task might have completed or could be completing at this
4614  // point.
4615  // We need to take the lock to avoid races
4616  __kmp_acquire_tas_lock(&event->lock, gtid);
4617  if (taskdata->td_flags.proxy == TASK_PROXY) {
4618  detached = true;
4619  } else {
4620 #if OMPT_SUPPORT
4621  // The OMPT event must occur under mutual exclusion,
4622  // otherwise the tool might access ptask after free
4623  if (UNLIKELY(ompt_enabled.enabled))
4624  __ompt_task_finish(ptask, NULL, ompt_task_early_fulfill);
4625 #endif
4626  }
4627  event->type = KMP_EVENT_UNINITIALIZED;
4628  __kmp_release_tas_lock(&event->lock, gtid);
4629 
4630  if (detached) {
4631 #if OMPT_SUPPORT
4632  // We free ptask afterwards and know the task is finished,
4633  // so locking is not necessary
4634  if (UNLIKELY(ompt_enabled.enabled))
4635  __ompt_task_finish(ptask, NULL, ompt_task_late_fulfill);
4636 #endif
4637  // If the task detached, complete the proxy task
4638  if (gtid >= 0) {
4639  kmp_team_t *team = taskdata->td_team;
4640  kmp_info_t *thread = __kmp_get_thread();
4641  if (thread->th.th_team == team) {
4642  __kmpc_proxy_task_completed(gtid, ptask);
4643  return;
4644  }
4645  }
4646 
4647  // fallback
4648  __kmpc_proxy_task_completed_ooo(ptask);
4649  }
4650  }
4651 }
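// Illustrative user-level view (editor addition): __kmp_fulfill_event is
// reached through omp_fulfill_event() on an event obtained from a detach
// clause; start_async_io below is a hypothetical helper:
//
//   omp_event_handle_t ev;
//   #pragma omp task detach(ev)
//   { start_async_io(ev); }   // task body finishes, completion waits on ev
//   ...
//   omp_fulfill_event(ev);    // early fulfill if the task body is still
//                             // running; late fulfill (proxy completion
//                             // path above) if it already finished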
4652 
4653 // __kmp_task_dup_alloc: Allocate the taskdata and make a copy of source task
4654 // for taskloop
4655 //
4656 // thread: allocating thread
4657 // task_src: pointer to source task to be duplicated
4658 // taskloop_recur: used only when dealing with taskgraph,
4659 // indicating whether we need to update task->td_task_id
4660 // returns: a pointer to the allocated kmp_task_t structure (task).
4661 kmp_task_t *__kmp_task_dup_alloc(kmp_info_t *thread, kmp_task_t *task_src
4662 #if OMPX_TASKGRAPH
4663  , int taskloop_recur
4664 #endif
4665 ) {
4666  kmp_task_t *task;
4667  kmp_taskdata_t *taskdata;
4668  kmp_taskdata_t *taskdata_src = KMP_TASK_TO_TASKDATA(task_src);
4669  kmp_taskdata_t *parent_task = taskdata_src->td_parent; // same parent task
4670  size_t shareds_offset;
4671  size_t task_size;
4672 
4673  KA_TRACE(10, ("__kmp_task_dup_alloc(enter): Th %p, source task %p\n", thread,
4674  task_src));
4675  KMP_DEBUG_ASSERT(taskdata_src->td_flags.proxy ==
4676  TASK_FULL); // it should not be proxy task
4677  KMP_DEBUG_ASSERT(taskdata_src->td_flags.tasktype == TASK_EXPLICIT);
4678  task_size = taskdata_src->td_size_alloc;
4679 
4680  // Allocate a kmp_taskdata_t block and a kmp_task_t block.
4681  KA_TRACE(30, ("__kmp_task_dup_alloc: Th %p, malloc size %ld\n", thread,
4682  task_size));
4683 #if USE_FAST_MEMORY
4684  taskdata = (kmp_taskdata_t *)__kmp_fast_allocate(thread, task_size);
4685 #else
4686  taskdata = (kmp_taskdata_t *)__kmp_thread_malloc(thread, task_size);
4687 #endif /* USE_FAST_MEMORY */
4688  KMP_MEMCPY(taskdata, taskdata_src, task_size);
4689 
4690  task = KMP_TASKDATA_TO_TASK(taskdata);
4691 
4692  // Initialize new task (only specific fields not affected by memcpy)
4693 #if OMPX_TASKGRAPH
4694  if (!taskdata->is_taskgraph || taskloop_recur)
4695  taskdata->td_task_id = KMP_GEN_TASK_ID();
4696  else if (taskdata->is_taskgraph &&
4697  __kmp_tdg_is_recording(taskdata_src->tdg->tdg_status))
4698  taskdata->td_task_id = KMP_ATOMIC_INC(&__kmp_tdg_task_id);
4699 #else
4700  taskdata->td_task_id = KMP_GEN_TASK_ID();
4701 #endif
4702  if (task->shareds != NULL) { // need to set up shareds pointer
4703  shareds_offset = (char *)task_src->shareds - (char *)taskdata_src;
4704  task->shareds = &((char *)taskdata)[shareds_offset];
4705  KMP_DEBUG_ASSERT((((kmp_uintptr_t)task->shareds) & (sizeof(void *) - 1)) ==
4706  0);
4707  }
4708  taskdata->td_alloc_thread = thread;
4709  taskdata->td_parent = parent_task;
4710  // task inherits the taskgroup from the parent task
4711  taskdata->td_taskgroup = parent_task->td_taskgroup;
4712  // tied task needs to initialize the td_last_tied at creation,
4713  // untied one does this when it is scheduled for execution
4714  if (taskdata->td_flags.tiedness == TASK_TIED)
4715  taskdata->td_last_tied = taskdata;
4716 
4717  // Only need to keep track of child task counts if team parallel and tasking
4718  // not serialized
4719  if (!(taskdata->td_flags.team_serial || taskdata->td_flags.tasking_ser)) {
4720  KMP_ATOMIC_INC(&parent_task->td_incomplete_child_tasks);
4721  if (parent_task->td_taskgroup)
4722  KMP_ATOMIC_INC(&parent_task->td_taskgroup->count);
4723  // Only need to keep track of allocated child tasks for explicit tasks,
4724  // since implicit ones are not deallocated
4725  if (taskdata->td_parent->td_flags.tasktype == TASK_EXPLICIT)
4726  KMP_ATOMIC_INC(&taskdata->td_parent->td_allocated_child_tasks);
4727  }
4728 
4729  KA_TRACE(20,
4730  ("__kmp_task_dup_alloc(exit): Th %p, created task %p, parent=%p\n",
4731  thread, taskdata, taskdata->td_parent));
4732 #if OMPT_SUPPORT
4733  if (UNLIKELY(ompt_enabled.enabled))
4734  __ompt_task_init(taskdata, thread->th.th_info.ds.ds_gtid);
4735 #endif
4736  return task;
4737 }
4738 
4739 // Routine optionally generated by the compiler for setting the lastprivate flag
4740 // and calling needed constructors for private/firstprivate objects
4741 // (used to form taskloop tasks from pattern task)
4742 // Parameters: dest task, src task, lastprivate flag.
4743 typedef void (*p_task_dup_t)(kmp_task_t *, kmp_task_t *, kmp_int32);
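// A compiler-generated duplication routine might look like this hedged sketch
// (names hypothetical; real compilers emit specialized code per taskloop):
//
//   void task_dup_stub(kmp_task_t *dst, kmp_task_t *src, kmp_int32 lastpriv) {
//     // copy-construct private/firstprivate objects of dst from src here
//     if (lastpriv) {
//       // remember that dst is the last task and must write back
//       // lastprivate results when it completes
//     }
//   }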
4744 
4745 KMP_BUILD_ASSERT(sizeof(long) == 4 || sizeof(long) == 8);
4746 
4747 // Class to encapsulate manipulating loop bounds in a taskloop task.
4748 // This abstracts away the Intel vs GOMP taskloop interface for setting/getting
4749 // the loop bound variables.
4750 class kmp_taskloop_bounds_t {
4751  kmp_task_t *task;
4752  const kmp_taskdata_t *taskdata;
4753  size_t lower_offset;
4754  size_t upper_offset;
4755 
4756 public:
4757  kmp_taskloop_bounds_t(kmp_task_t *_task, kmp_uint64 *lb, kmp_uint64 *ub)
4758  : task(_task), taskdata(KMP_TASK_TO_TASKDATA(task)),
4759  lower_offset((char *)lb - (char *)task),
4760  upper_offset((char *)ub - (char *)task) {
4761  KMP_DEBUG_ASSERT((char *)lb > (char *)_task);
4762  KMP_DEBUG_ASSERT((char *)ub > (char *)_task);
4763  }
4764  kmp_taskloop_bounds_t(kmp_task_t *_task, const kmp_taskloop_bounds_t &bounds)
4765  : task(_task), taskdata(KMP_TASK_TO_TASKDATA(_task)),
4766  lower_offset(bounds.lower_offset), upper_offset(bounds.upper_offset) {}
4767  size_t get_lower_offset() const { return lower_offset; }
4768  size_t get_upper_offset() const { return upper_offset; }
4769  kmp_uint64 get_lb() const {
4770  kmp_int64 retval;
4771 #if defined(KMP_GOMP_COMPAT)
4772  // Intel task just returns the lower bound normally
4773  if (!taskdata->td_flags.native) {
4774  retval = *(kmp_int64 *)((char *)task + lower_offset);
4775  } else {
4776  // GOMP task has to take into account the sizeof(long)
4777  if (taskdata->td_size_loop_bounds == 4) {
4778  kmp_int32 *lb = RCAST(kmp_int32 *, task->shareds);
4779  retval = (kmp_int64)*lb;
4780  } else {
4781  kmp_int64 *lb = RCAST(kmp_int64 *, task->shareds);
4782  retval = (kmp_int64)*lb;
4783  }
4784  }
4785 #else
4786  (void)taskdata;
4787  retval = *(kmp_int64 *)((char *)task + lower_offset);
4788 #endif // defined(KMP_GOMP_COMPAT)
4789  return retval;
4790  }
4791  kmp_uint64 get_ub() const {
4792  kmp_int64 retval;
4793 #if defined(KMP_GOMP_COMPAT)
4794  // Intel task just returns the upper bound normally
4795  if (!taskdata->td_flags.native) {
4796  retval = *(kmp_int64 *)((char *)task + upper_offset);
4797  } else {
4798  // GOMP task has to take into account the sizeof(long)
4799  if (taskdata->td_size_loop_bounds == 4) {
4800  kmp_int32 *ub = RCAST(kmp_int32 *, task->shareds) + 1;
4801  retval = (kmp_int64)*ub;
4802  } else {
4803  kmp_int64 *ub = RCAST(kmp_int64 *, task->shareds) + 1;
4804  retval = (kmp_int64)*ub;
4805  }
4806  }
4807 #else
4808  retval = *(kmp_int64 *)((char *)task + upper_offset);
4809 #endif // defined(KMP_GOMP_COMPAT)
4810  return retval;
4811  }
4812  void set_lb(kmp_uint64 lb) {
4813 #if defined(KMP_GOMP_COMPAT)
4814  // Intel task just sets the lower bound normally
4815  if (!taskdata->td_flags.native) {
4816  *(kmp_uint64 *)((char *)task + lower_offset) = lb;
4817  } else {
4818  // GOMP task has to take into account the sizeof(long)
4819  if (taskdata->td_size_loop_bounds == 4) {
4820  kmp_uint32 *lower = RCAST(kmp_uint32 *, task->shareds);
4821  *lower = (kmp_uint32)lb;
4822  } else {
4823  kmp_uint64 *lower = RCAST(kmp_uint64 *, task->shareds);
4824  *lower = (kmp_uint64)lb;
4825  }
4826  }
4827 #else
4828  *(kmp_uint64 *)((char *)task + lower_offset) = lb;
4829 #endif // defined(KMP_GOMP_COMPAT)
4830  }
4831  void set_ub(kmp_uint64 ub) {
4832 #if defined(KMP_GOMP_COMPAT)
4833  // Intel task just sets the upper bound normally
4834  if (!taskdata->td_flags.native) {
4835  *(kmp_uint64 *)((char *)task + upper_offset) = ub;
4836  } else {
4837  // GOMP task has to take into account the sizeof(long)
4838  if (taskdata->td_size_loop_bounds == 4) {
4839  kmp_uint32 *upper = RCAST(kmp_uint32 *, task->shareds) + 1;
4840  *upper = (kmp_uint32)ub;
4841  } else {
4842  kmp_uint64 *upper = RCAST(kmp_uint64 *, task->shareds) + 1;
4843  *upper = (kmp_uint64)ub;
4844  }
4845  }
4846 #else
4847  *(kmp_uint64 *)((char *)task + upper_offset) = ub;
4848 #endif // defined(KMP_GOMP_COMPAT)
4849  }
4850 };
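// Layout handled by the accessors above (illustrative): Intel/LLVM entry
// points store lb/ub as 64-bit fields inside the task, so get_lb()/get_ub()
// read them at lower_offset/upper_offset from the task pointer. GOMP
// (td_flags.native) taskloops instead keep the bounds as the first two `long`
// values in task->shareds, so td_size_loop_bounds selects the access width:
//
//   shareds, sizeof(long) == 8:  [ lb : kmp_int64 ][ ub : kmp_int64 ]
//   shareds, sizeof(long) == 4:  [ lb : kmp_int32 ][ ub : kmp_int32 ]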
4851 
4852 // __kmp_taskloop_linear: Start tasks of the taskloop linearly
4853 //
4854 // loc Source location information
4855 // gtid Global thread ID
4856 // task Pattern task, exposes the loop iteration range
4857 // lb Pointer to loop lower bound in task structure
4858 // ub Pointer to loop upper bound in task structure
4859 // st Loop stride
4860 // ub_glob Global upper bound (used for lastprivate check)
4861 // num_tasks Number of tasks to execute
4862 // grainsize Number of loop iterations per task
4863 // extras Number of chunks with grainsize+1 iterations
4864 // last_chunk Reduction of grainsize for last task
4865 // tc Iterations count
4866 // task_dup Tasks duplication routine
4867 // codeptr_ra Return address for OMPT events
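// Worked example (illustrative): tc = 10 iterations in num_tasks = 3 tasks
// gives grainsize = 10/3 = 3 and extras = 10%3 = 1, i.e. chunks of 4, 3, 3
// (the first `extras` tasks get grainsize+1 iterations). With a strict
// grainsize of 4 the split is instead 4, 4, 2: num_tasks = 3, extras = 0,
// last_chunk = 10 - 3*4 = -2, matching the assertion below that
// tc == num_tasks * grainsize + (last_chunk < 0 ? last_chunk : extras).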
4868 void __kmp_taskloop_linear(ident_t *loc, int gtid, kmp_task_t *task,
4869  kmp_uint64 *lb, kmp_uint64 *ub, kmp_int64 st,
4870  kmp_uint64 ub_glob, kmp_uint64 num_tasks,
4871  kmp_uint64 grainsize, kmp_uint64 extras,
4872  kmp_int64 last_chunk, kmp_uint64 tc,
4873 #if OMPT_SUPPORT
4874  void *codeptr_ra,
4875 #endif
4876  void *task_dup) {
4877  KMP_COUNT_BLOCK(OMP_TASKLOOP);
4878  KMP_TIME_PARTITIONED_BLOCK(OMP_taskloop_scheduling);
4879  p_task_dup_t ptask_dup = (p_task_dup_t)task_dup;
4880  // compiler provides global bounds here
4881  kmp_taskloop_bounds_t task_bounds(task, lb, ub);
4882  kmp_uint64 lower = task_bounds.get_lb();
4883  kmp_uint64 upper = task_bounds.get_ub();
4884  kmp_uint64 i;
4885  kmp_info_t *thread = __kmp_threads[gtid];
4886  kmp_taskdata_t *current_task = thread->th.th_current_task;
4887  kmp_task_t *next_task;
4888  kmp_int32 lastpriv = 0;
4889 
4890  KMP_DEBUG_ASSERT(tc == num_tasks * grainsize +
4891  (last_chunk < 0 ? last_chunk : extras));
4892  KMP_DEBUG_ASSERT(num_tasks > extras);
4893  KMP_DEBUG_ASSERT(num_tasks > 0);
4894  KA_TRACE(20, ("__kmp_taskloop_linear: T#%d: %lld tasks, grainsize %lld, "
4895  "extras %lld, last_chunk %lld, i=%lld,%lld(%d)%lld, dup %p\n",
4896  gtid, num_tasks, grainsize, extras, last_chunk, lower, upper,
4897  ub_glob, st, task_dup));
4898 
4899  // Launch num_tasks tasks, assigning grainsize iterations to each task
4900  for (i = 0; i < num_tasks; ++i) {
4901  kmp_uint64 chunk_minus_1;
4902  if (extras == 0) {
4903  chunk_minus_1 = grainsize - 1;
4904  } else {
4905  chunk_minus_1 = grainsize;
4906  --extras; // the first 'extras' tasks get a bigger chunk (grainsize+1)
4907  }
4908  upper = lower + st * chunk_minus_1;
4909  if (upper > *ub) {
4910  upper = *ub;
4911  }
4912  if (i == num_tasks - 1) {
4913  // schedule the last task, set lastprivate flag if needed
4914  if (st == 1) { // most common case
4915  KMP_DEBUG_ASSERT(upper == *ub);
4916  if (upper == ub_glob)
4917  lastpriv = 1;
4918  } else if (st > 0) { // positive loop stride
4919  KMP_DEBUG_ASSERT((kmp_uint64)st > *ub - upper);
4920  if ((kmp_uint64)st > ub_glob - upper)
4921  lastpriv = 1;
4922  } else { // negative loop stride
4923  KMP_DEBUG_ASSERT(upper + st < *ub);
4924  if (upper - ub_glob < (kmp_uint64)(-st))
4925  lastpriv = 1;
4926  }
4927  }
4928 
4929 #if OMPX_TASKGRAPH
4930  next_task = __kmp_task_dup_alloc(thread, task, /* taskloop_recur */ 0);
4931 #else
4932  next_task = __kmp_task_dup_alloc(thread, task); // allocate new task
4933 #endif
4934 
4935  kmp_taskdata_t *next_taskdata = KMP_TASK_TO_TASKDATA(next_task);
4936  kmp_taskloop_bounds_t next_task_bounds =
4937  kmp_taskloop_bounds_t(next_task, task_bounds);
4938 
4939  // adjust task-specific bounds
4940  next_task_bounds.set_lb(lower);
4941  if (next_taskdata->td_flags.native) {
4942  next_task_bounds.set_ub(upper + (st > 0 ? 1 : -1));
4943  } else {
4944  next_task_bounds.set_ub(upper);
4945  }
4946  if (ptask_dup != NULL) // set lastprivate flag, construct firstprivates,
4947  // etc.
4948  ptask_dup(next_task, task, lastpriv);
4949  KA_TRACE(40,
4950  ("__kmp_taskloop_linear: T#%d; task #%llu: task %p: lower %lld, "
4951  "upper %lld stride %lld, (offsets %p %p)\n",
4952  gtid, i, next_task, lower, upper, st,
4953  next_task_bounds.get_lower_offset(),
4954  next_task_bounds.get_upper_offset()));
4955 #if OMPT_SUPPORT
4956  __kmp_omp_taskloop_task(NULL, gtid, next_task,
4957  codeptr_ra); // schedule new task
4958 #if OMPT_OPTIONAL
4959  if (ompt_enabled.ompt_callback_dispatch) {
4960  OMPT_GET_DISPATCH_CHUNK(next_taskdata->ompt_task_info.dispatch_chunk,
4961  lower, upper, st);
4962  }
4963 #endif // OMPT_OPTIONAL
4964 #else
4965  __kmp_omp_task(gtid, next_task, true); // schedule new task
4966 #endif
4967  lower = upper + st; // adjust lower bound for the next iteration
4968  }
4969  // free the pattern task and exit
4970  __kmp_task_start(gtid, task, current_task); // internal bookkeeping only
4971  // do not execute the pattern task, just do internal bookkeeping
4972  __kmp_task_finish<false>(gtid, task, current_task);
4973 }
4974 
4975 // Structure to keep taskloop parameters for auxiliary task
4976 // kept in the shareds of the task structure.
4977 typedef struct __taskloop_params {
4978  kmp_task_t *task;
4979  kmp_uint64 *lb;
4980  kmp_uint64 *ub;
4981  void *task_dup;
4982  kmp_int64 st;
4983  kmp_uint64 ub_glob;
4984  kmp_uint64 num_tasks;
4985  kmp_uint64 grainsize;
4986  kmp_uint64 extras;
4987  kmp_int64 last_chunk;
4988  kmp_uint64 tc;
4989  kmp_uint64 num_t_min;
4990 #if OMPT_SUPPORT
4991  void *codeptr_ra;
4992 #endif
4993 } __taskloop_params_t;
4994 
4995 void __kmp_taskloop_recur(ident_t *, int, kmp_task_t *, kmp_uint64 *,
4996  kmp_uint64 *, kmp_int64, kmp_uint64, kmp_uint64,
4997  kmp_uint64, kmp_uint64, kmp_int64, kmp_uint64,
4998  kmp_uint64,
4999 #if OMPT_SUPPORT
5000  void *,
5001 #endif
5002  void *);
5003 
5004 // Execute part of the taskloop submitted as a task.
5005 int __kmp_taskloop_task(int gtid, void *ptask) {
5006  __taskloop_params_t *p =
5007  (__taskloop_params_t *)((kmp_task_t *)ptask)->shareds;
5008  kmp_task_t *task = p->task;
5009  kmp_uint64 *lb = p->lb;
5010  kmp_uint64 *ub = p->ub;
5011  void *task_dup = p->task_dup;
5012  // p_task_dup_t ptask_dup = (p_task_dup_t)task_dup;
5013  kmp_int64 st = p->st;
5014  kmp_uint64 ub_glob = p->ub_glob;
5015  kmp_uint64 num_tasks = p->num_tasks;
5016  kmp_uint64 grainsize = p->grainsize;
5017  kmp_uint64 extras = p->extras;
5018  kmp_int64 last_chunk = p->last_chunk;
5019  kmp_uint64 tc = p->tc;
5020  kmp_uint64 num_t_min = p->num_t_min;
5021 #if OMPT_SUPPORT
5022  void *codeptr_ra = p->codeptr_ra;
5023 #endif
5024 #if KMP_DEBUG
5025  kmp_taskdata_t *taskdata = KMP_TASK_TO_TASKDATA(task);
5026  KMP_DEBUG_ASSERT(task != NULL);
5027  KA_TRACE(20,
5028  ("__kmp_taskloop_task: T#%d, task %p: %lld tasks, grainsize"
5029  " %lld, extras %lld, last_chunk %lld, i=%lld,%lld(%d), dup %p\n",
5030  gtid, taskdata, num_tasks, grainsize, extras, last_chunk, *lb, *ub,
5031  st, task_dup));
5032 #endif
5033  KMP_DEBUG_ASSERT(num_tasks * 2 + 1 > num_t_min);
5034  if (num_tasks > num_t_min)
5035  __kmp_taskloop_recur(NULL, gtid, task, lb, ub, st, ub_glob, num_tasks,
5036  grainsize, extras, last_chunk, tc, num_t_min,
5037 #if OMPT_SUPPORT
5038  codeptr_ra,
5039 #endif
5040  task_dup);
5041  else
5042  __kmp_taskloop_linear(NULL, gtid, task, lb, ub, st, ub_glob, num_tasks,
5043  grainsize, extras, last_chunk, tc,
5044 #if OMPT_SUPPORT
5045  codeptr_ra,
5046 #endif
5047  task_dup);
5048 
5049  KA_TRACE(40, ("__kmp_taskloop_task(exit): T#%d\n", gtid));
5050  return 0;
5051 }
5052 
5053 // Schedule part of the taskloop as a task,
5054 // execute the rest of the taskloop.
5055 //
5056 // loc Source location information
5057 // gtid Global thread ID
5058 // task Pattern task, exposes the loop iteration range
5059 // lb Pointer to loop lower bound in task structure
5060 // ub Pointer to loop upper bound in task structure
5061 // st Loop stride
5062 // ub_glob Global upper bound (used for lastprivate check)
5063 // num_tasks Number of tasks to execute
5064 // grainsize Number of loop iterations per task
5065 // extras Number of chunks with grainsize+1 iterations
5066 // last_chunk Reduction of grainsize for last task
5067 // tc Iterations count
5068 // num_t_min Threshold to launch tasks recursively
5069 // task_dup Tasks duplication routine
5070 // codeptr_ra Return address for OMPT events
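// Worked split (illustrative): num_tasks = 8, grainsize = 5, extras = 3,
// tc = 43. Then n_tsk0 = 4 and n_tsk1 = 4; since n_tsk0 > extras, the first
// half absorbs all extras (ext0 = 3, ext1 = 0), so tc1 = 5*4 = 20 and
// tc0 = 43 - 20 = 23. The second half is packaged as a __taskloop_params_t
// task and scheduled, while this thread keeps halving the first half until
// its task count drops to num_t_min and __kmp_taskloop_linear takes over.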
5071 void __kmp_taskloop_recur(ident_t *loc, int gtid, kmp_task_t *task,
5072  kmp_uint64 *lb, kmp_uint64 *ub, kmp_int64 st,
5073  kmp_uint64 ub_glob, kmp_uint64 num_tasks,
5074  kmp_uint64 grainsize, kmp_uint64 extras,
5075  kmp_int64 last_chunk, kmp_uint64 tc,
5076  kmp_uint64 num_t_min,
5077 #if OMPT_SUPPORT
5078  void *codeptr_ra,
5079 #endif
5080  void *task_dup) {
5081  kmp_taskdata_t *taskdata = KMP_TASK_TO_TASKDATA(task);
5082  KMP_DEBUG_ASSERT(task != NULL);
5083  KMP_DEBUG_ASSERT(num_tasks > num_t_min);
5084  KA_TRACE(20,
5085  ("__kmp_taskloop_recur: T#%d, task %p: %lld tasks, grainsize"
5086  " %lld, extras %lld, last_chunk %lld, i=%lld,%lld(%d), dup %p\n",
5087  gtid, taskdata, num_tasks, grainsize, extras, last_chunk, *lb, *ub,
5088  st, task_dup));
5089  p_task_dup_t ptask_dup = (p_task_dup_t)task_dup;
5090  kmp_uint64 lower = *lb;
5091  kmp_info_t *thread = __kmp_threads[gtid];
5092  // kmp_taskdata_t *current_task = thread->th.th_current_task;
5093  kmp_task_t *next_task;
5094  size_t lower_offset =
5095  (char *)lb - (char *)task; // remember offset of lb in the task structure
5096  size_t upper_offset =
5097  (char *)ub - (char *)task; // remember offset of ub in the task structure
5098 
5099  KMP_DEBUG_ASSERT(tc == num_tasks * grainsize +
5100  (last_chunk < 0 ? last_chunk : extras));
5101  KMP_DEBUG_ASSERT(num_tasks > extras);
5102  KMP_DEBUG_ASSERT(num_tasks > 0);
5103 
5104  // split the loop into two halves
5105  kmp_uint64 lb1, ub0, tc0, tc1, ext0, ext1;
5106  kmp_int64 last_chunk0 = 0, last_chunk1 = 0;
5107  kmp_uint64 gr_size0 = grainsize;
5108  kmp_uint64 n_tsk0 = num_tasks >> 1; // num_tasks/2 to execute
5109  kmp_uint64 n_tsk1 = num_tasks - n_tsk0; // to schedule as a task
5110  if (last_chunk < 0) {
5111  ext0 = ext1 = 0;
5112  last_chunk1 = last_chunk;
5113  tc0 = grainsize * n_tsk0;
5114  tc1 = tc - tc0;
5115  } else if (n_tsk0 <= extras) {
5116  gr_size0++; // integrate extras into grainsize
5117  ext0 = 0; // no extra iters in 1st half
5118  ext1 = extras - n_tsk0; // remaining extras
5119  tc0 = gr_size0 * n_tsk0;
5120  tc1 = tc - tc0;
5121  } else { // n_tsk0 > extras
5122  ext1 = 0; // no extra iters in 2nd half
5123  ext0 = extras;
5124  tc1 = grainsize * n_tsk1;
5125  tc0 = tc - tc1;
5126  }
5127  ub0 = lower + st * (tc0 - 1);
5128  lb1 = ub0 + st;
5129 
5130  // create pattern task for 2nd half of the loop
5131 #if OMPX_TASKGRAPH
5132  next_task = __kmp_task_dup_alloc(thread, task,
5133  /* taskloop_recur */ 1);
5134 #else
5135  next_task = __kmp_task_dup_alloc(thread, task); // duplicate the task
5136 #endif
5137  // adjust lower bound (upper bound is not changed) for the 2nd half
5138  *(kmp_uint64 *)((char *)next_task + lower_offset) = lb1;
5139  if (ptask_dup != NULL) // construct firstprivates, etc.
5140  ptask_dup(next_task, task, 0);
5141  *ub = ub0; // adjust upper bound for the 1st half
5142 
5143  // create auxiliary task for 2nd half of the loop
5144  // make sure new task has same parent task as the pattern task
5145  kmp_taskdata_t *current_task = thread->th.th_current_task;
5146  thread->th.th_current_task = taskdata->td_parent;
5147  kmp_task_t *new_task =
5148  __kmpc_omp_task_alloc(loc, gtid, 1, 3 * sizeof(void *),
5149  sizeof(__taskloop_params_t), &__kmp_taskloop_task);
5150  // restore current task
5151  thread->th.th_current_task = current_task;
5152  __taskloop_params_t *p = (__taskloop_params_t *)new_task->shareds;
5153  p->task = next_task;
5154  p->lb = (kmp_uint64 *)((char *)next_task + lower_offset);
5155  p->ub = (kmp_uint64 *)((char *)next_task + upper_offset);
5156  p->task_dup = task_dup;
5157  p->st = st;
5158  p->ub_glob = ub_glob;
5159  p->num_tasks = n_tsk1;
5160  p->grainsize = grainsize;
5161  p->extras = ext1;
5162  p->last_chunk = last_chunk1;
5163  p->tc = tc1;
5164  p->num_t_min = num_t_min;
5165 #if OMPT_SUPPORT
5166  p->codeptr_ra = codeptr_ra;
5167 #endif
5168 
5169 #if OMPX_TASKGRAPH
5170  kmp_taskdata_t *new_task_data = KMP_TASK_TO_TASKDATA(new_task);
5171  new_task_data->tdg = taskdata->tdg;
5172  new_task_data->is_taskgraph = 0;
5173 #endif
5174 
5175 #if OMPT_SUPPORT
5176  // schedule new task with correct return address for OMPT events
5177  __kmp_omp_taskloop_task(NULL, gtid, new_task, codeptr_ra);
5178 #else
5179  __kmp_omp_task(gtid, new_task, true); // schedule new task
5180 #endif
5181 
5182  // execute the 1st half of current subrange
5183  if (n_tsk0 > num_t_min)
5184  __kmp_taskloop_recur(loc, gtid, task, lb, ub, st, ub_glob, n_tsk0, gr_size0,
5185  ext0, last_chunk0, tc0, num_t_min,
5186 #if OMPT_SUPPORT
5187  codeptr_ra,
5188 #endif
5189  task_dup);
5190  else
5191  __kmp_taskloop_linear(loc, gtid, task, lb, ub, st, ub_glob, n_tsk0,
5192  gr_size0, ext0, last_chunk0, tc0,
5193 #if OMPT_SUPPORT
5194  codeptr_ra,
5195 #endif
5196  task_dup);
5197 
5198  KA_TRACE(40, ("__kmp_taskloop_recur(exit): T#%d\n", gtid));
5199 }
5200 
5201 static void __kmp_taskloop(ident_t *loc, int gtid, kmp_task_t *task, int if_val,
5202  kmp_uint64 *lb, kmp_uint64 *ub, kmp_int64 st,
5203  int nogroup, int sched, kmp_uint64 grainsize,
5204  int modifier, void *task_dup) {
5205  kmp_taskdata_t *taskdata = KMP_TASK_TO_TASKDATA(task);
5206  KMP_DEBUG_ASSERT(task != NULL);
5207  if (nogroup == 0) {
5208 #if OMPT_SUPPORT && OMPT_OPTIONAL
5209  OMPT_STORE_RETURN_ADDRESS(gtid);
5210 #endif
5211  __kmpc_taskgroup(loc, gtid);
5212  }
5213 
5214 #if OMPX_TASKGRAPH
5215  KMP_ATOMIC_DEC(&__kmp_tdg_task_id);
5216 #endif
5217  // =========================================================================
5218  // calculate loop parameters
5219  kmp_taskloop_bounds_t task_bounds(task, lb, ub);
5220  kmp_uint64 tc;
5221  // compiler provides global bounds here
5222  kmp_uint64 lower = task_bounds.get_lb();
5223  kmp_uint64 upper = task_bounds.get_ub();
5224  kmp_uint64 ub_glob = upper; // global upper used to calc lastprivate flag
5225  kmp_uint64 num_tasks = 0, extras = 0;
5226  kmp_int64 last_chunk =
5227  0; // reduce grainsize of last task by last_chunk in strict mode
5228  kmp_uint64 num_tasks_min = __kmp_taskloop_min_tasks;
5229  kmp_info_t *thread = __kmp_threads[gtid];
5230  kmp_taskdata_t *current_task = thread->th.th_current_task;
5231 
5232  KA_TRACE(20, ("__kmp_taskloop: T#%d, task %p, lb %lld, ub %lld, st %lld, "
5233  "grain %llu(%d, %d), dup %p\n",
5234  gtid, taskdata, lower, upper, st, grainsize, sched, modifier,
5235  task_dup));
5236 
5237  // compute trip count
5238  if (st == 1) { // most common case
5239  tc = upper - lower + 1;
5240  } else if (st < 0) {
5241  tc = (lower - upper) / (-st) + 1;
5242  } else { // st > 0
5243  tc = (upper - lower) / st + 1;
5244  }
5245  if (tc == 0) {
5246  KA_TRACE(20, ("__kmp_taskloop(exit): T#%d zero-trip loop\n", gtid));
5247  // free the pattern task and exit
5248  __kmp_task_start(gtid, task, current_task);
5249  // do not execute anything for zero-trip loop
5250  __kmp_task_finish<false>(gtid, task, current_task);
5251  return;
5252  }
5253 
5254 #if OMPT_SUPPORT && OMPT_OPTIONAL
5255  ompt_team_info_t *team_info = __ompt_get_teaminfo(0, NULL);
5256  ompt_task_info_t *task_info = __ompt_get_task_info_object(0);
5257  if (ompt_enabled.ompt_callback_work) {
5258  ompt_callbacks.ompt_callback(ompt_callback_work)(
5259  ompt_work_taskloop, ompt_scope_begin, &(team_info->parallel_data),
5260  &(task_info->task_data), tc, OMPT_GET_RETURN_ADDRESS(0));
5261  }
5262 #endif
5263 
5264  if (num_tasks_min == 0)
5265  // TODO: can we choose a better default heuristic?
5266  num_tasks_min =
5267  KMP_MIN(thread->th.th_team_nproc * 10, INITIAL_TASK_DEQUE_SIZE);
5268 
5269  // compute num_tasks/grainsize based on the input provided
5270  switch (sched) {
5271  case 0: // no schedule clause specified, we can choose the default
5272  // let's try to schedule (team_size*10) tasks
5273  grainsize = thread->th.th_team_nproc * 10;
5274  KMP_FALLTHROUGH();
5275  case 2: // num_tasks provided
5276  if (grainsize > tc) {
5277  num_tasks = tc; // too big num_tasks requested, adjust values
5278  grainsize = 1;
5279  extras = 0;
5280  } else {
5281  num_tasks = grainsize;
5282  grainsize = tc / num_tasks;
5283  extras = tc % num_tasks;
5284  }
5285  break;
5286  case 1: // grainsize provided
5287  if (grainsize > tc) {
5288  num_tasks = 1;
5289  grainsize = tc; // too big grainsize requested, adjust values
5290  extras = 0;
5291  } else {
5292  if (modifier) {
5293  num_tasks = (tc + grainsize - 1) / grainsize;
5294  last_chunk = tc - (num_tasks * grainsize);
5295  extras = 0;
5296  } else {
5297  num_tasks = tc / grainsize;
5298  // adjust grainsize for balanced distribution of iterations
5299  grainsize = tc / num_tasks;
5300  extras = tc % num_tasks;
5301  }
5302  }
5303  break;
5304  default:
5305  KMP_ASSERT2(0, "unknown scheduling of taskloop");
5306  }
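// Summary of the clause mapping above (illustrative):
//   sched 0 (no clause)    : aim for team_size*10 tasks, then fall through
//   sched 2 (num_tasks(n)) : grainsize = tc/n, extras = tc%n
//   sched 1 (grainsize(g)) : num_tasks = tc/g, rebalanced; with the strict
//                            modifier, num_tasks = ceil(tc/g) and the last
//                            task is shortened via a negative last_chunk
// e.g. tc = 1000 with num_tasks(3): grainsize = 333, extras = 1, so the
// chunks are 334, 333, 333.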
5307 
5308  KMP_DEBUG_ASSERT(tc == num_tasks * grainsize +
5309  (last_chunk < 0 ? last_chunk : extras));
5310  KMP_DEBUG_ASSERT(num_tasks > extras);
5311  KMP_DEBUG_ASSERT(num_tasks > 0);
5312  // =========================================================================
5313 
5314  // Check the if-clause value first.
5315  // Also require GOMP_taskloop to reduce to linear (taskdata->td_flags.native)
5316  if (if_val == 0) { // if(0) specified, mark task as serial
5317  taskdata->td_flags.task_serial = 1;
5318  taskdata->td_flags.tiedness = TASK_TIED; // AC: serial task cannot be untied
5319  // always start serial tasks linearly
5320  __kmp_taskloop_linear(loc, gtid, task, lb, ub, st, ub_glob, num_tasks,
5321  grainsize, extras, last_chunk, tc,
5322 #if OMPT_SUPPORT
5323  OMPT_GET_RETURN_ADDRESS(0),
5324 #endif
5325  task_dup);
5326  // !taskdata->td_flags.native => currently force linear spawning of tasks
5327  // for GOMP_taskloop
5328  } else if (num_tasks > num_tasks_min && !taskdata->td_flags.native) {
5329  KA_TRACE(20, ("__kmp_taskloop: T#%d, go recursive: tc %llu, #tasks %llu"
5330  "(%lld), grain %llu, extras %llu, last_chunk %lld\n",
5331  gtid, tc, num_tasks, num_tasks_min, grainsize, extras,
5332  last_chunk));
5333  __kmp_taskloop_recur(loc, gtid, task, lb, ub, st, ub_glob, num_tasks,
5334  grainsize, extras, last_chunk, tc, num_tasks_min,
5335 #if OMPT_SUPPORT
5336  OMPT_GET_RETURN_ADDRESS(0),
5337 #endif
5338  task_dup);
5339  } else {
5340  KA_TRACE(20, ("__kmp_taskloop: T#%d, go linear: tc %llu, #tasks %llu"
5341  "(%lld), grain %llu, extras %llu, last_chunk %lld\n",
5342  gtid, tc, num_tasks, num_tasks_min, grainsize, extras,
5343  last_chunk));
5344  __kmp_taskloop_linear(loc, gtid, task, lb, ub, st, ub_glob, num_tasks,
5345  grainsize, extras, last_chunk, tc,
5346 #if OMPT_SUPPORT
5347  OMPT_GET_RETURN_ADDRESS(0),
5348 #endif
5349  task_dup);
5350  }
5351 
5352 #if OMPT_SUPPORT && OMPT_OPTIONAL
5353  if (ompt_enabled.ompt_callback_work) {
5354  ompt_callbacks.ompt_callback(ompt_callback_work)(
5355  ompt_work_taskloop, ompt_scope_end, &(team_info->parallel_data),
5356  &(task_info->task_data), tc, OMPT_GET_RETURN_ADDRESS(0));
5357  }
5358 #endif
5359 
5360  if (nogroup == 0) {
5361 #if OMPT_SUPPORT && OMPT_OPTIONAL
5362  OMPT_STORE_RETURN_ADDRESS(gtid);
5363 #endif
5364  __kmpc_end_taskgroup(loc, gtid);
5365  }
5366  KA_TRACE(20, ("__kmp_taskloop(exit): T#%d\n", gtid));
5367 }
5368 
5385 void __kmpc_taskloop(ident_t *loc, int gtid, kmp_task_t *task, int if_val,
5386  kmp_uint64 *lb, kmp_uint64 *ub, kmp_int64 st, int nogroup,
5387  int sched, kmp_uint64 grainsize, void *task_dup) {
5388  __kmp_assert_valid_gtid(gtid);
5389  KA_TRACE(20, ("__kmpc_taskloop(enter): T#%d\n", gtid));
5390  __kmp_taskloop(loc, gtid, task, if_val, lb, ub, st, nogroup, sched, grainsize,
5391  0, task_dup);
5392  KA_TRACE(20, ("__kmpc_taskloop(exit): T#%d\n", gtid));
5393 }
5394 
5412 void __kmpc_taskloop_5(ident_t *loc, int gtid, kmp_task_t *task, int if_val,
5413  kmp_uint64 *lb, kmp_uint64 *ub, kmp_int64 st,
5414  int nogroup, int sched, kmp_uint64 grainsize,
5415  int modifier, void *task_dup) {
5416  __kmp_assert_valid_gtid(gtid);
5417  KA_TRACE(20, ("__kmpc_taskloop_5(enter): T#%d\n", gtid));
5418  __kmp_taskloop(loc, gtid, task, if_val, lb, ub, st, nogroup, sched, grainsize,
5419  modifier, task_dup);
5420  KA_TRACE(20, ("__kmpc_taskloop_5(exit): T#%d\n", gtid));
5421 }
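// Illustrative lowering (editor addition, not emitted code): a directive like
//
//   #pragma omp taskloop grainsize(strict: 4)
//   for (i = lb; i <= ub; i += st) ...
//
// is compiled to a pattern task plus a call such as
//
//   __kmpc_taskloop_5(loc, gtid, task, if_val, &lb, &ub, st,
//                     /*nogroup=*/0, /*sched=*/1, /*grainsize=*/4,
//                     /*modifier=*/1, task_dup);
//
// while compilers predating the strict modifier emit __kmpc_taskloop.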
5422 
5431 void **__kmpc_omp_get_target_async_handle_ptr(kmp_int32 gtid) {
5432  if (gtid == KMP_GTID_DNE)
5433  return NULL;
5434 
5435  kmp_info_t *thread = __kmp_thread_from_gtid(gtid);
5436  kmp_taskdata_t *taskdata = thread->th.th_current_task;
5437 
5438  if (!taskdata)
5439  return NULL;
5440 
5441  return &taskdata->td_target_data.async_handle;
5442 }
5443 
5452 bool __kmpc_omp_has_task_team(kmp_int32 gtid) {
5453  if (gtid == KMP_GTID_DNE)
5454  return FALSE;
5455 
5456  kmp_info_t *thread = __kmp_thread_from_gtid(gtid);
5457  kmp_taskdata_t *taskdata = thread->th.th_current_task;
5458 
5459  if (!taskdata)
5460  return FALSE;
5461 
5462  return taskdata->td_task_team != NULL;
5463 }
5464 
5465 #if OMPX_TASKGRAPH
5466 // __kmp_find_tdg: identify a TDG through its ID
5467 // tdg_id: ID of the TDG
5468 // returns: If a TDG corresponding to this ID is found and it is not
5469 // in its initial state, return a pointer to it, otherwise
5470 // nullptr
5471 static kmp_tdg_info_t *__kmp_find_tdg(kmp_int32 tdg_id) {
5472  kmp_tdg_info_t *res = nullptr;
5473  if (__kmp_max_tdgs == 0)
5474  return res;
5475 
5476  if (__kmp_global_tdgs == NULL)
5477  __kmp_global_tdgs = (kmp_tdg_info_t **)__kmp_allocate(
5478  sizeof(kmp_tdg_info_t *) * __kmp_max_tdgs);
5479 
5480  if ((__kmp_global_tdgs[tdg_id]) &&
5481  (__kmp_global_tdgs[tdg_id]->tdg_status != KMP_TDG_NONE))
5482  res = __kmp_global_tdgs[tdg_id];
5483  return res;
5484 }
5485 
5486 // __kmp_print_tdg_dot: prints the TDG to a dot file
5487 // tdg: Pointer to the TDG to print
5488 void __kmp_print_tdg_dot(kmp_tdg_info_t *tdg) {
5489  kmp_int32 tdg_id = tdg->tdg_id;
5490  KA_TRACE(10, ("__kmp_print_tdg_dot(enter): tdg_id=%d\n", tdg_id));
5491 
5492  char file_name[20];
5493  snprintf(file_name, sizeof(file_name), "tdg_%d.dot", tdg_id);
5494  kmp_safe_raii_file_t tdg_file(file_name, "w");
5495 
5496  kmp_int32 num_tasks = KMP_ATOMIC_LD_RLX(&tdg->num_tasks);
5497  fprintf(tdg_file,
5498  "digraph TDG {\n"
5499  " compound=true\n"
5500  " subgraph cluster {\n"
5501  " label=TDG_%d\n",
5502  tdg_id);
5503  for (kmp_int32 i = 0; i < num_tasks; i++) {
5504  fprintf(tdg_file, " %d[style=bold]\n", i);
5505  }
5506  fprintf(tdg_file, " }\n");
5507  for (kmp_int32 i = 0; i < num_tasks; i++) {
5508  kmp_int32 nsuccessors = tdg->record_map[i].nsuccessors;
5509  kmp_int32 *successors = tdg->record_map[i].successors;
5510  if (nsuccessors > 0) {
5511  for (kmp_int32 j = 0; j < nsuccessors; j++)
5512  fprintf(tdg_file, " %d -> %d \n", i, successors[j]);
5513  }
5514  }
5515  fprintf(tdg_file, "}");
5516  KA_TRACE(10, ("__kmp_print_tdg_dot(exit): tdg_id=%d\n", tdg_id));
5517 }
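// Example output (illustrative) for a 3-task TDG where task 0 precedes
// tasks 1 and 2:
//
//   digraph TDG {
//    compound=true
//    subgraph cluster {
//    label=TDG_0
//    0[style=bold]
//    1[style=bold]
//    2[style=bold]
//    }
//    0 -> 1
//    0 -> 2
//   }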
5518 
5519 // __kmp_exec_tdg: launch the execution of a previously
5520 // recorded TDG
5521 // gtid: Global Thread ID
5522 // tdg: Pointer to the TDG to execute
5523 void __kmp_exec_tdg(kmp_int32 gtid, kmp_tdg_info_t *tdg) {
5524  KMP_DEBUG_ASSERT(tdg->tdg_status == KMP_TDG_READY);
5525  KA_TRACE(10, ("__kmp_exec_tdg(enter): T#%d tdg_id=%d num_roots=%d\n", gtid,
5526  tdg->tdg_id, tdg->num_roots));
5527  kmp_node_info_t *this_record_map = tdg->record_map;
5528  kmp_int32 *this_root_tasks = tdg->root_tasks;
5529  kmp_int32 this_num_roots = tdg->num_roots;
5530  kmp_int32 this_num_tasks = KMP_ATOMIC_LD_RLX(&tdg->num_tasks);
5531 
5532  kmp_info_t *thread = __kmp_threads[gtid];
5533  kmp_taskdata_t *parent_task = thread->th.th_current_task;
5534 
5535  if (tdg->rec_taskred_data) {
5536  __kmpc_taskred_init(gtid, tdg->rec_num_taskred, tdg->rec_taskred_data);
5537  }
5538 
5539  for (kmp_int32 j = 0; j < this_num_tasks; j++) {
5540  kmp_taskdata_t *td = KMP_TASK_TO_TASKDATA(this_record_map[j].task);
5541 
5542  td->td_parent = parent_task;
5543  this_record_map[j].parent_task = parent_task;
5544 
5545  kmp_taskgroup_t *parent_taskgroup =
5546  this_record_map[j].parent_task->td_taskgroup;
5547 
5548  KMP_ATOMIC_ST_RLX(&this_record_map[j].npredecessors_counter,
5549  this_record_map[j].npredecessors);
5550  KMP_ATOMIC_INC(&this_record_map[j].parent_task->td_incomplete_child_tasks);
5551 
5552  if (parent_taskgroup) {
5553  KMP_ATOMIC_INC(&parent_taskgroup->count);
5554  // The taskgroup is different so we must update it
5555  td->td_taskgroup = parent_taskgroup;
5556  } else if (td->td_taskgroup != nullptr) {
5557  // If the parent doesn't have a taskgroup, remove it from the task
5558  td->td_taskgroup = nullptr;
5559  }
5560  if (this_record_map[j].parent_task->td_flags.tasktype == TASK_EXPLICIT)
5561  KMP_ATOMIC_INC(&this_record_map[j].parent_task->td_allocated_child_tasks);
5562  }
5563 
5564  for (kmp_int32 j = 0; j < this_num_roots; ++j) {
5565  __kmp_omp_task(gtid, this_record_map[this_root_tasks[j]].task, true);
5566  }
5567  KA_TRACE(10, ("__kmp_exec_tdg(exit): T#%d tdg_id=%d num_roots=%d\n", gtid,
5568  tdg->tdg_id, tdg->num_roots));
5569 }
5570 
5571 // __kmp_start_record: set up a TDG structure and turn the
5572 // recording flag on
5573 // gtid: Global Thread ID of the encountering thread
5574 // flags: Flags associated with the TDG
5575 // tdg_id: ID of the TDG to record
5576 static inline void __kmp_start_record(kmp_int32 gtid,
5577  kmp_taskgraph_flags_t *flags,
5578  kmp_int32 tdg_id) {
5579  kmp_tdg_info_t *tdg =
5580  (kmp_tdg_info_t *)__kmp_allocate(sizeof(kmp_tdg_info_t));
5581  __kmp_global_tdgs[__kmp_curr_tdg_idx] = tdg;
5582  // Initializing the TDG structure
5583  tdg->tdg_id = tdg_id;
5584  tdg->map_size = INIT_MAPSIZE;
5585  tdg->num_roots = -1;
5586  tdg->root_tasks = nullptr;
5587  tdg->tdg_status = KMP_TDG_RECORDING;
5588  tdg->rec_num_taskred = 0;
5589  tdg->rec_taskred_data = nullptr;
5590  KMP_ATOMIC_ST_RLX(&tdg->num_tasks, 0);
5591 
5592  // Initializing the list of nodes in this TDG
5593  kmp_node_info_t *this_record_map =
5594  (kmp_node_info_t *)__kmp_allocate(INIT_MAPSIZE * sizeof(kmp_node_info_t));
5595  for (kmp_int32 i = 0; i < INIT_MAPSIZE; i++) {
5596  kmp_int32 *successorsList =
5597  (kmp_int32 *)__kmp_allocate(__kmp_successors_size * sizeof(kmp_int32));
5598  this_record_map[i].task = nullptr;
5599  this_record_map[i].successors = successorsList;
5600  this_record_map[i].nsuccessors = 0;
5601  this_record_map[i].npredecessors = 0;
5602  this_record_map[i].successors_size = __kmp_successors_size;
5603  KMP_ATOMIC_ST_RLX(&this_record_map[i].npredecessors_counter, 0);
5604  }
5605 
5606  __kmp_global_tdgs[__kmp_curr_tdg_idx]->record_map = this_record_map;
5607 }
5608 
5609 // __kmpc_start_record_task: Wrapper around __kmp_start_record to mark
5610 // the beginning of the recording process of a task region
5611 // loc_ref: Location of TDG, not used yet
5612 // gtid: Global Thread ID of the encountering thread
5613 // input_flags: Flags associated with the TDG
5614 // tdg_id: ID of the TDG to record; for now, an incremental integer
5615 // returns: 1 if we record, otherwise 0
5616 kmp_int32 __kmpc_start_record_task(ident_t *loc_ref, kmp_int32 gtid,
5617  kmp_int32 input_flags, kmp_int32 tdg_id) {
5618 
5619  kmp_int32 res;
5620  kmp_taskgraph_flags_t *flags = (kmp_taskgraph_flags_t *)&input_flags;
5621  KA_TRACE(10,
5622  ("__kmpc_start_record_task(enter): T#%d loc=%p flags=%d tdg_id=%d\n",
5623  gtid, loc_ref, input_flags, tdg_id));
5624 
5625  if (__kmp_max_tdgs == 0) {
5626  KA_TRACE(
5627  10,
5628  ("__kmpc_start_record_task(abandon): T#%d loc=%p flags=%d tdg_id = %d, "
5629  "__kmp_max_tdgs = 0\n",
5630  gtid, loc_ref, input_flags, tdg_id));
5631  return 1;
5632  }
5633 
5634  __kmpc_taskgroup(loc_ref, gtid);
5635  if (kmp_tdg_info_t *tdg = __kmp_find_tdg(tdg_id)) {
5636  // TODO: use re_record flag
5637  __kmp_exec_tdg(gtid, tdg);
5638  res = 0;
5639  } else {
5640  __kmp_curr_tdg_idx = tdg_id;
5641  KMP_DEBUG_ASSERT(__kmp_curr_tdg_idx < __kmp_max_tdgs);
5642  __kmp_start_record(gtid, flags, tdg_id);
5643  __kmp_num_tdg++;
5644  res = 1;
5645  }
5646  KA_TRACE(10, ("__kmpc_start_record_task(exit): T#%d TDG %d starts to %s\n",
5647  gtid, tdg_id, res ? "record" : "execute"));
5648  return res;
5649 }
5650 
5651 // __kmp_end_record: set up a TDG after recording it
5652 // gtid: Global thread ID
5653 // tdg: Pointer to the TDG
5654 void __kmp_end_record(kmp_int32 gtid, kmp_tdg_info_t *tdg) {
5655  // Store roots
5656  kmp_node_info_t *this_record_map = tdg->record_map;
5657  kmp_int32 this_num_tasks = KMP_ATOMIC_LD_RLX(&tdg->num_tasks);
5658  kmp_int32 *this_root_tasks =
5659  (kmp_int32 *)__kmp_allocate(this_num_tasks * sizeof(kmp_int32));
5660  kmp_int32 this_map_size = tdg->map_size;
5661  kmp_int32 this_num_roots = 0;
5662  kmp_info_t *thread = __kmp_threads[gtid];
5663 
5664  for (kmp_int32 i = 0; i < this_num_tasks; i++) {
5665  if (this_record_map[i].npredecessors == 0) {
5666  this_root_tasks[this_num_roots++] = i;
5667  }
5668  }
5669 
5670  // Update with roots info and mapsize
5671  tdg->map_size = this_map_size;
5672  tdg->num_roots = this_num_roots;
5673  tdg->root_tasks = this_root_tasks;
5674  KMP_DEBUG_ASSERT(tdg->tdg_status == KMP_TDG_RECORDING);
5675  tdg->tdg_status = KMP_TDG_READY;
5676 
5677  if (thread->th.th_current_task->td_dephash) {
5678  __kmp_dephash_free(thread, thread->th.th_current_task->td_dephash);
5679  thread->th.th_current_task->td_dephash = NULL;
5680  }
5681 
5682  // Reset predecessor counter
5683  for (kmp_int32 i = 0; i < this_num_tasks; i++) {
5684  KMP_ATOMIC_ST_RLX(&this_record_map[i].npredecessors_counter,
5685  this_record_map[i].npredecessors);
5686  }
5687  KMP_ATOMIC_ST_RLX(&__kmp_tdg_task_id, 0);
5688 
5689  if (__kmp_tdg_dot)
5690  __kmp_print_tdg_dot(tdg);
5691 }
5692 
5693 // __kmpc_end_record_task: wrapper around __kmp_end_record to mark
5694 // the end of the recording phase
5695 //
5696 // loc_ref: Source location information
5697 // gtid: Global thread ID
5698 // input_flags: Flags attached to the graph
5699 // tdg_id: ID of the TDG just finished recording
5700 void __kmpc_end_record_task(ident_t *loc_ref, kmp_int32 gtid,
5701  kmp_int32 input_flags, kmp_int32 tdg_id) {
5702  kmp_tdg_info_t *tdg = __kmp_find_tdg(tdg_id);
5703 
5704  KA_TRACE(10, ("__kmpc_end_record_task(enter): T#%d loc=%p finishes recording"
5705  " tdg=%d with flags=%d\n",
5706  gtid, loc_ref, tdg_id, input_flags));
5707  if (__kmp_max_tdgs) {
5708  // TODO: use input_flags->nowait
5709  __kmpc_end_taskgroup(loc_ref, gtid);
5710  if (__kmp_tdg_is_recording(tdg->tdg_status))
5711  __kmp_end_record(gtid, tdg);
5712  }
5713  KA_TRACE(10, ("__kmpc_end_record_task(exit): T#%d loc=%p finished recording"
5714  " tdg=%d, its status is now READY\n",
5715  gtid, loc_ref, tdg_id));
5716 }
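// Illustrative record/replay flow (editor addition; `body` stands for the
// compiler-outlined taskgraph region):
//
//   if (__kmpc_start_record_task(loc, gtid, flags, tdg_id)) {
//     body(); // first run: tasks created here are recorded into the TDG
//   }
//   __kmpc_end_record_task(loc, gtid, flags, tdg_id);
//
// On later encounters a READY TDG with this tdg_id is found,
// __kmp_exec_tdg replays it and __kmpc_start_record_task returns 0, so
// body() is skipped entirely.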
5717 #endif