//===---- reduction.cu - NVPTX OpenMP reduction implementation ---- CUDA
//-*-===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is dual licensed under the MIT and the University of Illinois Open
// Source Licenses. See LICENSE.txt for details.
//
//===----------------------------------------------------------------------===//
//
// This file contains the implementation of reduction with KMPC interface.
//
//===----------------------------------------------------------------------===//

#include <complex.h>
#include <stdio.h>

#include "omptarget-nvptx.h"

// may eventually remove this
EXTERN
int32_t __gpu_block_reduce() {
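  // Returns 1 only when every thread of the CUDA block takes part in the
  // OpenMP parallel region (nt == blockDim.x) and the calling warp is fully
  // active; __kmpc_reduce_gpu below relies on this to decide whether a
  // simple block-level reduction can be used.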
  int tid = GetLogicalThreadIdInBlock();
  int nt = GetNumberOfOmpThreads(tid, isSPMDMode(), isRuntimeUninitialized());
  if (nt != blockDim.x)
    return 0;
  unsigned tnum = __ACTIVEMASK();
  if (tnum != (~0x0)) { // assume warpSize is 32
    return 0;
  }
  return 1;
}

EXTERN
int32_t __kmpc_reduce_gpu(kmp_Indent *loc, int32_t global_tid, int32_t num_vars,
                          size_t reduce_size, void *reduce_data,
                          void *reduce_array_size, kmp_ReductFctPtr *reductFct,
                          kmp_CriticalName *lck) {
  int threadId = GetLogicalThreadIdInBlock();
  omptarget_nvptx_TaskDescr *currTaskDescr = getMyTopTaskDescriptor(threadId);
  int numthread;
  if (currTaskDescr->IsParallelConstruct()) {
    numthread =
        GetNumberOfOmpThreads(threadId, isSPMDMode(), isRuntimeUninitialized());
  } else {
    numthread = GetNumberOfOmpTeams();
  }

  if (numthread == 1)
    return 1;
  else if (!__gpu_block_reduce())
    return 2;
  else {
    if (threadIdx.x == 0)
      return 1;
    else
      return 0;
  }
}

EXTERN
int32_t __kmpc_reduce_combined(kmp_Indent *loc) {
  if (threadIdx.x == 0) {
    return 2;
  } else {
    return 0;
  }
}

EXTERN
int32_t __kmpc_reduce_simd(kmp_Indent *loc) {
  if (threadIdx.x % 32 == 0) {
    return 1;
  } else {
    return 0;
  }
}

EXTERN
void __kmpc_nvptx_end_reduce(int32_t global_tid) {}

EXTERN
void __kmpc_nvptx_end_reduce_nowait(int32_t global_tid) {}

EXTERN int32_t __kmpc_shuffle_int32(int32_t val, int16_t delta, int16_t size) {
  return __SHFL_DOWN_SYNC(0xFFFFFFFF, val, delta, size);
}

EXTERN int64_t __kmpc_shuffle_int64(int64_t val, int16_t delta, int16_t size) {
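  // A 64-bit value cannot be moved by a single 32-bit shuffle, so split it
  // into its low and high 32-bit halves, shuffle each half independently,
  // and reassemble the result.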
  int lo, hi;
  asm volatile("mov.b64 {%0,%1}, %2;" : "=r"(lo), "=r"(hi) : "l"(val));
  hi = __SHFL_DOWN_SYNC(0xFFFFFFFF, hi, delta, size);
  lo = __SHFL_DOWN_SYNC(0xFFFFFFFF, lo, delta, size);
  asm volatile("mov.b64 %0, {%1,%2};" : "=l"(val) : "r"(lo), "r"(hi));
  return val;
}

static INLINE void gpu_regular_warp_reduce(void *reduce_data,
                                           kmp_ShuffleReductFctPtr shflFct) {
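  // Classic tree reduction over a full warp: with WARPSIZE == 32 the offsets
  // are 16, 8, 4, 2, 1, so after five steps lane 0 holds the reduction of all
  // 32 lanes. In each step the compiler-generated shflFct folds into lane i
  // the value shuffled down from lane i + offset.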
  for (uint32_t mask = WARPSIZE / 2; mask > 0; mask /= 2) {
    shflFct(reduce_data, /*LaneId - not used= */ 0,
            /*Offset = */ mask, /*AlgoVersion=*/0);
  }
}

static INLINE void gpu_irregular_warp_reduce(void *reduce_data,
                                             kmp_ShuffleReductFctPtr shflFct,
                                             uint32_t size, uint32_t tid) {
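  // Tree reduction over the first 'size' lanes, where 'size' need not be a
  // power of two. For example, with size == 7 the loop issues shuffles with
  // offsets 3, 2, 1; handling of remote lanes that fall outside the shrinking
  // live range is left to the compiler-generated shflFct (AlgoVersion 1).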
  uint32_t curr_size;
  uint32_t mask;
  curr_size = size;
  mask = curr_size / 2;
  while (mask > 0) {
    shflFct(reduce_data, /*LaneId = */ tid, /*Offset=*/mask, /*AlgoVersion=*/1);
    curr_size = (curr_size + 1) / 2;
    mask = curr_size / 2;
  }
}

static INLINE uint32_t
gpu_irregular_simd_reduce(void *reduce_data, kmp_ShuffleReductFctPtr shflFct) {
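  // Reduction over an arbitrary (possibly non-contiguous) set of active
  // lanes. %lanemask_lt / %lanemask_gt are the bitmasks of lanes below and
  // above the caller, so each active lane can compute a compact "logical" id
  // and, while that id stays even, fold in the value of the next active lane
  // above it; the first active lane ends up with the result and returns 1.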
  uint32_t lanemask_lt;
  uint32_t lanemask_gt;
  uint32_t size, remote_id, physical_lane_id;
  physical_lane_id = GetThreadIdInBlock() % WARPSIZE;
  asm("mov.u32 %0, %%lanemask_lt;" : "=r"(lanemask_lt));
  uint32_t Liveness = __BALLOT_SYNC(0xFFFFFFFF, true);
  uint32_t logical_lane_id = __popc(Liveness & lanemask_lt) * 2;
  asm("mov.u32 %0, %%lanemask_gt;" : "=r"(lanemask_gt));
  do {
    Liveness = __BALLOT_SYNC(0xFFFFFFFF, true);
    remote_id = __ffs(Liveness & lanemask_gt);
    size = __popc(Liveness);
    logical_lane_id /= 2;
    shflFct(reduce_data, /*LaneId =*/logical_lane_id,
            /*Offset=*/remote_id - 1 - physical_lane_id, /*AlgoVersion=*/2);
  } while (logical_lane_id % 2 == 0 && size > 1);
  return (logical_lane_id == 0);
}

EXTERN
int32_t __kmpc_nvptx_simd_reduce_nowait(int32_t global_tid, int32_t num_vars,
                                        size_t reduce_size, void *reduce_data,
                                        kmp_ShuffleReductFctPtr shflFct,
                                        kmp_InterWarpCopyFctPtr cpyFct) {
  uint32_t Liveness = __BALLOT_SYNC(0xFFFFFFFF, true);
  if (Liveness == 0xffffffff) {
    gpu_regular_warp_reduce(reduce_data, shflFct);
    return GetThreadIdInBlock() % WARPSIZE ==
           0; // Result on lane 0 of the simd warp.
  } else {
    return gpu_irregular_simd_reduce(
        reduce_data, shflFct); // Result on the first active lane.
  }
}

INLINE
int32_t nvptx_parallel_reduce_nowait(int32_t global_tid, int32_t num_vars,
                                     size_t reduce_size, void *reduce_data,
                                     kmp_ShuffleReductFctPtr shflFct,
                                     kmp_InterWarpCopyFctPtr cpyFct,
                                     bool isSPMDExecutionMode,
                                     bool isRuntimeUninitialized = false) {
  uint32_t BlockThreadId = GetLogicalThreadIdInBlock();
  uint32_t NumThreads = GetNumberOfOmpThreads(
      BlockThreadId, isSPMDExecutionMode, isRuntimeUninitialized);
  if (NumThreads == 1)
    return 1;
  /*
   * This reduce function handles reduction within a team. It handles
   * parallel regions at both the L1 and L2 parallelism levels. It also
   * supports the Generic, SPMD, and NoOMP modes.
   *
   * 1. Reduce within a warp.
   * 2. Warp master copies value to warp 0 via shared memory.
   * 3. Warp 0 reduces to a single value.
   * 4. The reduced value is available in the thread that returns 1.
   */
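  // For example, with 128 threads in SPMD mode: each of the 4 warps first
  // reduces its 32 values to the warp master (step 1), cpyFct copies the 4
  // partial results into warp 0 via shared memory (step 2), warp 0 reduces
  // those 4 values (step 3), and thread 0 returns 1 as the owner of the
  // final value (step 4).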

#if defined(__CUDA_ARCH__) && __CUDA_ARCH__ >= 700
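  // Volta (sm_70) and newer support independent thread scheduling within a
  // warp; here warp participation is derived from the thread count rather
  // than from an activemask ballot as in the pre-Volta path below.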
  uint32_t WarpsNeeded = (NumThreads + WARPSIZE - 1) / WARPSIZE;
  uint32_t WarpId = BlockThreadId / WARPSIZE;

  // Volta execution model:
  // For the Generic execution mode a parallel region has either 1 thread or
  // a multiple of 32 threads. For the SPMD execution mode we may have any
  // number of threads.
  if ((NumThreads % WARPSIZE == 0) || (WarpId < WarpsNeeded - 1))
    gpu_regular_warp_reduce(reduce_data, shflFct);
  else if (NumThreads > 1) // Only SPMD execution mode comes through here.
    gpu_irregular_warp_reduce(reduce_data, shflFct,
                              /*LaneCount=*/NumThreads % WARPSIZE,
                              /*LaneId=*/GetThreadIdInBlock() % WARPSIZE);

  // When we have more than [warpsize] threads, a block reduction is
  // performed here.
  //
  // Only an L1 parallel region can enter this if condition.
  if (NumThreads > WARPSIZE) {
    // Gather all the reduced values from each warp
    // to the first warp.
    cpyFct(reduce_data, WarpsNeeded);

    if (WarpId == 0)
      gpu_irregular_warp_reduce(reduce_data, shflFct, WarpsNeeded,
                                BlockThreadId);

    return BlockThreadId == 0;
  }
  return BlockThreadId == 0;
#else
  uint32_t Liveness = __BALLOT_SYNC(0xFFFFFFFF, true);
  if (Liveness == 0xffffffff) // Full warp
    gpu_regular_warp_reduce(reduce_data, shflFct);
  else if (!(Liveness & (Liveness + 1))) // Partial warp but contiguous lanes
    gpu_irregular_warp_reduce(reduce_data, shflFct,
                              /*LaneCount=*/__popc(Liveness),
                              /*LaneId=*/GetThreadIdInBlock() % WARPSIZE);
  else if (!isRuntimeUninitialized) // Dispersed lanes. Only threads in an L2
                                    // parallel region may enter here; return
                                    // early.
    return gpu_irregular_simd_reduce(reduce_data, shflFct);

  // When we have more than [warpsize] threads, a block reduction is
  // performed here.
  //
  // Only an L1 parallel region can enter this if condition.
  if (NumThreads > WARPSIZE) {
    uint32_t WarpsNeeded = (NumThreads + WARPSIZE - 1) / WARPSIZE;
    // Gather all the reduced values from each warp
    // to the first warp.
    cpyFct(reduce_data, WarpsNeeded);

    uint32_t WarpId = BlockThreadId / WARPSIZE;
    if (WarpId == 0)
      gpu_irregular_warp_reduce(reduce_data, shflFct, WarpsNeeded,
                                BlockThreadId);

    return BlockThreadId == 0;
  } else if (isRuntimeUninitialized /* Never an L2 parallel region without the OMP runtime */) {
    return BlockThreadId == 0;
  }

  // Get the OMP thread Id. This is different from BlockThreadId in the case of
  // an L2 parallel region.
  return GetOmpThreadId(BlockThreadId, isSPMDExecutionMode,
                        isRuntimeUninitialized) == 0;
#endif // __CUDA_ARCH__ >= 700
}

EXTERN
int32_t __kmpc_nvptx_parallel_reduce_nowait(
    int32_t global_tid, int32_t num_vars, size_t reduce_size, void *reduce_data,
    kmp_ShuffleReductFctPtr shflFct, kmp_InterWarpCopyFctPtr cpyFct) {
  return nvptx_parallel_reduce_nowait(global_tid, num_vars, reduce_size,
                                      reduce_data, shflFct, cpyFct,
                                      /*isSPMDExecutionMode=*/isSPMDMode());
}

EXTERN
int32_t __kmpc_nvptx_parallel_reduce_nowait_simple_spmd(
    int32_t global_tid, int32_t num_vars, size_t reduce_size, void *reduce_data,
    kmp_ShuffleReductFctPtr shflFct, kmp_InterWarpCopyFctPtr cpyFct) {
  return nvptx_parallel_reduce_nowait(global_tid, num_vars, reduce_size,
                                      reduce_data, shflFct, cpyFct,
                                      /*isSPMDExecutionMode=*/true,
                                      /*isRuntimeUninitialized=*/true);
}

EXTERN
int32_t __kmpc_nvptx_parallel_reduce_nowait_simple_generic(
    int32_t global_tid, int32_t num_vars, size_t reduce_size, void *reduce_data,
    kmp_ShuffleReductFctPtr shflFct, kmp_InterWarpCopyFctPtr cpyFct) {
  return nvptx_parallel_reduce_nowait(global_tid, num_vars, reduce_size,
                                      reduce_data, shflFct, cpyFct,
                                      /*isSPMDExecutionMode=*/false,
                                      /*isRuntimeUninitialized=*/true);
}

INLINE
int32_t nvptx_teams_reduce_nowait(
    int32_t global_tid, int32_t num_vars, size_t reduce_size, void *reduce_data,
    kmp_ShuffleReductFctPtr shflFct, kmp_InterWarpCopyFctPtr cpyFct,
    kmp_CopyToScratchpadFctPtr scratchFct, kmp_LoadReduceFctPtr ldFct,
    bool isSPMDExecutionMode, bool isRuntimeUninitialized = false) {
  uint32_t ThreadId = GetLogicalThreadIdInBlock();
  // In non-generic mode all workers participate in the teams reduction.
  // In generic mode only the team master participates in the teams
  // reduction because the workers are waiting for parallel work.
  uint32_t NumThreads =
      isSPMDExecutionMode
          ? GetNumberOfOmpThreads(ThreadId, /*isSPMDExecutionMode=*/true,
                                  isRuntimeUninitialized)
          : /*Master thread only*/ 1;
  uint32_t TeamId = GetBlockIdInKernel();
  uint32_t NumTeams = GetNumberOfBlocksInKernel();
  __shared__ volatile bool IsLastTeam;
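  // Overall scheme: every team's master dumps its partial result into a
  // global scratchpad slot, the last team to arrive (detected below with
  // atomicInc) reloads and reduces all of the slots, and only a thread of
  // that last team can return 1.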

  // Team masters of all teams write to the scratchpad.
  if (ThreadId == 0) {
    unsigned int *timestamp = GetTeamsReductionTimestamp();
    char *scratchpad = GetTeamsReductionScratchpad();

    scratchFct(reduce_data, scratchpad, TeamId, NumTeams);
    __threadfence();

    // atomicInc increments 'timestamp' and has a range [0, NumTeams-1].
    // It resets 'timestamp' back to 0 once the last team increments
    // this counter.
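    // For example, with NumTeams == 4 the calls return 0, 1, 2, 3 in arrival
    // order; the team that reads 3 (== NumTeams - 1) is last and the counter
    // wraps back to 0, ready for the next reduction.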
    unsigned val = atomicInc(timestamp, NumTeams - 1);
    IsLastTeam = val == NumTeams - 1;
  }

  // We have to wait on the L1 barrier because in GENERIC mode the workers
  // are waiting on barrier 0 for work.
  //
  // If we guard this barrier as follows it leads to deadlock, probably
  // because of a compiler bug: if (!IsGenericMode()) __syncthreads();
  uint16_t SyncWarps = (NumThreads + WARPSIZE - 1) / WARPSIZE;
  named_sync(L1_BARRIER, SyncWarps * WARPSIZE);

  // If this team is not the last, quit.
  if (/* Volatile read by all threads */ !IsLastTeam)
    return 0;

    //
    // Last team processing.
    //

    // Threads in excess of #teams do not participate in reduction of the
    // scratchpad values.
#if defined(__CUDA_ARCH__) && __CUDA_ARCH__ >= 700
  uint32_t ActiveThreads = NumThreads;
  if (NumTeams < NumThreads) {
    ActiveThreads =
        (NumTeams < WARPSIZE) ? 1 : NumTeams & ~((uint16_t)WARPSIZE - 1);
  }
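  // When there are fewer teams than threads, ActiveThreads is NumTeams
  // rounded down to a multiple of the warp size (e.g. 100 teams -> 96
  // active threads), or a single thread if NumTeams < WARPSIZE.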
  if (ThreadId >= ActiveThreads)
    return 0;

  // Load from scratchpad and reduce.
  char *scratchpad = GetTeamsReductionScratchpad();
  ldFct(reduce_data, scratchpad, ThreadId, NumTeams, /*Load only*/ 0);
  for (uint32_t i = ActiveThreads + ThreadId; i < NumTeams; i += ActiveThreads)
    ldFct(reduce_data, scratchpad, i, NumTeams, /*Load and reduce*/ 1);

  uint32_t WarpsNeeded = (ActiveThreads + WARPSIZE - 1) / WARPSIZE;
  uint32_t WarpId = ThreadId / WARPSIZE;

  // Reduce across warps to the warp master.
  if ((ActiveThreads % WARPSIZE == 0) ||
      (WarpId < WarpsNeeded - 1)) // Full warp
    gpu_regular_warp_reduce(reduce_data, shflFct);
  else if (ActiveThreads > 1) // Partial warp but contiguous lanes
    // Only SPMD execution mode comes through this case.
    gpu_irregular_warp_reduce(reduce_data, shflFct,
                              /*LaneCount=*/ActiveThreads % WARPSIZE,
                              /*LaneId=*/ThreadId % WARPSIZE);

  // When we have more than [warpsize] threads, a block reduction is
  // performed here.
  if (ActiveThreads > WARPSIZE) {
    // Gather all the reduced values from each warp
    // to the first warp.
    cpyFct(reduce_data, WarpsNeeded);

    if (WarpId == 0)
      gpu_irregular_warp_reduce(reduce_data, shflFct, WarpsNeeded, ThreadId);
  }
#else
  if (ThreadId >= NumTeams)
    return 0;

  // Load from scratchpad and reduce.
  char *scratchpad = GetTeamsReductionScratchpad();
  ldFct(reduce_data, scratchpad, ThreadId, NumTeams, /*Load only*/ 0);
  for (uint32_t i = NumThreads + ThreadId; i < NumTeams; i += NumThreads)
    ldFct(reduce_data, scratchpad, i, NumTeams, /*Load and reduce*/ 1);

  // Reduce across warps to the warp master.
  uint32_t Liveness = __BALLOT_SYNC(0xFFFFFFFF, true);
  if (Liveness == 0xffffffff) // Full warp
    gpu_regular_warp_reduce(reduce_data, shflFct);
  else // Partial warp but contiguous lanes
    gpu_irregular_warp_reduce(reduce_data, shflFct,
                              /*LaneCount=*/__popc(Liveness),
                              /*LaneId=*/ThreadId % WARPSIZE);

  // When we have more than [warpsize] threads, a block reduction is
  // performed here.
  uint32_t ActiveThreads = NumTeams < NumThreads ? NumTeams : NumThreads;
  if (ActiveThreads > WARPSIZE) {
    uint32_t WarpsNeeded = (ActiveThreads + WARPSIZE - 1) / WARPSIZE;
    // Gather all the reduced values from each warp
    // to the first warp.
    cpyFct(reduce_data, WarpsNeeded);

    uint32_t WarpId = ThreadId / WARPSIZE;
    if (WarpId == 0)
      gpu_irregular_warp_reduce(reduce_data, shflFct, WarpsNeeded, ThreadId);
  }
#endif // __CUDA_ARCH__ >= 700

  return ThreadId == 0;
}

EXTERN
int32_t __kmpc_nvptx_teams_reduce_nowait(int32_t global_tid, int32_t num_vars,
                                         size_t reduce_size, void *reduce_data,
                                         kmp_ShuffleReductFctPtr shflFct,
                                         kmp_InterWarpCopyFctPtr cpyFct,
                                         kmp_CopyToScratchpadFctPtr scratchFct,
                                         kmp_LoadReduceFctPtr ldFct) {
  return nvptx_teams_reduce_nowait(global_tid, num_vars, reduce_size,
                                   reduce_data, shflFct, cpyFct, scratchFct,
                                   ldFct, /*isSPMDExecutionMode=*/isSPMDMode());
}

EXTERN
int32_t __kmpc_nvptx_teams_reduce_nowait_simple_spmd(
    int32_t global_tid, int32_t num_vars, size_t reduce_size, void *reduce_data,
    kmp_ShuffleReductFctPtr shflFct, kmp_InterWarpCopyFctPtr cpyFct,
    kmp_CopyToScratchpadFctPtr scratchFct, kmp_LoadReduceFctPtr ldFct) {
  return nvptx_teams_reduce_nowait(global_tid, num_vars, reduce_size,
                                   reduce_data, shflFct, cpyFct, scratchFct,
                                   ldFct,
                                   /*isSPMDExecutionMode=*/true,
                                   /*isRuntimeUninitialized=*/true);
}

EXTERN
int32_t __kmpc_nvptx_teams_reduce_nowait_simple_generic(
    int32_t global_tid, int32_t num_vars, size_t reduce_size, void *reduce_data,
    kmp_ShuffleReductFctPtr shflFct, kmp_InterWarpCopyFctPtr cpyFct,
    kmp_CopyToScratchpadFctPtr scratchFct, kmp_LoadReduceFctPtr ldFct) {
  return nvptx_teams_reduce_nowait(global_tid, num_vars, reduce_size,
                                   reduce_data, shflFct, cpyFct, scratchFct,
                                   ldFct,
                                   /*isSPMDExecutionMode=*/false,
                                   /*isRuntimeUninitialized=*/true);
}