Skip to content

Commit ec80528

Browse files
committed
ParallelFor with compile time optimization of kernels with run time parameters
Branches inside ParallelFor can be very expensive. If a branch uses a lot of resources (e.g., registers), it can significantly affect the performance even if at run time the branch is never executed because it affects the GPU occupancy. For CPUs, it can affect vectorization of the kernel. The new ParallelFor functions use C++17 fold expression to generate kernel launches for all run time variants. The kernel function can use constexpr if to discard unused code blocks for better run time performance. Here are two examples of how to use them. int runtime_option = ...; enum All_options : int { A0, A1, A2, A3}; // Four ParallelFors will be generated. ParallelFor(TypeList<CompileTimeOptions<A0,A1,A2,A3>>{}, {runtime_option}, box, [=] AMREX_GPU_DEVICE (int i, int j, int k, auto control) { ... if constexpr (control.value == A0) { ... } else if constexpr (control.value == A1) { ... } else if constexpr (control.value == A2) { ... } else { ... } ... }); and int A_runtime_option = ...; int B_runtime_option = ...; enum A_options : int { A0, A1, A2, A3}; enum B_options : int { B0, B1 }; // 4*2=8 ParallelFors will be generated. ParallelFor(TypeList<CompileTimeOptions<A0,A1,A2,A3>, CompileTimeOptions<B0,B1> > {}, {A_runtime_option, B_runtime_option}, N, [=] AMREX_GPU_DEVICE (int i, auto A_control, auto B_control) { ... if constexpr (A_control.value == A0) { ... } else if constexpr (A_control.value == A1) { ... } else if constexpr (A_control.value == A2) { ... } else { ... } if constexpr (A_control.value != A3 && B_control.value == B1) { ... } ... }); Note that due to a limitation of CUDA's extended device lambda, the constexpr if block cannot be the one that captures a variable first. If nvcc complains about it, you will have to manually capture it outside constexpr if. The data type for the parameters is int. Thank Maikel Nadolski and Alex Sinn for showing us the meta-programming techniques used here.
1 parent 826cd37 commit ec80528

File tree

9 files changed

+422
-1
lines changed

9 files changed

+422
-1
lines changed
Lines changed: 324 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,324 @@
1+
#ifndef AMREX_CTO_PARALLEL_FOR_H_
2+
#define AMREX_CTO_PARALLEL_FOR_H_
3+
4+
/* This header is not for the users to include directly. It's meant to be
5+
* included in AMReX_GpuLaunch.H, which has included the headers needed
6+
* here. */
7+
8+
/* Thank Maikel Nadolski and Alex Sinn for the techniques used here! */
9+
10+
namespace amrex {
11+
12+
template <int... ctr>
13+
struct CompileTimeOptions {
14+
// TypeList is defined in AMReX_Tuple.H
15+
using list_type = TypeList<std::integral_constant<int, ctr>...>;
16+
};
17+
18+
#if (__cplusplus >= 201703L)
19+
20+
namespace meta
21+
{
22+
template <typename... As, typename... Bs>
23+
constexpr auto operator+ (TypeList<As...>, TypeList<Bs...>) {
24+
return TypeList<As..., Bs...>{};
25+
}
26+
27+
template <typename... Ls, typename A>
28+
constexpr auto single_product (TypeList<Ls...>, A) {
29+
return TypeList<decltype(Ls{} + TypeList<A>{})...>{};
30+
}
31+
32+
template <typename LLs, typename... As>
33+
constexpr auto operator* (LLs, TypeList<As...>) {
34+
return (TypeList<>{} + ... + single_product(LLs{}, As{}));
35+
}
36+
37+
template <typename... Ls>
38+
constexpr auto cartesian_product_n (TypeList<Ls...>) {
39+
return (TypeList<TypeList<>>{} * ... * Ls{});
40+
}
41+
}
42+
43+
namespace detail
44+
{
45+
template <int MT, typename T, class F, typename... As>
46+
std::enable_if_t<std::is_integral<T>::value || std::is_same<T,Box>::value, bool>
47+
ParallelFor_helper2 (T const& N, F&& f, TypeList<As...>,
48+
std::array<int,sizeof...(As)> const& runtime_options)
49+
{
50+
if (runtime_options == std::array<int,sizeof...(As)>{As::value...}) {
51+
if constexpr (std::is_integral<T>::value) {
52+
ParallelFor<MT>(N, [f] AMREX_GPU_DEVICE (T i) noexcept
53+
{
54+
f(i, As{}...);
55+
});
56+
} else {
57+
ParallelFor<MT>(N, [f] AMREX_GPU_DEVICE (int i, int j, int k) noexcept
58+
{
59+
f(i, j, k, As{}...);
60+
});
61+
}
62+
return true;
63+
} else {
64+
return false;
65+
}
66+
}
67+
68+
template <int MT, typename T, class F, typename... As>
69+
std::enable_if_t<std::is_integral<T>::value, bool>
70+
ParallelFor_helper2 (Box const& box, T ncomp, F&& f, TypeList<As...>,
71+
std::array<int,sizeof...(As)> const& runtime_options)
72+
{
73+
if (runtime_options == std::array<int,sizeof...(As)>{As::value...}) {
74+
ParallelFor<MT>(box, ncomp, [f] AMREX_GPU_DEVICE (int i, int j, int k, T n) noexcept
75+
{
76+
f(i, j, k, n, As{}...);
77+
});
78+
return true;
79+
} else {
80+
return false;
81+
}
82+
}
83+
84+
template <int MT, typename T, class F, typename... PPs, typename RO>
85+
std::enable_if_t<std::is_integral<T>::value || std::is_same<T,Box>::value>
86+
ParallelFor_helper1 (T const& N, F&& f, TypeList<PPs...>,
87+
RO const& runtime_options)
88+
{
89+
bool found_option = (false || ... ||
90+
ParallelFor_helper2<MT>(N, std::forward<F>(f),
91+
PPs{}, runtime_options));
92+
amrex::ignore_unused(found_option);
93+
AMREX_ASSERT(found_option);
94+
}
95+
96+
template <int MT, typename T, class F, typename... PPs, typename RO>
97+
std::enable_if_t<std::is_integral<T>::value>
98+
ParallelFor_helper1 (Box const& box, T ncomp, F&& f, TypeList<PPs...>,
99+
RO const& runtime_options)
100+
{
101+
bool found_option = (false || ... ||
102+
ParallelFor_helper2<MT>(box, ncomp, std::forward<F>(f),
103+
PPs{}, runtime_options));
104+
amrex::ignore_unused(found_option);
105+
AMREX_ASSERT(found_option);
106+
}
107+
}
108+
109+
#endif
110+
111+
template <int MT, typename T, class F, typename... CTOs>
112+
std::enable_if_t<std::is_integral<T>::value>
113+
ParallelFor (TypeList<CTOs...> /*list_of_compile_time_options*/,
114+
std::array<int,sizeof...(CTOs)> const& runtime_options,
115+
T N, F&& f)
116+
{
117+
#if (__cplusplus >= 201703L)
118+
using OptionsListList = TypeList<typename CTOs::list_type...>;
119+
detail::ParallelFor_helper1<MT>(N, std::forward<F>(f),
120+
meta::cartesian_product_n(OptionsListList{}),
121+
runtime_options);
122+
#else
123+
amrex::ignore_unused(N, f, runtime_options);
124+
static_assert(std::is_integral<F>::value, "This requires C++17");
125+
#endif
126+
}
127+
128+
template <int MT, class F, typename... CTOs>
129+
void ParallelFor (TypeList<CTOs...> /*list_of_compile_time_options*/,
130+
std::array<int,sizeof...(CTOs)> const& runtime_options,
131+
Box const& box, F&& f)
132+
{
133+
#if (__cplusplus >= 201703L)
134+
using OptionsListList = TypeList<typename CTOs::list_type...>;
135+
detail::ParallelFor_helper1<MT>(box, std::forward<F>(f),
136+
meta::cartesian_product_n(OptionsListList{}),
137+
runtime_options);
138+
#else
139+
amrex::ignore_unused(box, f, runtime_options);
140+
static_assert(std::is_integral<F>::value, "This requires C++17");
141+
#endif
142+
}
143+
144+
template <int MT, typename T, class F, typename... CTOs>
145+
std::enable_if_t<std::is_integral<T>::value>
146+
ParallelFor (TypeList<CTOs...> /*list_of_compile_time_options*/,
147+
std::array<int,sizeof...(CTOs)> const& runtime_options,
148+
Box const& box, T ncomp, F&& f)
149+
{
150+
#if (__cplusplus >= 201703L)
151+
using OptionsListList = TypeList<typename CTOs::list_type...>;
152+
detail::ParallelFor_helper1<MT>(box, ncomp, std::forward<F>(f),
153+
meta::cartesian_product_n(OptionsListList{}),
154+
runtime_options);
155+
#else
156+
amrex::ignore_unused(box, ncomp, f, runtime_options);
157+
static_assert(std::is_integral<F>::value, "This requires C++17");
158+
#endif
159+
}
160+
161+
/**
162+
* \brief ParallelFor with compile time optimization of kernels with run time options.
163+
*
164+
* It uses fold expression to generate kernel launches for all combinations
165+
* of the run time options. The kernel function can use constexpr if to
166+
* discard unused code blocks for better run time performance. In the
167+
* example below, the code will be expanded into 4*2=8 normal ParallelFors
168+
* for all combinations of the run time parameters.
169+
\verbatim
170+
int A_runtime_option = ...;
171+
int B_runtime_option = ...;
172+
enum A_options : int { A0, A1, A2, A3};
173+
enum B_options : int { B0, B1 };
174+
ParallelFor(TypeList<CompileTimeOptions<A0,A1,A2,A3>,
175+
CompileTimeOptions<B0,B1>>{},
176+
{A_runtime_option, B_runtime_option},
177+
N, [=] AMREX_GPU_DEVICE (int i, auto A_control, auto B_control)
178+
{
179+
...
180+
if constexpr (A_control.value == A0) {
181+
...
182+
} else if constexpr (A_control.value == A1) {
183+
...
184+
} else if constexpr (A_control.value == A2) {
185+
...
186+
else {
187+
...
188+
}
189+
if constexpr (A_control.value != A3 && B_control.value == B1) {
190+
...
191+
}
192+
...
193+
});
194+
\endverbatim
195+
* Note that due to a limitation of CUDA's extended device lambda, the
196+
* constexpr if block cannot be the one that captures a variable first.
197+
* If nvcc complains about it, you will have to manually capture it outside
198+
* constexpr if. The data type for the parameters is int.
199+
*
200+
* \param ctos list of all possible values of the parameters.
201+
* \param option the run time parameters.
202+
* \param N an interger specifying the 1D for loop's range.
203+
* \param f a callable object taking an integer and working on that iteration.
204+
*/
205+
template <typename T, class F, typename... CTOs>
206+
std::enable_if_t<std::is_integral<T>::value>
207+
ParallelFor (TypeList<CTOs...> ctos,
208+
std::array<int,sizeof...(CTOs)> const& option,
209+
T N, F&& f)
210+
{
211+
ParallelFor<AMREX_GPU_MAX_THREADS>(ctos, option, N, std::forward<F>(f));
212+
}
213+
214+
/**
215+
* \brief ParallelFor with compile time optimization of kernels with run time options.
216+
*
217+
* It uses fold expression to generate kernel launches for all combinations
218+
* of the run time options. The kernel function can use constexpr if to
219+
* discard unused code blocks for better run time performance. In the
220+
* example below, the code will be expanded into 4*2=8 normal ParallelFors
221+
* for all combinations of the run time parameters.
222+
\verbatim
223+
int A_runtime_option = ...;
224+
int B_runtime_option = ...;
225+
enum A_options : int { A0, A1, A2, A3};
226+
enum B_options : int { B0, B1 };
227+
ParallelFor(TypeList<CompileTimeOptions<A0,A1,A2,A3>,
228+
CompileTimeOptions<B0,B1>>{},
229+
{A_runtime_option, B_runtime_option},
230+
box, [=] AMREX_GPU_DEVICE (int i, int j, int k,
231+
auto A_control, auto B_control)
232+
{
233+
...
234+
if constexpr (A_control.value == A0) {
235+
...
236+
} else if constexpr (A_control.value == A1) {
237+
...
238+
} else if constexpr (A_control.value == A2) {
239+
...
240+
else {
241+
...
242+
}
243+
if constexpr (A_control.value != A3 && B_control.value == B1) {
244+
...
245+
}
246+
...
247+
});
248+
\endverbatim
249+
* Note that due to a limitation of CUDA's extended device lambda, the
250+
* constexpr if block cannot be the one that captures a variable first.
251+
* If nvcc complains about it, you will have to manually capture it outside
252+
* constexpr if. The data type for the parameters is int.
253+
*
254+
* \param ctos list of all possible values of the parameters.
255+
* \param option the run time parameters.
256+
* \param box a Box specifying the 3D for loop's range.
257+
* \param f a callable object taking three integers and working on the given cell.
258+
*/
259+
template <class F, typename... CTOs>
260+
void ParallelFor (TypeList<CTOs...> ctos,
261+
std::array<int,sizeof...(CTOs)> const& option,
262+
Box const& box, F&& f)
263+
{
264+
ParallelFor<AMREX_GPU_MAX_THREADS>(ctos, option, box, std::forward<F>(f));
265+
}
266+
267+
/**
268+
* \brief ParallelFor with compile time optimization of kernels with run time options.
269+
*
270+
* It uses fold expression to generate kernel launches for all combinations
271+
* of the run time options. The kernel function can use constexpr if to
272+
* discard unused code blocks for better run time performance. In the
273+
* example below, the code will be expanded into 4*2=8 normal ParallelFors
274+
* for all combinations of the run time parameters.
275+
\verbatim
276+
int A_runtime_option = ...;
277+
int B_runtime_option = ...;
278+
enum A_options : int { A0, A1, A2, A3};
279+
enum B_options : int { B0, B1 };
280+
ParallelFor(TypeList<CompileTimeOptions<A0,A1,A2,A3>,
281+
CompileTimeOptions<B0,B1>>{},
282+
{A_runtime_option, B_runtime_option},
283+
box, ncomp, [=] AMREX_GPU_DEVICE (int i, int j, int k, int n,
284+
auto A_control, auto B_control)
285+
{
286+
...
287+
if constexpr (A_control.value == A0) {
288+
...
289+
} else if constexpr (A_control.value == A1) {
290+
...
291+
} else if constexpr (A_control.value == A2) {
292+
...
293+
else {
294+
...
295+
}
296+
if constexpr (A_control.value != A3 && B_control.value == B1) {
297+
...
298+
}
299+
...
300+
});
301+
\endverbatim
302+
* Note that due to a limitation of CUDA's extended device lambda, the
303+
* constexpr if block cannot be the one that captures a variable first.
304+
* If nvcc complains about it, you will have to manually capture it outside
305+
* constexpr if. The data type for the parameters is int.
306+
*
307+
* \param ctos list of all possible values of the parameters.
308+
* \param option the run time parameters.
309+
* \param box a Box specifying the iteration in 3D space.
310+
* \param ncomp an integer specifying the range for iteration over components.
311+
* \param f a callable object taking three integers and working on the given cell.
312+
*/
313+
template <typename T, class F, typename... CTOs>
314+
std::enable_if_t<std::is_integral<T>::value>
315+
ParallelFor (TypeList<CTOs...> ctos,
316+
std::array<int,sizeof...(CTOs)> const& option,
317+
Box const& box, T ncomp, F&& f)
318+
{
319+
ParallelFor<AMREX_GPU_MAX_THREADS>(ctos, option, box, ncomp, std::forward<F>(f));
320+
}
321+
322+
}
323+
324+
#endif

Src/Base/AMReX_GpuLaunch.H

Lines changed: 2 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -443,4 +443,6 @@ namespace Gpu {
443443

444444
#endif
445445

446+
#include <AMReX_CTOParallelForImpl.H>
447+
446448
#endif

Src/Base/CMakeLists.txt

Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -223,6 +223,7 @@ target_sources( amrex
223223
AMReX_MFParallelForC.H
224224
AMReX_MFParallelForG.H
225225
AMReX_TagParallelFor.H
226+
AMReX_CTOParallelForImpl.H
226227
AMReX_ParReduce.H
227228
# CUDA --------------------------------------------------------------------
228229
AMReX_CudaGraph.H

Src/Base/Make.package

Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -100,6 +100,7 @@ C$(AMREX_BASE)_headers += AMReX_MFParallelForC.H
100100
C$(AMREX_BASE)_headers += AMReX_MFParallelForG.H
101101

102102
C$(AMREX_BASE)_headers += AMReX_TagParallelFor.H
103+
C$(AMREX_BASE)_headers += AMReX_CTOParallelForImpl.H
103104

104105
C$(AMREX_BASE)_headers += AMReX_ParReduce.H
105106

Tests/CMakeLists.txt

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -1,7 +1,7 @@
11
#
22
# List of subdirectories to search for CMakeLists.
33
#
4-
set( AMREX_TESTS_SUBDIRS AsyncOut MultiBlock Amr CLZ Parser)
4+
set( AMREX_TESTS_SUBDIRS AsyncOut MultiBlock Amr CLZ Parser CTOParFor)
55

66
if (AMReX_PARTICLES)
77
list(APPEND AMREX_TESTS_SUBDIRS Particles)

Tests/CTOParFor/CMakeLists.txt

Lines changed: 7 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,7 @@
1+
# Register the CTOParFor test (compile-time-options ParallelFor) with the
# AMReX test harness.  The test has a single source file and no input files.
set(_sources main.cpp)
set(_input_files)

setup_test(_sources _input_files)

unset(_sources)
unset(_input_files)

Tests/CTOParFor/GNUmakefile

Lines changed: 20 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,20 @@
1+
# Build configuration for the CTOParFor test.
AMREX_HOME = ../../

DEBUG = FALSE
DIM = 3
COMP = gcc

USE_MPI = FALSE
USE_OMP = FALSE
USE_CUDA = FALSE

TINY_PROFILE = FALSE

# C++17 is required: the compile-time-options ParallelFor uses fold
# expressions and constexpr if.
CXXSTD = c++17

include $(AMREX_HOME)/Tools/GNUMake/Make.defs

include ./Make.package
include $(AMREX_HOME)/Src/Base/Make.package

include $(AMREX_HOME)/Tools/GNUMake/Make.rules

Tests/CTOParFor/Make.package

Lines changed: 4 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,4 @@
1+
# Source files for the CTOParFor test.
CEXE_sources += main.cpp
2+
3+
4+

0 commit comments

Comments
 (0)