Commit ac40dbe
[Offload][CUDA] Allow CUDA kernels to use LLVM/Offload
Through the new `-foffload-via-llvm` flag, CUDA kernels can now be lowered to the LLVM/Offload API. On the Clang side, this is simply done by using the OpenMP offload toolchain and emitting calls to `llvm*` functions to orchestrate the kernel launch rather than `cuda*` functions. These `llvm*` functions are implemented on top of the existing LLVM/Offload API.

As we are about to redefine the Offload API, this will help us in the design process as a second offload language.

We do not support any CUDA APIs yet, however, we could: https://www.osti.gov/servlets/purl/1892137

For proper host execution we need to resurrect/rebase https://tianshilei.me/wp-content/uploads/2021/12/llpp-2021.pdf (which was designed for debugging).

```
❯❯❯ cat test.cu
extern "C" {
void *llvm_omp_target_alloc_shared(size_t Size, int DeviceNum);
void llvm_omp_target_free_shared(void *DevicePtr, int DeviceNum);
}

__global__ void square(int *A) { *A = 42; }

int main(int argc, char **argv) {
  int DevNo = 0;
  int *Ptr = reinterpret_cast<int *>(llvm_omp_target_alloc_shared(4, DevNo));
  *Ptr = 7;
  printf("Ptr %p, *Ptr %i\n", Ptr, *Ptr);
  square<<<1, 1>>>(Ptr);
  printf("Ptr %p, *Ptr %i\n", Ptr, *Ptr);
  llvm_omp_target_free_shared(Ptr, DevNo);
}

❯❯❯ clang++ test.cu -O3 -o test123 -foffload-via-llvm --offload-arch=native

❯❯❯ llvm-objdump --offloading test123

test123:        file format elf64-x86-64

OFFLOADING IMAGE [0]:
kind            elf
arch            gfx90a
triple          amdgcn-amd-amdhsa
producer        openmp

❯❯❯ LIBOMPTARGET_INFO=16 ./test123
Ptr 0x155448ac8000, *Ptr 7
Ptr 0x155448ac8000, *Ptr 42
```
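To make the lowering concrete, the device stub emitted for `square<<<1, 1>>>(Ptr)` under `-foffload-via-llvm` conceptually reduces to the sketch below. The `llvmLaunchKernel` name follows from the prefix handling in the `CGCUDANV.cpp` diff further down; its exact signature is an assumption modeled on `cudaLaunchKernel`, and the `dim3` stand-in is ours:

```cpp
#include <cstddef>

// Minimal stand-in for CUDA's dim3 so this sketch is self-contained.
struct dim3 { unsigned x = 1, y = 1, z = 1; };

// Assumed cudaLaunchKernel-style entry point; the real declaration lives in
// the LLVM/Offload runtime and may differ.
extern "C" unsigned llvmLaunchKernel(const void *Func, dim3 GridDim,
                                     dim3 BlockDim, void *Args,
                                     size_t SharedMem, void *Stream);

// Hand-written equivalent of the stub emitted for `square<<<1, 1>>>(Ptr)`.
void launch_square(int *Ptr) {
  // Arguments are flattened into one struct and prefixed with their size
  // (see prepareKernelArgsLLVMOffload in the CGCUDANV.cpp diff below).
  struct { int *A; } KernelArgs{Ptr};
  struct { size_t Size; void *Args; void **ArgPtrs; } LaunchParams{
      sizeof(KernelArgs), &KernelArgs, nullptr};
  // As in CUDA, the stub's own address serves as the kernel handle here.
  llvmLaunchKernel(reinterpret_cast<const void *>(&launch_square),
                   dim3{1, 1, 1}, dim3{1, 1, 1}, &LaunchParams,
                   /*SharedMem=*/0, /*Stream=*/nullptr);
}
```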
1 parent 9fa161e commit ac40dbe

27 files changed, +576 −53 lines

clang/include/clang/Basic/LangOptions.def

Lines changed: 1 addition & 0 deletions
```diff
@@ -288,6 +288,7 @@ LANGOPT(GPUMaxThreadsPerBlock, 32, 1024, "default max threads per block for kern
 LANGOPT(GPUDeferDiag, 1, 0, "defer host/device related diagnostic messages for CUDA/HIP")
 LANGOPT(GPUExcludeWrongSideOverloads, 1, 0, "always exclude wrong side overloads in overloading resolution for CUDA/HIP")
 LANGOPT(OffloadingNewDriver, 1, 0, "use the new driver for generating offloading code.")
+LANGOPT(OffloadViaLLVM, 1, 0, "target LLVM/Offload as portable offloading runtime.")
 
 LANGOPT(SYCLIsDevice , 1, 0, "Generate code for SYCL device")
 LANGOPT(SYCLIsHost , 1, 0, "SYCL host compilation")
```

clang/include/clang/Driver/Options.td

Lines changed: 6 additions & 0 deletions
```diff
@@ -1296,6 +1296,12 @@ def no_offload_compress : Flag<["--"], "no-offload-compress">;
 def offload_compression_level_EQ : Joined<["--"], "offload-compression-level=">,
   Flags<[HelpHidden]>,
   HelpText<"Compression level for offload device binaries (HIP only)">;
+
+defm offload_via_llvm : BoolFOption<"offload-via-llvm",
+  LangOpts<"OffloadViaLLVM">, DefaultFalse,
+  PosFlag<SetTrue, [], [ClangOption, CC1Option], "Use">,
+  NegFlag<SetFalse, [], [ClangOption], "Don't use">,
+  BothFlags<[], [ClangOption], " LLVM/Offload as portable offloading runtime.">>;
 }
 
 // CUDA options
```
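Here `BoolFOption` expands into the `-foffload-via-llvm` and `-fno-offload-via-llvm` driver flags, with the `Use`/`Don't use` fragments concatenating with the `BothFlags` text to form the two help strings. Only the positive form carries `CC1Option`, so only it is a valid frontend argument; this matches the `Clang.cpp` change below, which forwards `-foffload-via-llvm` to CC1 explicitly.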

clang/lib/CodeGen/CGCUDANV.cpp

Lines changed: 82 additions & 15 deletions
```diff
@@ -15,10 +15,12 @@
 #include "CGCXXABI.h"
 #include "CodeGenFunction.h"
 #include "CodeGenModule.h"
+#include "clang/AST/CharUnits.h"
 #include "clang/AST/Decl.h"
 #include "clang/Basic/Cuda.h"
 #include "clang/CodeGen/CodeGenABITypes.h"
 #include "clang/CodeGen/ConstantInitBuilder.h"
+#include "llvm/ADT/StringRef.h"
 #include "llvm/Frontend/Offloading/Utility.h"
 #include "llvm/IR/BasicBlock.h"
 #include "llvm/IR/Constants.h"
@@ -36,6 +38,11 @@ constexpr unsigned HIPFatMagic = 0x48495046; // "HIPF"
 
 class CGNVCUDARuntime : public CGCUDARuntime {
 
+  /// The prefix used for function calls and section names (CUDA, HIP, LLVM)
+  StringRef Prefix;
+  /// TODO: We should transition the OpenMP section to LLVM/Offload
+  StringRef SectionPrefix;
+
 private:
   llvm::IntegerType *IntTy, *SizeTy;
   llvm::Type *VoidTy;
@@ -132,6 +139,9 @@ class CGNVCUDARuntime : public CGCUDARuntime {
     return DummyFunc;
   }
 
+  Address prepareKernelArgs(CodeGenFunction &CGF, FunctionArgList &Args);
+  Address prepareKernelArgsLLVMOffload(CodeGenFunction &CGF,
+                                       FunctionArgList &Args);
   void emitDeviceStubBodyLegacy(CodeGenFunction &CGF, FunctionArgList &Args);
   void emitDeviceStubBodyNew(CodeGenFunction &CGF, FunctionArgList &Args);
   std::string getDeviceSideName(const NamedDecl *ND) override;
@@ -191,15 +201,11 @@ class CGNVCUDARuntime : public CGCUDARuntime {
 } // end anonymous namespace
 
 std::string CGNVCUDARuntime::addPrefixToName(StringRef FuncName) const {
-  if (CGM.getLangOpts().HIP)
-    return ((Twine("hip") + Twine(FuncName)).str());
-  return ((Twine("cuda") + Twine(FuncName)).str());
+  return (Prefix + FuncName).str();
 }
 std::string
 CGNVCUDARuntime::addUnderscoredPrefixToName(StringRef FuncName) const {
-  if (CGM.getLangOpts().HIP)
-    return ((Twine("__hip") + Twine(FuncName)).str());
-  return ((Twine("__cuda") + Twine(FuncName)).str());
+  return ("__" + Prefix + FuncName).str();
 }
 
 static std::unique_ptr<MangleContext> InitDeviceMC(CodeGenModule &CGM) {
@@ -227,6 +233,14 @@ CGNVCUDARuntime::CGNVCUDARuntime(CodeGenModule &CGM)
   SizeTy = CGM.SizeTy;
   VoidTy = CGM.VoidTy;
   PtrTy = CGM.UnqualPtrTy;
+
+  if (CGM.getLangOpts().OffloadViaLLVM) {
+    Prefix = "llvm";
+    SectionPrefix = "omp";
+  } else if (CGM.getLangOpts().HIP)
+    SectionPrefix = Prefix = "hip";
+  else
+    SectionPrefix = Prefix = "cuda";
 }
 
 llvm::FunctionCallee CGNVCUDARuntime::getSetupArgumentFn() const {
@@ -305,18 +319,58 @@ void CGNVCUDARuntime::emitDeviceStub(CodeGenFunction &CGF,
   }
   if (CudaFeatureEnabled(CGM.getTarget().getSDKVersion(),
                          CudaFeature::CUDA_USES_NEW_LAUNCH) ||
-      (CGF.getLangOpts().HIP && CGF.getLangOpts().HIPUseNewLaunchAPI))
+      (CGF.getLangOpts().HIP && CGF.getLangOpts().HIPUseNewLaunchAPI) ||
+      (CGF.getLangOpts().OffloadViaLLVM))
     emitDeviceStubBodyNew(CGF, Args);
   else
     emitDeviceStubBodyLegacy(CGF, Args);
 }
 
-// CUDA 9.0+ uses new way to launch kernels. Parameters are packed in a local
-// array and kernels are launched using cudaLaunchKernel().
-void CGNVCUDARuntime::emitDeviceStubBodyNew(CodeGenFunction &CGF,
-                                            FunctionArgList &Args) {
-  // Build the shadow stack entry at the very start of the function.
+/// CUDA passes the arguments with a level of indirection. For example, a
+/// (void*, short, void*) is passed as {void **, short *, void **} to the launch
+/// function. For the LLVM/offload launch we flatten the arguments into the
+/// struct directly. In addition, we include the size of the arguments, thus
+/// pass {sizeof({void *, short, void *}), ptr to {void *, short, void *},
+/// nullptr}. The last nullptr needs to be initialized to an array of pointers
+/// pointing to the arguments if we want to offload to the host.
+Address CGNVCUDARuntime::prepareKernelArgsLLVMOffload(CodeGenFunction &CGF,
+                                                      FunctionArgList &Args) {
+  SmallVector<llvm::Type *> ArgTypes, KernelLaunchParamsTypes;
+  for (auto &Arg : Args)
+    ArgTypes.push_back(CGF.ConvertTypeForMem(Arg->getType()));
+  llvm::StructType *KernelArgsTy = llvm::StructType::create(ArgTypes);
+
+  auto *Int64Ty = CGF.Builder.getInt64Ty();
+  KernelLaunchParamsTypes.push_back(Int64Ty);
+  KernelLaunchParamsTypes.push_back(PtrTy);
+  KernelLaunchParamsTypes.push_back(PtrTy);
+
+  llvm::StructType *KernelLaunchParamsTy =
+      llvm::StructType::create(KernelLaunchParamsTypes);
+  Address KernelArgs = CGF.CreateTempAllocaWithoutCast(
+      KernelArgsTy, CharUnits::fromQuantity(16), "kernel_args");
+  Address KernelLaunchParams = CGF.CreateTempAllocaWithoutCast(
+      KernelLaunchParamsTy, CharUnits::fromQuantity(16),
+      "kernel_launch_params");
+
+  auto KernelArgsSize = CGM.getDataLayout().getTypeAllocSize(KernelArgsTy);
+  CGF.Builder.CreateStore(llvm::ConstantInt::get(Int64Ty, KernelArgsSize),
+                          CGF.Builder.CreateStructGEP(KernelLaunchParams, 0));
+  CGF.Builder.CreateStore(KernelArgs.emitRawPointer(CGF),
+                          CGF.Builder.CreateStructGEP(KernelLaunchParams, 1));
+  CGF.Builder.CreateStore(llvm::Constant::getNullValue(PtrTy),
+                          CGF.Builder.CreateStructGEP(KernelLaunchParams, 2));
+
+  for (unsigned i = 0; i < Args.size(); ++i) {
+    auto *ArgVal = CGF.Builder.CreateLoad(CGF.GetAddrOfLocalVar(Args[i]));
+    CGF.Builder.CreateStore(ArgVal, CGF.Builder.CreateStructGEP(KernelArgs, i));
+  }
 
+  return KernelLaunchParams;
+}
+
+Address CGNVCUDARuntime::prepareKernelArgs(CodeGenFunction &CGF,
+                                           FunctionArgList &Args) {
   // Calculate amount of space we will need for all arguments. If we have no
   // args, allocate a single pointer so we still have a valid pointer to the
   // argument array that we can pass to runtime, even if it will be unused.
```
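The doc comment on `prepareKernelArgsLLVMOffload` above pins down both conventions. As a concrete illustration (our own sketch, not generated code), here is what each scheme hands to the launch function for the `(void *, short, void *)` example:

```cpp
#include <cstddef>
#include <cstdio>

int main() {
  // Arguments of a hypothetical kernel k(void *A, short B, void *C).
  void *A = nullptr;
  short B = 7;
  void *C = nullptr;

  // Classic CUDA convention: an array of pointers to the arguments,
  // i.e. {void **, short *, void **}, handed to cudaLaunchKernel.
  void *CUDAArgs[] = {&A, &B, &C};

  // LLVM/Offload convention: the arguments flattened into one struct,
  // preceded by its size. The trailing pointer stays null unless we
  // offload to the host and need per-argument pointers again.
  struct KernelArgs {
    void *A;
    short B;
    void *C;
  } Flat{A, B, C};
  struct LaunchParams {
    size_t Size;
    void *Args;
    void **ArgPtrs;
  } Params{sizeof(Flat), &Flat, nullptr};

  printf("CUDA passes %zu argument pointers; LLVM/Offload passes one "
         "%zu-byte struct\n",
         sizeof(CUDAArgs) / sizeof(void *), Params.Size);
}
```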
```diff
@@ -331,6 +385,17 @@ void CGNVCUDARuntime::emitDeviceStubBodyNew(CodeGenFunction &CGF,
         VoidVarPtr, CGF.Builder.CreateConstGEP1_32(
                         PtrTy, KernelArgs.emitRawPointer(CGF), i));
   }
+  return KernelArgs;
+}
+
+// CUDA 9.0+ uses new way to launch kernels. Parameters are packed in a local
+// array and kernels are launched using cudaLaunchKernel().
+void CGNVCUDARuntime::emitDeviceStubBodyNew(CodeGenFunction &CGF,
+                                            FunctionArgList &Args) {
+  // Build the shadow stack entry at the very start of the function.
+  Address KernelArgs = CGF.getLangOpts().OffloadViaLLVM
+                           ? prepareKernelArgsLLVMOffload(CGF, Args)
+                           : prepareKernelArgs(CGF, Args);
 
   llvm::BasicBlock *EndBlock = CGF.createBasicBlock("setup.end");
 
@@ -1129,8 +1194,9 @@ void CGNVCUDARuntime::transformManagedVars() {
 // registered. The linker will provide a pointer to this section so we can
 // register the symbols with the linked device image.
 void CGNVCUDARuntime::createOffloadingEntries() {
-  StringRef Section = CGM.getLangOpts().HIP ? "hip_offloading_entries"
-                                            : "cuda_offloading_entries";
+  SmallVector<char, 32> Out;
+  StringRef Section = (SectionPrefix + "_offloading_entries").toStringRef(Out);
+
   llvm::Module &M = CGM.getModule();
   for (KernelInfo &I : EmittedKernels)
     llvm::offloading::emitOffloadingEntry(
@@ -1199,7 +1265,8 @@ llvm::Function *CGNVCUDARuntime::finalizeModule() {
    }
    return nullptr;
  }
-  if (CGM.getLangOpts().OffloadingNewDriver && RelocatableDeviceCode)
+  if (CGM.getLangOpts().OffloadViaLLVM ||
+      (CGM.getLangOpts().OffloadingNewDriver && RelocatableDeviceCode))
    createOffloadingEntries();
  else
    return makeModuleCtorFunction();
```
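One subtlety in `createOffloadingEntries` above: `Twine::toStringRef` does not own memory, so the `SmallVector<char, 32> Out` provides the backing storage for the composed section name. A standalone illustration, assuming LLVM's ADT headers:

```cpp
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/ADT/Twine.h"
#include "llvm/Support/raw_ostream.h"

int main() {
  llvm::SmallVector<char, 32> Out;
  // Same composition as the patch, using the LLVM/Offload section prefix.
  llvm::StringRef Section =
      (llvm::Twine("omp") + "_offloading_entries").toStringRef(Out);
  // Section points into Out; Out must outlive every use of Section.
  llvm::outs() << Section << "\n"; // prints: omp_offloading_entries
}
```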

clang/lib/Driver/Driver.cpp

Lines changed: 12 additions & 7 deletions
```diff
@@ -792,11 +792,13 @@ void Driver::CreateOffloadingDeviceToolChains(Compilation &C,
       }) ||
       C.getInputArgs().hasArg(options::OPT_hip_link) ||
       C.getInputArgs().hasArg(options::OPT_hipstdpar);
+  bool UseLLVMOffload = C.getInputArgs().hasArg(
+      options::OPT_foffload_via_llvm, options::OPT_fno_offload_via_llvm, false);
   if (IsCuda && IsHIP) {
     Diag(clang::diag::err_drv_mix_cuda_hip);
     return;
   }
-  if (IsCuda) {
+  if (IsCuda && !UseLLVMOffload) {
     const ToolChain *HostTC = C.getSingleOffloadToolChain<Action::OFK_Host>();
     const llvm::Triple &HostTriple = HostTC->getTriple();
     auto OFK = Action::OFK_Cuda;
@@ -818,7 +820,7 @@ void Driver::CreateOffloadingDeviceToolChains(Compilation &C,
       CudaInstallation.WarnIfUnsupportedVersion();
     }
     C.addOffloadDeviceToolChain(CudaTC.get(), OFK);
-  } else if (IsHIP) {
+  } else if (IsHIP && !UseLLVMOffload) {
     if (auto *OMPTargetArg =
             C.getInputArgs().getLastArg(options::OPT_fopenmp_targets_EQ)) {
       Diag(clang::diag::err_drv_unsupported_opt_for_language_mode)
@@ -842,10 +844,11 @@ void Driver::CreateOffloadingDeviceToolChains(Compilation &C,
   // We need to generate an OpenMP toolchain if the user specified targets with
   // the -fopenmp-targets option or used --offload-arch with OpenMP enabled.
   bool IsOpenMPOffloading =
-      C.getInputArgs().hasFlag(options::OPT_fopenmp, options::OPT_fopenmp_EQ,
-                               options::OPT_fno_openmp, false) &&
-      (C.getInputArgs().hasArg(options::OPT_fopenmp_targets_EQ) ||
-       C.getInputArgs().hasArg(options::OPT_offload_arch_EQ));
+      ((IsCuda || IsHIP) && UseLLVMOffload) ||
+      (C.getInputArgs().hasFlag(options::OPT_fopenmp, options::OPT_fopenmp_EQ,
+                                options::OPT_fno_openmp, false) &&
+       (C.getInputArgs().hasArg(options::OPT_fopenmp_targets_EQ) ||
+        C.getInputArgs().hasArg(options::OPT_offload_arch_EQ)));
   if (IsOpenMPOffloading) {
     // We expect that -fopenmp-targets is always used in conjunction with the
     // option -fopenmp specifying a valid runtime with offloading support, i.e.
@@ -873,7 +876,7 @@ void Driver::CreateOffloadingDeviceToolChains(Compilation &C,
       for (StringRef T : OpenMPTargets->getValues())
         OpenMPTriples.insert(T);
     } else if (C.getInputArgs().hasArg(options::OPT_offload_arch_EQ) &&
-               !IsHIP && !IsCuda) {
+               ((!IsHIP && !IsCuda) || UseLLVMOffload)) {
       const ToolChain *HostTC = C.getSingleOffloadToolChain<Action::OFK_Host>();
       auto AMDTriple = getHIPOffloadTargetTriple(*this, C.getInputArgs());
       auto NVPTXTriple = getNVIDIAOffloadTargetTriple(*this, C.getInputArgs(),
@@ -4138,6 +4141,8 @@ void Driver::BuildActions(Compilation &C, DerivedArgList &Args,
 
   bool UseNewOffloadingDriver =
       C.isOffloadingHostKind(Action::OFK_OpenMP) ||
+      Args.hasFlag(options::OPT_foffload_via_llvm,
+                   options::OPT_fno_offload_via_llvm, false) ||
       Args.hasFlag(options::OPT_offload_new_driver,
                    options::OPT_no_offload_new_driver, false);
 
```

clang/lib/Driver/ToolChains/Clang.cpp

Lines changed: 23 additions & 4 deletions
```diff
@@ -1125,6 +1125,18 @@ void Clang::AddPreprocessingOptions(Compilation &C, const JobAction &JA,
     CmdArgs.push_back("__clang_openmp_device_functions.h");
   }
 
+  if (Args.hasArg(options::OPT_foffload_via_llvm)) {
+    // Add llvm_wrappers/* to our system include path. This lets us wrap
+    // standard library headers and other headers.
+    SmallString<128> P(D.ResourceDir);
+    llvm::sys::path::append(P, "include", "llvm_offload_wrappers");
+    CmdArgs.append({"-internal-isystem", Args.MakeArgString(P), "-include"});
+    if (JA.isDeviceOffloading(Action::OFK_OpenMP))
+      CmdArgs.push_back("__llvm_offload_device.h");
+    else
+      CmdArgs.push_back("__llvm_offload_host.h");
+  }
+
   // Add -i* options, and automatically translate to
   // -include-pch/-include-pth for transparent PCH support. It's
   // wonky, but we include looking for .gch so we can support seamless
@@ -6599,6 +6611,8 @@ void Clang::ConstructJob(Compilation &C, const JobAction &JA,
   // device offloading action other than OpenMP.
   if (Args.hasFlag(options::OPT_fopenmp, options::OPT_fopenmp_EQ,
                    options::OPT_fno_openmp, false) &&
+      !Args.hasFlag(options::OPT_foffload_via_llvm,
+                    options::OPT_fno_offload_via_llvm, false) &&
       (JA.isDeviceOffloading(Action::OFK_None) ||
        JA.isDeviceOffloading(Action::OFK_OpenMP))) {
     switch (D.getOpenMPRuntime(Args)) {
@@ -6676,11 +6690,16 @@ void Clang::ConstructJob(Compilation &C, const JobAction &JA,
     Args.addOptOutFlag(CmdArgs, options::OPT_fopenmp_extensions,
                        options::OPT_fno_openmp_extensions);
   }
-
-  // Forward the new driver to change offloading code generation.
-  if (Args.hasFlag(options::OPT_offload_new_driver,
-                   options::OPT_no_offload_new_driver, false))
+  // Forward the offload runtime change to code generation; liboffload implies
+  // the new driver. Otherwise, check if we should forward the new driver to
+  // change offloading code generation.
+  if (Args.hasFlag(options::OPT_foffload_via_llvm,
+                   options::OPT_fno_offload_via_llvm, false)) {
+    CmdArgs.append({"--offload-new-driver", "-foffload-via-llvm"});
+  } else if (Args.hasFlag(options::OPT_offload_new_driver,
+                          options::OPT_no_offload_new_driver, false)) {
     CmdArgs.push_back("--offload-new-driver");
+  }
 
   SanitizeArgs.addArgs(TC, Args, CmdArgs, InputType);
 
```
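The forced `-include` means every CUDA translation unit picks up the LLVM/Offload declarations without source changes. The actual contents of `__llvm_offload_host.h` are not part of this diff; a plausible excerpt, grounded only in the functions the commit-message example declares by hand, would be:

```cpp
// Hypothetical excerpt of __llvm_offload_host.h (not from the patch).
#include <cstddef>

extern "C" {
// Shared (host- and device-accessible) allocation, as used in test.cu above.
void *llvm_omp_target_alloc_shared(size_t Size, int DeviceNum);
void llvm_omp_target_free_shared(void *DevicePtr, int DeviceNum);
}
```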

clang/lib/Driver/ToolChains/CommonArgs.cpp

Lines changed: 6 additions & 1 deletion
```diff
@@ -1205,8 +1205,13 @@ bool tools::addOpenMPRuntime(const Compilation &C, ArgStringList &CmdArgs,
                              bool ForceStaticHostRuntime, bool IsOffloadingHost,
                              bool GompNeedsRT) {
   if (!Args.hasFlag(options::OPT_fopenmp, options::OPT_fopenmp_EQ,
-                    options::OPT_fno_openmp, false))
+                    options::OPT_fno_openmp, false)) {
+    // We need libomptarget (liboffload) if it's the chosen offloading runtime.
+    if (Args.hasFlag(options::OPT_foffload_via_llvm,
+                     options::OPT_fno_offload_via_llvm, false))
+      CmdArgs.push_back("-lomptarget");
     return false;
+  }
 
   Driver::OpenMPRuntimeKind RTKind = TC.getDriver().getOpenMPRuntime(Args);
 
```
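Net effect: with `-foffload-via-llvm`, `libomptarget` is linked even when `-fopenmp` is off, so the `llvm*` runtime entry points the emitted stubs call can resolve at link time.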

clang/lib/Driver/ToolChains/Cuda.cpp

Lines changed: 16 additions & 11 deletions
```diff
@@ -861,17 +861,15 @@ void CudaToolChain::addClangTargetOptions(
           DeviceOffloadingKind == Action::OFK_Cuda) &&
          "Only OpenMP or CUDA offloading kinds are supported for NVIDIA GPUs.");
 
-  if (DeviceOffloadingKind == Action::OFK_Cuda) {
-    CC1Args.append(
-        {"-fcuda-is-device", "-mllvm", "-enable-memcpyopt-without-libcalls"});
-
-    // Unsized function arguments used for variadics were introduced in CUDA-9.0
-    // We still do not support generating code that actually uses variadic
-    // arguments yet, but we do need to allow parsing them as recent CUDA
-    // headers rely on that. https://github.com/llvm/llvm-project/issues/58410
-    if (CudaInstallation.version() >= CudaVersion::CUDA_90)
-      CC1Args.push_back("-fcuda-allow-variadic-functions");
-  }
+  CC1Args.append(
+      {"-fcuda-is-device", "-mllvm", "-enable-memcpyopt-without-libcalls"});
+
+  // Unsized function arguments used for variadics were introduced in CUDA-9.0
+  // We still do not support generating code that actually uses variadic
+  // arguments yet, but we do need to allow parsing them as recent CUDA
+  // headers rely on that. https://github.com/llvm/llvm-project/issues/58410
+  if (CudaInstallation.version() >= CudaVersion::CUDA_90)
+    CC1Args.push_back("-fcuda-allow-variadic-functions");
 
   if (DriverArgs.hasArg(options::OPT_nogpulib))
     return;
@@ -889,6 +887,13 @@ void CudaToolChain::addClangTargetOptions(
   CC1Args.push_back("-mlink-builtin-bitcode");
   CC1Args.push_back(DriverArgs.MakeArgString(LibDeviceFile));
 
+  // For now, we don't use any Offload/OpenMP device runtime when we offload
+  // CUDA via LLVM/Offload. We should split the Offload/OpenMP device runtime
+  // and include the "generic" (or CUDA-specific) parts.
+  if (DriverArgs.hasFlag(options::OPT_foffload_via_llvm,
+                         options::OPT_fno_offload_via_llvm, false))
+    return;
+
   clang::CudaVersion CudaInstallationVersion = CudaInstallation.version();
 
   if (DriverArgs.hasFlag(options::OPT_fcuda_short_ptr,
```