clang: Avoid hardcoding some offload triple strings #193811
Conversation
This will make it easier to use the subarch field in the future.
|
@llvm/pr-subscribers-clang-driver @llvm/pr-subscribers-clang Author: Matt Arsenault (arsenm) ChangesThis will make it easier to use the subarch field in the future. Full diff: https://github.com/llvm/llvm-project/pull/193811.diff 4 Files Affected:
diff --git a/clang/lib/Basic/OffloadArch.cpp b/clang/lib/Basic/OffloadArch.cpp
index 1dea381bf7682..4f739f7352cb7 100644
--- a/clang/lib/Basic/OffloadArch.cpp
+++ b/clang/lib/Basic/OffloadArch.cpp
@@ -152,15 +152,20 @@ OffloadArch StringToOffloadArch(llvm::StringRef S) {
llvm::Triple OffloadArchToTriple(const llvm::Triple &DefaultToolchainTriple,
OffloadArch ID) {
if (ID == OffloadArch::AMDGCNSPIRV)
- return llvm::Triple("spirv64-amd-amdhsa");
+ return llvm::Triple(llvm::Triple::spirv64, llvm::Triple::NoSubArch,
+ llvm::Triple::AMD, llvm::Triple::AMDHSA);
- if (IsNVIDIAOffloadArch(ID))
- return DefaultToolchainTriple.isArch64Bit()
- ? llvm::Triple("nvptx64-nvidia-cuda")
- : llvm::Triple("nvptx-nvidia-cuda");
+ if (IsNVIDIAOffloadArch(ID)) {
+ llvm::Triple::ArchType Arch = DefaultToolchainTriple.isArch64Bit()
+ ? llvm::Triple::nvptx64
+ : llvm::Triple::nvptx;
+ return llvm::Triple(Arch, llvm::Triple::NoSubArch, llvm::Triple::NVIDIA,
+ llvm::Triple::CUDA);
+ }
if (IsAMDOffloadArch(ID))
- return llvm::Triple("amdgcn-amd-amdhsa");
+ return llvm::Triple(llvm::Triple::amdgcn, llvm::Triple::NoSubArch,
+ llvm::Triple::AMD, llvm::Triple::AMDHSA);
return {};
}
diff --git a/clang/lib/Basic/Targets/SPIR.cpp b/clang/lib/Basic/Targets/SPIR.cpp
index 7726c95cc7392..e55a6ed8bc63f 100644
--- a/clang/lib/Basic/Targets/SPIR.cpp
+++ b/clang/lib/Basic/Targets/SPIR.cpp
@@ -108,7 +108,10 @@ void SPIRV64TargetInfo::getTargetDefines(const LangOptions &Opts,
DefineStd(Builder, "SPIRV64", Opts);
}
-static const AMDGPUTargetInfo AMDGPUTI(llvm::Triple("amdgcn-amd-amdhsa"), {});
+static const AMDGPUTargetInfo
+ AMDGPUTI(llvm::Triple(llvm::Triple::amdgcn, llvm::Triple::NoSubArch,
+ llvm::Triple::AMD, llvm::Triple::AMDHSA),
+ {});
ArrayRef<const char *> SPIRV64AMDGCNTargetInfo::getGCCRegNames() const {
return AMDGPUTI.getGCCRegNames();
diff --git a/clang/lib/Driver/Driver.cpp b/clang/lib/Driver/Driver.cpp
index 2cbe3179892ff..a7f8820fc991a 100644
--- a/clang/lib/Driver/Driver.cpp
+++ b/clang/lib/Driver/Driver.cpp
@@ -973,8 +973,10 @@ static TripleSet inferOffloadToolchains(Compilation &C,
for (llvm::StringRef Arch : Archs) {
OffloadArch ID = StringToOffloadArch(Arch);
if (ID == OffloadArch::Unknown)
- ID = StringToOffloadArch(
- getProcessorFromTargetID(llvm::Triple("amdgcn-amd-amdhsa"), Arch));
+ ID = StringToOffloadArch(getProcessorFromTargetID(
+ llvm::Triple(llvm::Triple::amdgcn, llvm::Triple::NoSubArch,
+ llvm::Triple::AMD, llvm::Triple::AMDHSA),
+ Arch));
if (Kind == Action::OFK_HIP && !IsAMDOffloadArch(ID)) {
C.getDriver().Diag(clang::diag::err_drv_offload_bad_gpu_arch)
@@ -1020,17 +1022,20 @@ static TripleSet inferOffloadToolchains(Compilation &C,
// Infer the default target triple if no specific architectures are given.
if (Archs.empty() && Kind == Action::OFK_HIP)
- Triples.insert(llvm::Triple("amdgcn-amd-amdhsa"));
- else if (Archs.empty() && Kind == Action::OFK_Cuda)
+ Triples.insert(llvm::Triple(llvm::Triple::amdgcn, llvm::Triple::NoSubArch,
+ llvm::Triple::AMD, llvm::Triple::AMDHSA));
+ else if (Archs.empty() && Kind == Action::OFK_Cuda) {
+ llvm::Triple::ArchType Arch =
+ C.getDefaultToolChain().getTriple().isArch64Bit()
+ ? llvm::Triple::nvptx64
+ : llvm::Triple::nvptx;
+ Triples.insert(llvm::Triple(Arch, llvm::Triple::NoSubArch,
+ llvm::Triple::NVIDIA, llvm::Triple::CUDA));
+ } else if (Archs.empty() && Kind == Action::OFK_SYCL)
Triples.insert(
llvm::Triple(C.getDefaultToolChain().getTriple().isArch64Bit()
- ? "nvptx64-nvidia-cuda"
- : "nvptx-nvidia-cuda"));
- else if (Archs.empty() && Kind == Action::OFK_SYCL)
- Triples.insert(
- llvm::Triple(C.getDefaultToolChain().getTriple().isArch64Bit()
- ? "spirv64-unknown-unknown"
- : "spirv32-unknown-unknown"));
+ ? llvm::Triple::spirv64
+ : llvm::Triple::spirv32));
// We need to dispatch these to the appropriate toolchain now.
C.getArgs().eraseArg(options::OPT_offload_arch_EQ);
diff --git a/clang/lib/Sema/SemaAMDGPU.cpp b/clang/lib/Sema/SemaAMDGPU.cpp
index ea2ae0c264de5..96a4648f06314 100644
--- a/clang/lib/Sema/SemaAMDGPU.cpp
+++ b/clang/lib/Sema/SemaAMDGPU.cpp
@@ -1024,7 +1024,9 @@ bool DiagnoseUnguardedBuiltins::VisitCallExpr(CallExpr *CE) {
for (auto &&F : llvm::split(BInfo.getRequiredFeatures(GID), ','))
FeatureMap[F] = true;
} else {
- static const llvm::Triple AMDGCN("amdgcn-amd-amdhsa");
+ static const llvm::Triple AMDGCN(llvm::Triple::amdgcn,
+ llvm::Triple::NoSubArch, llvm::Triple::AMD,
+ llvm::Triple::AMDHSA);
llvm::AMDGPU::fillAMDGPUFeatureMap(CurrentGFXIP.back().second, AMDGCN,
FeatureMap);
}
|
|
@llvm/pr-subscribers-backend-amdgpu Author: Matt Arsenault (arsenm) ChangesThis will make it easier to use the subarch field in the future. Full diff: https://github.com/llvm/llvm-project/pull/193811.diff 4 Files Affected:
diff --git a/clang/lib/Basic/OffloadArch.cpp b/clang/lib/Basic/OffloadArch.cpp
index 1dea381bf7682..4f739f7352cb7 100644
--- a/clang/lib/Basic/OffloadArch.cpp
+++ b/clang/lib/Basic/OffloadArch.cpp
@@ -152,15 +152,20 @@ OffloadArch StringToOffloadArch(llvm::StringRef S) {
llvm::Triple OffloadArchToTriple(const llvm::Triple &DefaultToolchainTriple,
OffloadArch ID) {
if (ID == OffloadArch::AMDGCNSPIRV)
- return llvm::Triple("spirv64-amd-amdhsa");
+ return llvm::Triple(llvm::Triple::spirv64, llvm::Triple::NoSubArch,
+ llvm::Triple::AMD, llvm::Triple::AMDHSA);
- if (IsNVIDIAOffloadArch(ID))
- return DefaultToolchainTriple.isArch64Bit()
- ? llvm::Triple("nvptx64-nvidia-cuda")
- : llvm::Triple("nvptx-nvidia-cuda");
+ if (IsNVIDIAOffloadArch(ID)) {
+ llvm::Triple::ArchType Arch = DefaultToolchainTriple.isArch64Bit()
+ ? llvm::Triple::nvptx64
+ : llvm::Triple::nvptx;
+ return llvm::Triple(Arch, llvm::Triple::NoSubArch, llvm::Triple::NVIDIA,
+ llvm::Triple::CUDA);
+ }
if (IsAMDOffloadArch(ID))
- return llvm::Triple("amdgcn-amd-amdhsa");
+ return llvm::Triple(llvm::Triple::amdgcn, llvm::Triple::NoSubArch,
+ llvm::Triple::AMD, llvm::Triple::AMDHSA);
return {};
}
diff --git a/clang/lib/Basic/Targets/SPIR.cpp b/clang/lib/Basic/Targets/SPIR.cpp
index 7726c95cc7392..e55a6ed8bc63f 100644
--- a/clang/lib/Basic/Targets/SPIR.cpp
+++ b/clang/lib/Basic/Targets/SPIR.cpp
@@ -108,7 +108,10 @@ void SPIRV64TargetInfo::getTargetDefines(const LangOptions &Opts,
DefineStd(Builder, "SPIRV64", Opts);
}
-static const AMDGPUTargetInfo AMDGPUTI(llvm::Triple("amdgcn-amd-amdhsa"), {});
+static const AMDGPUTargetInfo
+ AMDGPUTI(llvm::Triple(llvm::Triple::amdgcn, llvm::Triple::NoSubArch,
+ llvm::Triple::AMD, llvm::Triple::AMDHSA),
+ {});
ArrayRef<const char *> SPIRV64AMDGCNTargetInfo::getGCCRegNames() const {
return AMDGPUTI.getGCCRegNames();
diff --git a/clang/lib/Driver/Driver.cpp b/clang/lib/Driver/Driver.cpp
index 2cbe3179892ff..a7f8820fc991a 100644
--- a/clang/lib/Driver/Driver.cpp
+++ b/clang/lib/Driver/Driver.cpp
@@ -973,8 +973,10 @@ static TripleSet inferOffloadToolchains(Compilation &C,
for (llvm::StringRef Arch : Archs) {
OffloadArch ID = StringToOffloadArch(Arch);
if (ID == OffloadArch::Unknown)
- ID = StringToOffloadArch(
- getProcessorFromTargetID(llvm::Triple("amdgcn-amd-amdhsa"), Arch));
+ ID = StringToOffloadArch(getProcessorFromTargetID(
+ llvm::Triple(llvm::Triple::amdgcn, llvm::Triple::NoSubArch,
+ llvm::Triple::AMD, llvm::Triple::AMDHSA),
+ Arch));
if (Kind == Action::OFK_HIP && !IsAMDOffloadArch(ID)) {
C.getDriver().Diag(clang::diag::err_drv_offload_bad_gpu_arch)
@@ -1020,17 +1022,20 @@ static TripleSet inferOffloadToolchains(Compilation &C,
// Infer the default target triple if no specific architectures are given.
if (Archs.empty() && Kind == Action::OFK_HIP)
- Triples.insert(llvm::Triple("amdgcn-amd-amdhsa"));
- else if (Archs.empty() && Kind == Action::OFK_Cuda)
+ Triples.insert(llvm::Triple(llvm::Triple::amdgcn, llvm::Triple::NoSubArch,
+ llvm::Triple::AMD, llvm::Triple::AMDHSA));
+ else if (Archs.empty() && Kind == Action::OFK_Cuda) {
+ llvm::Triple::ArchType Arch =
+ C.getDefaultToolChain().getTriple().isArch64Bit()
+ ? llvm::Triple::nvptx64
+ : llvm::Triple::nvptx;
+ Triples.insert(llvm::Triple(Arch, llvm::Triple::NoSubArch,
+ llvm::Triple::NVIDIA, llvm::Triple::CUDA));
+ } else if (Archs.empty() && Kind == Action::OFK_SYCL)
Triples.insert(
llvm::Triple(C.getDefaultToolChain().getTriple().isArch64Bit()
- ? "nvptx64-nvidia-cuda"
- : "nvptx-nvidia-cuda"));
- else if (Archs.empty() && Kind == Action::OFK_SYCL)
- Triples.insert(
- llvm::Triple(C.getDefaultToolChain().getTriple().isArch64Bit()
- ? "spirv64-unknown-unknown"
- : "spirv32-unknown-unknown"));
+ ? llvm::Triple::spirv64
+ : llvm::Triple::spirv32));
// We need to dispatch these to the appropriate toolchain now.
C.getArgs().eraseArg(options::OPT_offload_arch_EQ);
diff --git a/clang/lib/Sema/SemaAMDGPU.cpp b/clang/lib/Sema/SemaAMDGPU.cpp
index ea2ae0c264de5..96a4648f06314 100644
--- a/clang/lib/Sema/SemaAMDGPU.cpp
+++ b/clang/lib/Sema/SemaAMDGPU.cpp
@@ -1024,7 +1024,9 @@ bool DiagnoseUnguardedBuiltins::VisitCallExpr(CallExpr *CE) {
for (auto &&F : llvm::split(BInfo.getRequiredFeatures(GID), ','))
FeatureMap[F] = true;
} else {
- static const llvm::Triple AMDGCN("amdgcn-amd-amdhsa");
+ static const llvm::Triple AMDGCN(llvm::Triple::amdgcn,
+ llvm::Triple::NoSubArch, llvm::Triple::AMD,
+ llvm::Triple::AMDHSA);
llvm::AMDGPU::fillAMDGPUFeatureMap(CurrentGFXIP.back().second, AMDGCN,
FeatureMap);
}
|
🪟 Windows x64 Test Results
All executed tests passed, but another part of the build failed. Click on a failure below to see the details. [code=4294967295] bin/llvm-cov.exe. If these failures are unrelated to your changes (for example tests are broken or flaky at HEAD), please open an issue at https://github.com/llvm/llvm-project/issues and add the |
|
LLVM Buildbot has detected a new failure on a builder. Full details are available at: https://lab.llvm.org/buildbot/#/builders/67/builds/3136 Here is the relevant piece of the build log for the reference |

This will make it easier to use the subarch field in the future.