diff --git a/llvm/lib/Target/AMDGPU/AMDGPU.td b/llvm/lib/Target/AMDGPU/AMDGPU.td
index 18c3efc7b9d46..55dbc1a803e13 100644
--- a/llvm/lib/Target/AMDGPU/AMDGPU.td
+++ b/llvm/lib/Target/AMDGPU/AMDGPU.td
@@ -1803,6 +1803,9 @@ def HasFlatAddressSpace : Predicate<"Subtarget->hasFlatAddressSpace()">,
 def HasBufferFlatGlobalAtomicsF64 :
   Predicate<"Subtarget->hasBufferFlatGlobalAtomicsF64()">,
   AssemblerPredicate<(any_of FeatureGFX90AInsts)>;
+def HasLdsAtomicAddF64 :
+  Predicate<"Subtarget->hasLdsAtomicAddF64()">,
+  AssemblerPredicate<(any_of FeatureGFX90AInsts)>;
 
 def HasFlatGlobalInsts : Predicate<"Subtarget->hasFlatGlobalInsts()">,
   AssemblerPredicate<(all_of FeatureFlatGlobalInsts)>;
diff --git a/llvm/lib/Target/AMDGPU/AMDGPULegalizerInfo.cpp b/llvm/lib/Target/AMDGPU/AMDGPULegalizerInfo.cpp
index 17ffb7ec988f0..97952de3e6a37 100644
--- a/llvm/lib/Target/AMDGPU/AMDGPULegalizerInfo.cpp
+++ b/llvm/lib/Target/AMDGPU/AMDGPULegalizerInfo.cpp
@@ -1593,7 +1593,7 @@ AMDGPULegalizerInfo::AMDGPULegalizerInfo(const GCNSubtarget &ST_,
   auto &Atomic = getActionDefinitionsBuilder(G_ATOMICRMW_FADD);
   if (ST.hasLDSFPAtomicAdd()) {
     Atomic.legalFor({{S32, LocalPtr}, {S32, RegionPtr}});
-    if (ST.hasGFX90AInsts())
+    if (ST.hasLdsAtomicAddF64())
       Atomic.legalFor({{S64, LocalPtr}});
     if (ST.hasAtomicDsPkAdd16Insts())
       Atomic.legalFor({{V2S16, LocalPtr}});
diff --git a/llvm/lib/Target/AMDGPU/DSInstructions.td b/llvm/lib/Target/AMDGPU/DSInstructions.td
index 0888fb84a22fa..9ff74d9958304 100644
--- a/llvm/lib/Target/AMDGPU/DSInstructions.td
+++ b/llvm/lib/Target/AMDGPU/DSInstructions.td
@@ -486,10 +486,10 @@ def DS_WRITE_ADDTID_B32 : DS_0A1D_NORET<"ds_write_addtid_b32">;
 } // End mayLoad = 0
 
-let SubtargetPredicate = isGFX90APlus in {
+let SubtargetPredicate = HasLdsAtomicAddF64 in {
   defm DS_ADD_F64     : DS_1A1D_NORET_mc_gfx9<"ds_add_f64", VReg_64>;
   defm DS_ADD_RTN_F64 : DS_1A1D_RET_mc_gfx9<"ds_add_rtn_f64", VReg_64, "ds_add_f64">;
-} // End SubtargetPredicate = isGFX90APlus
+} // End SubtargetPredicate = HasLdsAtomicAddF64
 
 let SubtargetPredicate = HasAtomicDsPkAdd16Insts in {
   defm DS_PK_ADD_F16 : DS_1A1D_NORET_mc<"ds_pk_add_f16">;
@@ -1128,7 +1128,7 @@ let SubtargetPredicate = isGFX11Plus in {
 defm : DSAtomicCmpXChg_mc;
 } // End SubtargetPredicate = isGFX11Plus
 
-let SubtargetPredicate = isGFX90APlus in {
+let SubtargetPredicate = HasLdsAtomicAddF64 in {
 def : DSAtomicRetPat;
 let AddedComplexity = 1 in
 def : DSAtomicRetPat;
diff --git a/llvm/lib/Target/AMDGPU/GCNSubtarget.h b/llvm/lib/Target/AMDGPU/GCNSubtarget.h
index cbc5ffa8c5123..4f8eeaaf500b4 100644
--- a/llvm/lib/Target/AMDGPU/GCNSubtarget.h
+++ b/llvm/lib/Target/AMDGPU/GCNSubtarget.h
@@ -641,6 +641,9 @@ class GCNSubtarget final : public AMDGPUGenSubtargetInfo,
   // BUFFER/FLAT/GLOBAL_ATOMIC_ADD/MIN/MAX_F64
   bool hasBufferFlatGlobalAtomicsF64() const { return hasGFX90AInsts(); }
 
+  // DS_ADD_F64/DS_ADD_RTN_F64
+  bool hasLdsAtomicAddF64() const { return hasGFX90AInsts(); }
+
   bool hasMultiDwordFlatScratchAddressing() const {
     return getGeneration() >= GFX9;
   }