AtomicExpand: Copy metadata from atomicrmw to cmpxchg #109409

Merged (2 commits) on Oct 31, 2024
9 changes: 5 additions & 4 deletions llvm/include/llvm/CodeGen/AtomicExpandUtils.h
@@ -20,10 +20,11 @@ class Value;
 
 /// Parameters (see the expansion example below):
 /// (the builder, %addr, %loaded, %new_val, ordering,
-///  /* OUT */ %success, /* OUT */ %new_loaded)
-using CreateCmpXchgInstFun =
-    function_ref<void(IRBuilderBase &, Value *, Value *, Value *, Align,
-                      AtomicOrdering, SyncScope::ID, Value *&, Value *&)>;
+///  /* OUT */ %success, /* OUT */ %new_loaded,
+///  %MetadataSrc)
+using CreateCmpXchgInstFun = function_ref<void(
+    IRBuilderBase &, Value *, Value *, Value *, Align, AtomicOrdering,
+    SyncScope::ID, Value *&, Value *&, Instruction *)>;
 
 /// Expand an atomic RMW instruction into a loop utilizing
 /// cmpxchg. You'll want to make sure your target machine likes cmpxchg
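
For orientation, here is a minimal sketch (not part of this patch) of a callback conforming to the updated CreateCmpXchgInstFun type. The function name and the metadata whitelist are illustrative assumptions; the in-tree callback is createCmpXchgInstFun in AtomicExpandPass.cpp below, which uses the pass's own copyMetadataForAtomic helper rather than Instruction::copyMetadata.

#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/LLVMContext.h"
using namespace llvm;

// Illustrative only: create the cmpxchg and forward a subset of metadata from
// the original atomicrmw (MetadataSrc) onto it.
static void exampleCmpXchgInstFun(IRBuilderBase &Builder, Value *Addr,
                                  Value *Loaded, Value *NewVal, Align AddrAlign,
                                  AtomicOrdering MemOpOrder, SyncScope::ID SSID,
                                  Value *&Success, Value *&NewLoaded,
                                  Instruction *MetadataSrc) {
  AtomicCmpXchgInst *Pair = Builder.CreateAtomicCmpXchg(
      Addr, Loaded, NewVal, AddrAlign, MemOpOrder,
      AtomicCmpXchgInst::getStrongestFailureOrdering(MemOpOrder), SSID);
  if (MetadataSrc) {
    // Example whitelist; the real pass preserves more kinds (see
    // copyMetadataForAtomic in the diff below).
    Pair->copyMetadata(*MetadataSrc,
                       {LLVMContext::MD_tbaa, LLVMContext::MD_alias_scope,
                        LLVMContext::MD_noalias, LLVMContext::MD_access_group});
  }
  Success = Builder.CreateExtractValue(Pair, 1, "success");
  NewLoaded = Builder.CreateExtractValue(Pair, 0, "newloaded");
}

A caller that expands an atomicrmw this way passes the original instruction as MetadataSrc, as the hunks below do with /*MetadataSrc=*/AI.
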
91 changes: 51 additions & 40 deletions llvm/lib/CodeGen/AtomicExpandPass.cpp
Expand Up @@ -98,7 +98,7 @@ class AtomicExpandImpl {
IRBuilderBase &Builder, Type *ResultType, Value *Addr, Align AddrAlign,
AtomicOrdering MemOpOrder, SyncScope::ID SSID,
function_ref<Value *(IRBuilderBase &, Value *)> PerformOp,
CreateCmpXchgInstFun CreateCmpXchg);
CreateCmpXchgInstFun CreateCmpXchg, Instruction *MetadataSrc);
bool tryExpandAtomicCmpXchg(AtomicCmpXchgInst *CI);

bool expandAtomicCmpXchg(AtomicCmpXchgInst *CI);
@@ -194,6 +194,39 @@ static unsigned getAtomicOpSize(AtomicCmpXchgInst *CASI) {
   return DL.getTypeStoreSize(CASI->getCompareOperand()->getType());
 }
 
+/// Copy metadata that's safe to preserve when widening atomics.
+static void copyMetadataForAtomic(Instruction &Dest,
+                                  const Instruction &Source) {
+  SmallVector<std::pair<unsigned, MDNode *>, 8> MD;
+  Source.getAllMetadata(MD);
+  LLVMContext &Ctx = Dest.getContext();
+  MDBuilder MDB(Ctx);
+
+  for (auto [ID, N] : MD) {
+    switch (ID) {
+    case LLVMContext::MD_dbg:
+    case LLVMContext::MD_tbaa:
+    case LLVMContext::MD_tbaa_struct:
+    case LLVMContext::MD_alias_scope:
+    case LLVMContext::MD_noalias:
+    case LLVMContext::MD_noalias_addrspace:
+    case LLVMContext::MD_access_group:
+    case LLVMContext::MD_mmra:
+      Dest.setMetadata(ID, N);
+      break;
+    default:
+      if (ID == Ctx.getMDKindID("amdgpu.no.remote.memory"))
+        Dest.setMetadata(ID, N);
+      else if (ID == Ctx.getMDKindID("amdgpu.no.fine.grained.memory"))
+        Dest.setMetadata(ID, N);
+
+      // Losing amdgpu.ignore.denormal.mode, but it doesn't matter for current
+      // uses.
+      break;
+    }
+  }
+}
+
 // Determine if a particular atomic operation has a supported size,
 // and is of appropriate alignment, to be passed through for target
 // lowering. (Versus turning into a __atomic libcall)
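
As an aside on the context comment above, a simplified sketch of the size/alignment gate it describes, under the assumption that the limit comes from the target's reported maximum lock-free atomic width (names here are illustrative, not the pass's actual helper).

#include "llvm/Support/Alignment.h"
using namespace llvm;

// Illustrative: an atomic op is left for target lowering only when it is no
// wider than the largest lock-free size the target supports and is aligned to
// its full width; otherwise it becomes a __atomic_* libcall.
static bool exampleAtomicSizeSupported(unsigned MaxAtomicSizeInBits,
                                       unsigned OpSizeInBytes,
                                       Align Alignment) {
  return OpSizeInBytes <= MaxAtomicSizeInBits / 8 && Alignment >= OpSizeInBytes;
}
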
@@ -600,7 +633,8 @@ void AtomicExpandImpl::expandAtomicStore(StoreInst *SI) {
 static void createCmpXchgInstFun(IRBuilderBase &Builder, Value *Addr,
                                  Value *Loaded, Value *NewVal, Align AddrAlign,
                                  AtomicOrdering MemOpOrder, SyncScope::ID SSID,
-                                 Value *&Success, Value *&NewLoaded) {
+                                 Value *&Success, Value *&NewLoaded,
+                                 Instruction *MetadataSrc) {
   Type *OrigTy = NewVal->getType();
 
   // This code can go away when cmpxchg supports FP and vector types.
@@ -612,9 +646,12 @@ static void createCmpXchgInstFun(IRBuilderBase &Builder, Value *Addr,
     Loaded = Builder.CreateBitCast(Loaded, IntTy);
   }
 
-  Value *Pair = Builder.CreateAtomicCmpXchg(
+  AtomicCmpXchgInst *Pair = Builder.CreateAtomicCmpXchg(
       Addr, Loaded, NewVal, AddrAlign, MemOpOrder,
       AtomicCmpXchgInst::getStrongestFailureOrdering(MemOpOrder), SSID);
+  if (MetadataSrc)
+    copyMetadataForAtomic(*Pair, *MetadataSrc);
+
   Success = Builder.CreateExtractValue(Pair, 1, "success");
   NewLoaded = Builder.CreateExtractValue(Pair, 0, "newloaded");
 
@@ -951,9 +988,9 @@ void AtomicExpandImpl::expandPartwordAtomicRMW(
 
   Value *OldResult;
   if (ExpansionKind == TargetLoweringBase::AtomicExpansionKind::CmpXChg) {
-    OldResult = insertRMWCmpXchgLoop(Builder, PMV.WordType, PMV.AlignedAddr,
-                                     PMV.AlignedAddrAlignment, MemOpOrder, SSID,
-                                     PerformPartwordOp, createCmpXchgInstFun);
+    OldResult = insertRMWCmpXchgLoop(
+        Builder, PMV.WordType, PMV.AlignedAddr, PMV.AlignedAddrAlignment,
+        MemOpOrder, SSID, PerformPartwordOp, createCmpXchgInstFun, AI);
   } else {
     assert(ExpansionKind == TargetLoweringBase::AtomicExpansionKind::LLSC);
     OldResult = insertRMWLLSCLoop(Builder, PMV.WordType, PMV.AlignedAddr,
@@ -966,36 +1003,6 @@ void AtomicExpandImpl::expandPartwordAtomicRMW(
   AI->eraseFromParent();
 }
 
-/// Copy metadata that's safe to preserve when widening atomics.
-static void copyMetadataForAtomic(Instruction &Dest,
-                                  const Instruction &Source) {
-  SmallVector<std::pair<unsigned, MDNode *>, 8> MD;
-  Source.getAllMetadata(MD);
-  LLVMContext &Ctx = Dest.getContext();
-  MDBuilder MDB(Ctx);
-
-  for (auto [ID, N] : MD) {
-    switch (ID) {
-    case LLVMContext::MD_dbg:
-    case LLVMContext::MD_tbaa:
-    case LLVMContext::MD_tbaa_struct:
-    case LLVMContext::MD_alias_scope:
-    case LLVMContext::MD_noalias:
-    case LLVMContext::MD_access_group:
-    case LLVMContext::MD_mmra:
-      Dest.setMetadata(ID, N);
-      break;
-    default:
-      if (ID == Ctx.getMDKindID("amdgpu.no.remote.memory"))
-        Dest.setMetadata(ID, N);
-      else if (ID == Ctx.getMDKindID("amdgpu.no.fine.grained.memory"))
-        Dest.setMetadata(ID, N);
-
-      break;
-    }
-  }
-}
-
 // Widen the bitwise atomicrmw (or/xor/and) to the minimum supported width.
 AtomicRMWInst *AtomicExpandImpl::widenPartwordAtomicRMW(AtomicRMWInst *AI) {
   ReplacementIRBuilder Builder(AI, *DL);
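
For context on the widening comment above, a hedged sketch of the rule for bitwise partword operations (simplified; ShiftedVal and InvMask stand in for the pass's partword masking values, not its actual API). Or and xor can pad the extra bits of the containing word with zeros, while and must pad them with ones so the bytes outside the partword lane are left unchanged.

#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/Instructions.h"
using namespace llvm;

// Illustrative: build the word-sized operand for a widened partword
// or/xor/and. ShiftedVal is the partword value zero-extended and shifted into
// its lane; InvMask has ones in every bit outside that lane.
static Value *exampleWidenBitwiseOperand(IRBuilderBase &B,
                                         AtomicRMWInst::BinOp Op,
                                         Value *ShiftedVal, Value *InvMask) {
  if (Op == AtomicRMWInst::And)
    return B.CreateOr(ShiftedVal, InvMask, "AndOperand"); // pad with ones
  return ShiftedVal; // Or/Xor: zero padding leaves the other bytes unchanged
}
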
@@ -1591,7 +1598,7 @@ Value *AtomicExpandImpl::insertRMWCmpXchgLoop(
     IRBuilderBase &Builder, Type *ResultTy, Value *Addr, Align AddrAlign,
     AtomicOrdering MemOpOrder, SyncScope::ID SSID,
     function_ref<Value *(IRBuilderBase &, Value *)> PerformOp,
-    CreateCmpXchgInstFun CreateCmpXchg) {
+    CreateCmpXchgInstFun CreateCmpXchg, Instruction *MetadataSrc) {
   LLVMContext &Ctx = Builder.getContext();
   BasicBlock *BB = Builder.GetInsertBlock();
   Function *F = BB->getParent();
@@ -1637,7 +1644,7 @@ Value *AtomicExpandImpl::insertRMWCmpXchgLoop(
                 MemOpOrder == AtomicOrdering::Unordered
                     ? AtomicOrdering::Monotonic
                     : MemOpOrder,
-                SSID, Success, NewLoaded);
+                SSID, Success, NewLoaded, MetadataSrc);
   assert(Success && NewLoaded);
 
   Loaded->addIncoming(NewLoaded, LoopBB);
@@ -1686,7 +1693,7 @@ bool llvm::expandAtomicRMWToCmpXchg(AtomicRMWInst *AI,
         return buildAtomicRMWValue(AI->getOperation(), Builder, Loaded,
                                    AI->getValOperand());
       },
-      CreateCmpXchg);
+      CreateCmpXchg, /*MetadataSrc=*/AI);
 
   AI->replaceAllUsesWith(Loaded);
   AI->eraseFromParent();
@@ -1838,11 +1845,15 @@ void AtomicExpandImpl::expandAtomicRMWToLibcall(AtomicRMWInst *I) {
     expandAtomicRMWToCmpXchg(
         I, [this](IRBuilderBase &Builder, Value *Addr, Value *Loaded,
                   Value *NewVal, Align Alignment, AtomicOrdering MemOpOrder,
-                  SyncScope::ID SSID, Value *&Success, Value *&NewLoaded) {
+                  SyncScope::ID SSID, Value *&Success, Value *&NewLoaded,
+                  Instruction *MetadataSrc) {
           // Create the CAS instruction normally...
           AtomicCmpXchgInst *Pair = Builder.CreateAtomicCmpXchg(
               Addr, Loaded, NewVal, Alignment, MemOpOrder,
               AtomicCmpXchgInst::getStrongestFailureOrdering(MemOpOrder), SSID);
+          if (MetadataSrc)
+            copyMetadataForAtomic(*Pair, *MetadataSrc);
+
           Success = Builder.CreateExtractValue(Pair, 1, "success");
           NewLoaded = Builder.CreateExtractValue(Pair, 0, "newloaded");
 