Skip to content

[AArch64][PAC] Implement code generation for @llvm.ptrauth.auth #72502

New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Closed
wants to merge 1 commit into from
Closed
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
127 changes: 127 additions & 0 deletions llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -2869,6 +2869,130 @@ AArch64TargetLowering::EmitZero(MachineInstr &MI, MachineBasicBlock *BB) const {
return BB;
}

// Custom inserter for PAUTH_* pseudo instructions (currently only
// PAUTH_AUTH reaches here, see the switch at the bottom).
//
// The instruction selector emits PAUTH_AUTH with a generic (reg, 0, 0)
// discriminator triple; this hook refines those operands in place when a
// cheaper signing schema can be recognized, and fixes up the def-ed /
// implicit-def-ed registers. No control flow is created: BB is returned
// unchanged.
MachineBasicBlock *
AArch64TargetLowering::EmitPAuthInstr(MachineInstr &MI,
                                      MachineBasicBlock *BB) const {
  const AArch64InstrInfo *TII = Subtarget->getInstrInfo();
  MachineRegisterInfo &MRI = BB->getParent()->getRegInfo();
  DebugLoc DL = MI.getDebugLoc();

  // The discriminator should be expressed by consecutive operands
  // (raw_register, immediate_integer, is_blend). This function accepts
  // (reg, 0, 0) operands generated by the instruction selector and tries
  // to detect either small immediate discriminator expressed as
  // (XZR, small_int, 0), blend(something, small_int) expressed as
  // (something, small_int, 1) or keeps the operands as-is otherwise.
  auto DetectDiscriminator = [&](unsigned RegDiscOpIndex) {
    MachineOperand &RegOp = MI.getOperand(RegDiscOpIndex);
    MachineOperand &ImmOp = MI.getOperand(RegDiscOpIndex + 1);
    MachineOperand &IsBlendOp = MI.getOperand(RegDiscOpIndex + 2);
    assert(ImmOp.getImm() == 0 && "Operand was already initialized");
    assert(IsBlendOp.getImm() == 0 && "Operand was already initialized");

    // Walk through the chain of copy-like instructions until we find
    // a known signing schema, if any. The loop stops as soon as DiscReg
    // becomes a physical register, since only virtual registers have a
    // single defining instruction to follow via getVRegDef.
    Register AddrDisc;
    uint64_t ImmDisc;
    for (Register DiscReg = RegOp.getReg(); DiscReg.isVirtual();) {
      MachineInstr *DefiningMI = MRI.getVRegDef(DiscReg);
      switch (DefiningMI->getOpcode()) {
      case AArch64::COPY:
        // Look through copies; a copy from XZR is the zero discriminator,
        // any other physical source terminates the walk (loop condition).
        DiscReg = DefiningMI->getOperand(1).getReg();
        if (DiscReg == AArch64::XZR) {
          // Zero discriminator: (XZR, 0, 0).
          RegOp.setReg(AArch64::XZR);
          return;
        }
        break;
      case AArch64::SUBREG_TO_REG:
        // Look through the widening of a narrower value; operand 2 is the
        // inserted sub-register value.
        DiscReg = DefiningMI->getOperand(2).getReg();
        break;
      case AArch64::MOVi32imm:
        ImmDisc = DefiningMI->getOperand(1).getImm();
        // If ImmDisc does not fit in 16 bits,
        // consider it as custom computation.
        if ((ImmDisc & 0xffff) == ImmDisc) {
          // Small immediate integer: (XZR, imm, 0).
          RegOp.setReg(AArch64::XZR);
          ImmOp.setImm(ImmDisc);
        }
        return;
      case AArch64::PAUTH_BLEND:
        // blend(addr, small_int): (addr, small_int, 1).
        AddrDisc = DefiningMI->getOperand(1).getReg();
        ImmDisc = DefiningMI->getOperand(2).getImm();
        assert((ImmDisc & 0xffff) == ImmDisc &&
               "Expected 16-bit integer operand in PAUTH_BLEND");
        RegOp.setReg(AddrDisc);
        ImmOp.setImm(ImmDisc);
        IsBlendOp.setImm(1);
        return;
      default:
        // Custom computation, leave it as-is.
        return;
      }
    }
  };

  // Removes the last operand of MI, asserting (when assertions are
  // enabled) that it is an implicit def of ExpectedReg.
  auto PopImplicitDef = [&](Register ExpectedReg) {
    (void)ExpectedReg; // Only referenced by asserts; silence -Wunused.
    unsigned Index = MI.getNumOperands() - 1;
    assert(MI.getOperand(Index).isImplicit());
    assert(MI.getOperand(Index).isDef());
    assert(MI.getOperand(Index).getReg() == ExpectedReg);
    MI.removeOperand(Index);
  };

  // Rewrites the defined registers of MI after discriminator detection.
  // TiedRegDiscOpIndex is the index of the $reg_disc operand, which is
  // tied to the $scratch def (operand 1).
  auto AdjustDefinedRegisters = [&](unsigned TiedRegDiscOpIndex) {
    Register RegDisc = MI.getOperand(TiedRegDiscOpIndex).getReg();

    // The instruction, as selected by TableGen-erated code, has X16 and X17
    // registers implicitly defined, to make sure they are safe to clobber.
    //
    // Remove these generic implicit defs here and re-add them as needed and
    // if needed. If assertions are enabled, additionally check that the two
    // implicit operands are the expected ones.
    PopImplicitDef(AArch64::X17);
    PopImplicitDef(AArch64::X16);

    // $scratch operand is tied to $reg_disc, thus if an immediate integer
    // discriminator is used, $scratch ends up being XZR. In that case, add
    // an implicit-def scratch register - this is a special case known by
    // aarch64-ptrauth pass.
    MachineOperand *RealScratchOp = &MI.getOperand(1);
    if (RegDisc == AArch64::XZR) {
      MI.getOperand(1).setReg(AArch64::XZR);
      Register ScratchReg = MRI.createVirtualRegister(&AArch64::GPR64RegClass);
      MI.addOperand(MachineOperand::CreateReg(ScratchReg, /*isDef=*/true,
                                              /*isImp=*/true));
      // Re-take the pointer: addOperand may reallocate the operand list,
      // invalidating RealScratchOp captured above.
      RealScratchOp = &MI.getOperand(MI.getNumOperands() - 1);
    }

    assert((RegDisc == AArch64::XZR || RegDisc.isVirtual()) &&
           "Accidentally clobbering register?");

    // If target CPU does not support FEAT_PAuth, IA and IB keys are still
    // usable via HINT-encoded instructions.
    // The HINT-space forms operate on fixed registers, hence the result is
    // pinned to X17 and the scratch to X16 here, and the value is copied
    // back out afterwards. (NOTE(review): presumably matching the
    // AUTIA1716/AUTIB1716 conventions - confirm against the expansion
    // pass.)
    if (!Subtarget->hasPAuth()) {
      Register AutedReg = MI.getOperand(0).getReg();

      MI.getOperand(0).setReg(AArch64::X17);
      RealScratchOp->setReg(AArch64::X16);
      BuildMI(*BB, MI.getNextNode(), DL, TII->get(AArch64::COPY), AutedReg)
          .addReg(AArch64::X17);
    }
  };

  switch (MI.getOpcode()) {
  default:
    llvm_unreachable("Unhandled opcode");
  case AArch64::PAUTH_AUTH:
    // Operand layout: (outs $auted, $scratch),
    // (ins $signed, $reg_disc, $int_disc, $is_blended, $key_id) -
    // the discriminator triple starts at index 3.
    DetectDiscriminator(/*RegDiscOpIndex=*/3);
    AdjustDefinedRegisters(/*TiedRegDiscOpIndex=*/3);
    break;
  }
  return BB;
}

MachineBasicBlock *AArch64TargetLowering::EmitInstrWithCustomInserter(
MachineInstr &MI, MachineBasicBlock *BB) const {

Expand Down Expand Up @@ -2922,6 +3046,9 @@ MachineBasicBlock *AArch64TargetLowering::EmitInstrWithCustomInserter(
case TargetOpcode::PATCHABLE_TYPED_EVENT_CALL:
return BB;

case AArch64::PAUTH_AUTH:
return EmitPAuthInstr(MI, BB);

case AArch64::CATCHRET:
return EmitLoweredCatchRet(MI, BB);

Expand Down
3 changes: 3 additions & 0 deletions llvm/lib/Target/AArch64/AArch64ISelLowering.h
Original file line number Diff line number Diff line change
Expand Up @@ -643,6 +643,9 @@ class AArch64TargetLowering : public TargetLowering {
unsigned Opcode, bool Op0IsDef) const;
MachineBasicBlock *EmitZero(MachineInstr &MI, MachineBasicBlock *BB) const;

MachineBasicBlock *EmitPAuthInstr(MachineInstr &MI,
MachineBasicBlock *BB) const;

MachineBasicBlock *
EmitInstrWithCustomInserter(MachineInstr &MI,
MachineBasicBlock *MBB) const override;
Expand Down
23 changes: 23 additions & 0 deletions llvm/lib/Target/AArch64/AArch64InstrInfo.td
Original file line number Diff line number Diff line change
Expand Up @@ -1584,6 +1584,25 @@ def PAUTH_EPILOGUE : Pseudo<(outs), (ins), []>, Sched<[]>;
// Pseudo for @llvm.ptrauth.blend: combines an address discriminator with a
// 16-bit integer discriminator. (Presumably the integer ends up in the top
// 16 bits of $disc, cf. the BFMXri selection pattern for int_ptrauth_blend
// - confirm against that pattern.)
def PAUTH_BLEND : Pseudo<(outs GPR64:$disc),
                         (ins GPR64:$addr_disc, i32imm:$int_disc), []>, Sched<[]>;

// Pseudo for @llvm.ptrauth.auth, expanded via a custom inserter
// (EmitPAuthInstr in AArch64ISelLowering.cpp).
//
// Two tasks are handled by the custom inserter:
// 1. It tries to detect known signing schemas: either a small immediate
//    integer discriminator or an arbitrary register blended with a small
//    integer - if such a schema is detected, it is saved into the
//    instruction's operands.
// 2. It is worth reusing $reg_disc as a scratch register unless an
//    immediate integer is used as a discriminator (in that case $reg_disc
//    is XZR). In the latter case $scratch is technically XZR, but another
//    def-ed register is added as an implicit operand by the inserter.
//
// See the comments in the custom inserter code for an explanation of why
// "Defs = [X16, X17]" is specified here.
let usesCustomInserter = 1, Defs = [X16, X17] in {
def PAUTH_AUTH : Pseudo<(outs GPR64common:$auted, GPR64:$scratch),
                        (ins GPR64common:$signed,
                             GPR64:$reg_disc, i32imm:$int_disc,
                             i32imm:$is_blended, i32imm:$key_id), [],
                        "$auted = $signed, $scratch = $reg_disc">, Sched<[]>;
}

// These pointer authentication instructions require armv8.3a
let Predicates = [HasPAuth] in {

Expand Down Expand Up @@ -9337,6 +9356,10 @@ def : Pat<(int_ptrauth_blend GPR64:$Rd, imm64_0_65535:$imm),
def : Pat<(int_ptrauth_blend GPR64:$Rd, GPR64:$Rn),
(BFMXri GPR64:$Rd, GPR64:$Rn, 16, 15)>;

// Select @llvm.ptrauth.auth into PAUTH_AUTH with the generic (reg, 0, 0)
// discriminator triple; the custom inserter later refines these operands
// when a known signing schema is detected.
def : Pat<(int_ptrauth_auth GPR64:$signed,
           timm:$key_id, GPR64:$reg_disc),
          (PAUTH_AUTH GPR64:$signed, GPR64:$reg_disc, 0, 0, timm:$key_id)>;

//-----------------------------------------------------------------------------

// This gets lowered into an instruction sequence of 20 bytes
Expand Down
Loading