AMDGPU: Fix temporal divergence introduced by machine-sink (#67456)

Temporal divergence that was present in the input or introduced by IR
transforms, such as code sinking or LICM, is handled in SIFixSGPRCopies
by rewriting the SGPR source instruction as a VGPR instruction.
After 5b657f5, which moved LICM after AMDGPUCodeGenPrepare,
machine sinking can introduce temporal divergence by sinking
instructions out of the cycle.
Add an isSafeToSink callback in TargetInstrInfo.
Petar Avramovic
2023-10-04 13:58:47 +02:00
parent 2d7fe90a3e
commit 2fa7d652d0
10 changed files with 96 additions and 6 deletions
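
For context, temporal divergence is the pattern this patch guards against: a value that is uniform on every iteration of a cycle becomes divergent when read after a divergent cycle exit, because each lane leaves the loop on a different iteration. A minimal standalone C++ sketch of the effect (illustrative only, not from this patch; the lane loop and divergentCond stand in for GPU execution):

    #include <cstdio>

    // Hypothetical stand-in for a per-lane (divergent) exit condition.
    static bool divergentCond(int Lane, int Iter) {
      return Iter > Lane; // each lane exits on a different iteration
    }

    int main() {
      for (int Lane = 0; Lane < 4; ++Lane) { // emulate four lanes
        int I = 0; // uniform inside the loop: every active lane computes it
        do {
          ++I; // sgpr-style arithmetic while the loop runs
        } while (!divergentCond(Lane, I));
        // Temporal divergence: lanes exited on different iterations, so each
        // observes a different I here; a use sunk past the exit must read a
        // per-lane (vgpr) value.
        std::printf("lane %d sees I = %d\n", Lane, I);
      }
    }

Sinking a use of I out of the loop is only sound if the loop exit is uniform; that is exactly the check the new hook performs.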


@@ -363,6 +363,25 @@ void GenericCycleInfo<ContextT>::compute(FunctionT &F) {
   assert(validateTree());
 }
 
+template <typename ContextT>
+void GenericCycleInfo<ContextT>::splitCriticalEdge(BlockT *Pred, BlockT *Succ,
+                                                   BlockT *NewBlock) {
+  // Edge Pred-Succ is replaced by edges Pred-NewBlock and NewBlock-Succ, all
+  // cycles that had blocks Pred and Succ also get NewBlock.
+  CycleT *Cycle = this->getCycle(Pred);
+  if (Cycle && Cycle->contains(Succ)) {
+    while (Cycle) {
+      // FixMe: Appending NewBlock is fine as a set of blocks in a cycle. When
+      // printing cycle NewBlock is at the end of list but it should be in the
+      // middle to represent actual traversal of a cycle.
+      Cycle->appendBlock(NewBlock);
+      BlockMap.try_emplace(NewBlock, Cycle);
+      Cycle = Cycle->getParentCycle();
+    }
+  }
+  assert(validateTree());
+}
+
 /// \brief Find the innermost cycle containing a given block.
 ///
 /// \returns the innermost cycle containing \p Block or nullptr if
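
A usage sketch of the new method (assumed caller shape, mirroring the MachineSink.cpp hunk below; Pass and the surrounding pass boilerplate are not from this patch): the caller splits the CFG edge first, then patches the cycle info in place of a full recompute:

    // Sketch: keep a lazily computed MachineCycleInfo *CI valid across an
    // edge split. SplitCriticalEdge is the existing MachineBasicBlock API;
    // splitCriticalEdge is the method added above.
    MachineBasicBlock *NewSucc = Pred->SplitCriticalEdge(Succ, Pass);
    if (NewSucc)
      CI->splitCriticalEdge(Pred, Succ, NewSucc);

As the FixMe notes, appending NewBlock keeps cycle membership queries correct even though the block list no longer reflects traversal order.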


@@ -255,6 +255,7 @@ public:
   void clear();
   void compute(FunctionT &F);
+  void splitCriticalEdge(BlockT *Pred, BlockT *Succ, BlockT *New);
 
   const FunctionT *getFunction() const { return Context.getFunction(); }
   const ContextT &getSSAContext() const { return Context; }


@@ -794,6 +794,15 @@ public:
         static_cast<const MachineBasicBlock *>(this)->getSingleSuccessor());
   }
 
+  /// Return the predecessor of this block if it has a single predecessor.
+  /// Otherwise return a null pointer.
+  ///
+  const MachineBasicBlock *getSinglePredecessor() const;
+  MachineBasicBlock *getSinglePredecessor() {
+    return const_cast<MachineBasicBlock *>(
+        static_cast<const MachineBasicBlock *>(this)->getSinglePredecessor());
+  }
+
   /// Return the fallthrough block if the block can implicitly
   /// transfer control to the block after it by falling off the end of
   /// it. If an explicit branch to the fallthrough block is not allowed,


@@ -19,6 +19,7 @@
#include "llvm/ADT/Uniformity.h"
#include "llvm/CodeGen/MIRFormatter.h"
#include "llvm/CodeGen/MachineBasicBlock.h"
#include "llvm/CodeGen/MachineCycleAnalysis.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineInstr.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
@@ -150,6 +151,11 @@ public:
return false;
}
virtual bool isSafeToSink(MachineInstr &MI, MachineBasicBlock *SuccToSinkTo,
MachineCycleInfo *CI) const {
return true;
}
protected:
/// For instructions with opcodes for which the M_REMATERIALIZABLE flag is
/// set, this hook lets the target specify whether the instruction is actually


@@ -960,6 +960,10 @@ const MachineBasicBlock *MachineBasicBlock::getSingleSuccessor() const {
   return Successors.size() == 1 ? Successors[0] : nullptr;
 }
 
+const MachineBasicBlock *MachineBasicBlock::getSinglePredecessor() const {
+  return Predecessors.size() == 1 ? Predecessors[0] : nullptr;
+}
+
 MachineBasicBlock *MachineBasicBlock::getFallThrough(bool JumpToFallThrough) {
   MachineFunction::iterator Fallthrough = getIterator();
   ++Fallthrough;


@@ -735,6 +735,7 @@ bool MachineSinking::runOnMachineFunction(MachineFunction &MF) {
           MadeChange = true;
           ++NumSplit;
+          CI->splitCriticalEdge(Pair.first, Pair.second, NewSucc);
         } else
           LLVM_DEBUG(dbgs() << " *** Not legal to break critical edge\n");
       }
@@ -1263,6 +1264,9 @@ MachineSinking::FindSuccToSinkTo(MachineInstr &MI, MachineBasicBlock *MBB,
   if (SuccToSinkTo && SuccToSinkTo->isInlineAsmBrIndirectTarget())
     return nullptr;
 
+  if (SuccToSinkTo && !TII->isSafeToSink(MI, SuccToSinkTo, CI))
+    return nullptr;
+
   return SuccToSinkTo;
 }
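
Two integration points: the splitCriticalEdge call keeps the lazily computed MachineCycleInfo consistent when the pass itself splits an edge, and the isSafeToSink query gives the target a veto just before FindSuccToSinkTo accepts a destination block.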


@@ -171,6 +171,48 @@ bool SIInstrInfo::isIgnorableUse(const MachineOperand &MO) const {
          isVALU(*MO.getParent()) && !resultDependsOnExec(*MO.getParent());
 }
 
+bool SIInstrInfo::isSafeToSink(MachineInstr &MI,
+                               MachineBasicBlock *SuccToSinkTo,
+                               MachineCycleInfo *CI) const {
+  // Allow sinking if MI edits lane mask (divergent i1 in sgpr).
+  if (MI.getOpcode() == AMDGPU::SI_IF_BREAK)
+    return true;
+
+  MachineRegisterInfo &MRI = MI.getMF()->getRegInfo();
+  // Check if sinking of MI would create temporal divergent use.
+  for (auto Op : MI.uses()) {
+    if (Op.isReg() && Op.getReg().isVirtual() &&
+        RI.isSGPRClass(MRI.getRegClass(Op.getReg()))) {
+      MachineInstr *SgprDef = MRI.getVRegDef(Op.getReg());
+
+      // SgprDef defined inside cycle
+      MachineCycle *FromCycle = CI->getCycle(SgprDef->getParent());
+      if (FromCycle == nullptr)
+        continue;
+
+      MachineCycle *ToCycle = CI->getCycle(SuccToSinkTo);
+      // Check if there is a FromCycle that contains SgprDef's basic block but
+      // does not contain SuccToSinkTo and also has divergent exit condition.
+      while (FromCycle && !FromCycle->contains(ToCycle)) {
+        // After structurize-cfg, there should be exactly one cycle exit.
+        SmallVector<MachineBasicBlock *, 1> ExitBlocks;
+        FromCycle->getExitBlocks(ExitBlocks);
+        assert(ExitBlocks.size() == 1);
+        assert(ExitBlocks[0]->getSinglePredecessor());
+
+        // FromCycle has divergent exit condition.
+        if (hasDivergentBranch(ExitBlocks[0]->getSinglePredecessor())) {
+          return false;
+        }
+
+        FromCycle = FromCycle->getParentCycle();
+      }
+    }
+  }
+
+  return true;
+}
+
 bool SIInstrInfo::areLoadsFromSameBasePtr(SDNode *Load0, SDNode *Load1,
                                           int64_t &Offset0,
                                           int64_t &Offset1) const {
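
Two details of the override are worth noting. SI_IF_BREAK is exempt because it manipulates a lane mask, a divergent i1 value that merely lives in an SGPR, so its SGPR use is not a temporal-divergence hazard. For other SGPR uses, the walk up the parent cycles vetoes the sink only when some cycle contains the SGPR def, does not contain SuccToSinkTo, and exits on a divergent branch; if every such cycle exits uniformly, all lanes leave together and the sunk use still reads a single well-defined value.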


@@ -222,6 +222,9 @@ public:
   bool isIgnorableUse(const MachineOperand &MO) const override;
 
+  bool isSafeToSink(MachineInstr &MI, MachineBasicBlock *SuccToSinkTo,
+                    MachineCycleInfo *CI) const override;
+
   bool areLoadsFromSameBasePtr(SDNode *Load0, SDNode *Load1, int64_t &Offset0,
                                int64_t &Offset1) const override;


@@ -167,6 +167,7 @@ define protected amdgpu_kernel void @kernel_round1(ptr addrspace(1) nocapture no
 ; CHECK-NEXT:    s_or_b32 exec_lo, exec_lo, s59
 ; CHECK-NEXT:    s_add_i32 s58, s58, 4
 ; CHECK-NEXT:    s_add_i32 s4, s55, s58
+; CHECK-NEXT:    v_add_nc_u32_e32 v0, s58, v57
 ; CHECK-NEXT:    s_add_i32 s5, s4, 5
 ; CHECK-NEXT:    s_add_i32 s4, s4, 1
 ; CHECK-NEXT:    v_cmp_ge_u32_e32 vcc_lo, s5, v42
@@ -267,7 +268,7 @@ define protected amdgpu_kernel void @kernel_round1(ptr addrspace(1) nocapture no
 ; CHECK-NEXT:  .LBB0_16: ; %Flow43
 ; CHECK-NEXT:    ; in Loop: Header=BB0_5 Depth=1
 ; CHECK-NEXT:    s_or_b32 exec_lo, exec_lo, s57
-; CHECK-NEXT:    v_add_nc_u32_e32 v57, s58, v57
+; CHECK-NEXT:    v_mov_b32_e32 v57, v0
 ; CHECK-NEXT:  .LBB0_17: ; %Flow44
 ; CHECK-NEXT:    ; in Loop: Header=BB0_5 Depth=1
 ; CHECK-NEXT:    s_or_b32 exec_lo, exec_lo, s56
@@ -869,6 +870,7 @@ define protected amdgpu_kernel void @kernel_round1_short(ptr addrspace(1) nocapt
 ; CHECK-NEXT:    s_add_i32 s7, s7, 4
 ; CHECK-NEXT:    v_add_nc_u32_e32 v43, 1, v43
 ; CHECK-NEXT:    s_add_i32 s8, s4, s7
+; CHECK-NEXT:    v_add_nc_u32_e32 v0, s7, v47
 ; CHECK-NEXT:    s_add_i32 s9, s8, 5
 ; CHECK-NEXT:    s_add_i32 s8, s8, 1
 ; CHECK-NEXT:    v_cmp_ge_u32_e32 vcc_lo, s9, v41
@@ -879,7 +881,7 @@ define protected amdgpu_kernel void @kernel_round1_short(ptr addrspace(1) nocapt
 ; CHECK-NEXT:  ; %bb.4: ; %Flow3
 ; CHECK-NEXT:    ; in Loop: Header=BB1_1 Depth=1
 ; CHECK-NEXT:    s_or_b32 exec_lo, exec_lo, s6
-; CHECK-NEXT:    v_add_nc_u32_e32 v47, s7, v47
+; CHECK-NEXT:    v_mov_b32_e32 v47, v0
 ; CHECK-NEXT:  .LBB1_5: ; %Flow4
 ; CHECK-NEXT:    ; in Loop: Header=BB1_1 Depth=1
 ; CHECK-NEXT:    s_or_b32 exec_lo, exec_lo, s5


@@ -22,6 +22,7 @@ body: |
 ; CHECK-NEXT:   [[PHI:%[0-9]+]]:sreg_32 = PHI [[S_MOV_B32_1]], %bb.0, %6, %bb.1
 ; CHECK-NEXT:   [[PHI1:%[0-9]+]]:sreg_32 = PHI [[S_MOV_B32_]], %bb.0, %8, %bb.1
 ; CHECK-NEXT:   [[S_ADD_I32_:%[0-9]+]]:sreg_32 = S_ADD_I32 [[PHI1]], [[S_MOV_B32_2]], implicit-def dead $scc
+; CHECK-NEXT:   [[V_ADD_U32_e64_:%[0-9]+]]:vgpr_32 = V_ADD_U32_e64 [[S_ADD_I32_]], [[S_ADD_I32_]], 0, implicit $exec
 ; CHECK-NEXT:   [[V_CVT_F32_U32_e64_:%[0-9]+]]:vgpr_32 = V_CVT_F32_U32_e64 [[S_ADD_I32_]], 0, 0, implicit $mode, implicit $exec
 ; CHECK-NEXT:   [[V_CMP_GT_F32_e64_:%[0-9]+]]:sreg_32 = nofpexcept V_CMP_GT_F32_e64 0, killed [[V_CVT_F32_U32_e64_]], 0, [[COPY]], 0, implicit $mode, implicit $exec
 ; CHECK-NEXT:   [[SI_IF_BREAK:%[0-9]+]]:sreg_32 = SI_IF_BREAK killed [[V_CMP_GT_F32_e64_]], [[PHI]], implicit-def dead $scc
@@ -30,7 +31,6 @@ body: |
 ; CHECK-NEXT: {{  $}}
 ; CHECK-NEXT: bb.2:
 ; CHECK-NEXT:   SI_END_CF [[SI_IF_BREAK]], implicit-def dead $exec, implicit-def dead $scc, implicit $exec
-; CHECK-NEXT:   [[V_ADD_U32_e64_:%[0-9]+]]:vgpr_32 = V_ADD_U32_e64 [[S_ADD_I32_]], [[S_ADD_I32_]], 0, implicit $exec
 ; CHECK-NEXT:   FLAT_STORE_DWORD [[COPY1]], [[V_ADD_U32_e64_]], 0, 0, implicit $exec, implicit $flat_scr :: (store (s32))
 ; CHECK-NEXT:   SI_RETURN
 bb.0:
@@ -83,6 +83,9 @@ body: |
 ; CHECK-NEXT:   [[PHI:%[0-9]+]]:sreg_32 = PHI [[S_MOV_B32_1]], %bb.0, %6, %bb.1
 ; CHECK-NEXT:   [[PHI1:%[0-9]+]]:sreg_32 = PHI [[S_MOV_B32_]], %bb.0, %8, %bb.1
 ; CHECK-NEXT:   [[S_ADD_I32_:%[0-9]+]]:sreg_32 = S_ADD_I32 [[PHI1]], [[S_MOV_B32_2]], implicit-def dead $scc
+; CHECK-NEXT:   [[S_ADD_I32_1:%[0-9]+]]:sreg_32 = S_ADD_I32 [[S_ADD_I32_]], [[S_MOV_B32_2]], implicit-def dead $scc
+; CHECK-NEXT:   [[S_ADD_I32_2:%[0-9]+]]:sreg_32 = S_ADD_I32 [[S_ADD_I32_1]], [[S_MOV_B32_2]], implicit-def dead $scc
+; CHECK-NEXT:   [[V_ADD_U32_e64_:%[0-9]+]]:vgpr_32 = V_ADD_U32_e64 [[S_ADD_I32_2]], [[S_ADD_I32_2]], 0, implicit $exec
 ; CHECK-NEXT:   [[V_CVT_F32_U32_e64_:%[0-9]+]]:vgpr_32 = V_CVT_F32_U32_e64 [[S_ADD_I32_]], 0, 0, implicit $mode, implicit $exec
 ; CHECK-NEXT:   [[V_CMP_GT_F32_e64_:%[0-9]+]]:sreg_32 = nofpexcept V_CMP_GT_F32_e64 0, killed [[V_CVT_F32_U32_e64_]], 0, [[COPY]], 0, implicit $mode, implicit $exec
 ; CHECK-NEXT:   [[SI_IF_BREAK:%[0-9]+]]:sreg_32 = SI_IF_BREAK killed [[V_CMP_GT_F32_e64_]], [[PHI]], implicit-def dead $scc
@@ -91,9 +94,6 @@ body: |
 ; CHECK-NEXT: {{  $}}
 ; CHECK-NEXT: bb.2:
 ; CHECK-NEXT:   SI_END_CF [[SI_IF_BREAK]], implicit-def dead $exec, implicit-def dead $scc, implicit $exec
-; CHECK-NEXT:   [[S_ADD_I32_1:%[0-9]+]]:sreg_32 = S_ADD_I32 [[S_ADD_I32_]], [[S_MOV_B32_2]], implicit-def dead $scc
-; CHECK-NEXT:   [[S_ADD_I32_2:%[0-9]+]]:sreg_32 = S_ADD_I32 [[S_ADD_I32_1]], [[S_MOV_B32_2]], implicit-def dead $scc
-; CHECK-NEXT:   [[V_ADD_U32_e64_:%[0-9]+]]:vgpr_32 = V_ADD_U32_e64 [[S_ADD_I32_2]], [[S_ADD_I32_2]], 0, implicit $exec
 ; CHECK-NEXT:   FLAT_STORE_DWORD [[COPY1]], [[V_ADD_U32_e64_]], 0, 0, implicit $exec, implicit $flat_scr :: (store (s32))
 ; CHECK-NEXT:   SI_RETURN
 bb.0: