[RISCV] Rematerialize vmv.s.x and vfmv.s.f (#108012)

Continuing with #107993 and #108007, this handles the last of the main
rematerializable vector instructions.

There's an extra spill in one of the test cases, but it's likely noise
from the spill weights and isn't an issue in practice.
Author: Luke Lau
Date:   2024-09-11 09:44:57 +08:00
Committed by: GitHub
Parent: 21a0176c58
Commit: 933fc63a1d
4 changed files with 600 additions and 448 deletions


@@ -172,6 +172,8 @@ bool RISCVInstrInfo::isReallyTriviallyReMaterializable(
case RISCV::VMV_V_X:
case RISCV::VFMV_V_F:
case RISCV::VMV_V_I:
+case RISCV::VMV_S_X:
+case RISCV::VFMV_S_F:
case RISCV::VID_V:
if (MI.getOperand(1).isUndef() &&
/* After RISCVInsertVSETVLI most pseudos will have implicit uses on vl
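For orientation, here is a condensed sketch of how the whole hook might read with these cases added. Only the case labels and the start of the isUndef() check appear in the hunk above; the implicit vl/vtype guard and the fallthrough to the base-class implementation are assumptions filled in for illustration, not part of this diff.

// Sketch of RISCVInstrInfo::isReallyTriviallyReMaterializable (assumed shape,
// not the verbatim upstream code).
bool RISCVInstrInfo::isReallyTriviallyReMaterializable(
    const MachineInstr &MI) const {
  switch (RISCV::getRVVMCOpcode(MI.getOpcode())) {
  case RISCV::VMV_V_X:
  case RISCV::VFMV_V_F:
  case RISCV::VMV_V_I:
  case RISCV::VMV_S_X:   // newly handled by this patch
  case RISCV::VFMV_S_F:  // newly handled by this patch
  case RISCV::VID_V:
    // Remat is only considered when the passthru operand is undef, and only
    // before RISCVInsertVSETVLI has attached implicit vl/vtype uses (the
    // explicit VL/SEW operands are still copied when the instruction is
    // re-emitted).
    if (MI.getOperand(1).isUndef() &&
        !MI.hasRegisterImplicitUseOperand(RISCV::VL) &&
        !MI.hasRegisterImplicitUseOperand(RISCV::VTYPE))
      return true;
    break;
  default:
    break;
  }
  return TargetInstrInfo::isReallyTriviallyReMaterializable(MI);
}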


@@ -6764,7 +6764,7 @@ let mayLoad = 0, mayStore = 0, hasSideEffects = 0 in {
Pseudo<(outs GPR:$rd), (ins VR:$rs2, ixlenimm:$sew), []>,
Sched<[WriteVMovXS, ReadVMovXS]>,
RISCVVPseudo;
-let HasVLOp = 1, HasSEWOp = 1, BaseInstr = VMV_S_X,
+let HasVLOp = 1, HasSEWOp = 1, BaseInstr = VMV_S_X, isReMaterializable = 1,
Constraints = "$rd = $rs1" in
def PseudoVMV_S_X: Pseudo<(outs VR:$rd),
(ins VR:$rs1, GPR:$rs2, AVL:$vl, ixlenimm:$sew),
@@ -6787,7 +6787,7 @@ let mayLoad = 0, mayStore = 0, hasSideEffects = 0 in {
(ins VR:$rs2, ixlenimm:$sew), []>,
Sched<[WriteVMovFS, ReadVMovFS]>,
RISCVVPseudo;
-let HasVLOp = 1, HasSEWOp = 1, BaseInstr = VFMV_S_F,
+let HasVLOp = 1, HasSEWOp = 1, BaseInstr = VFMV_S_F, isReMaterializable = 1,
Constraints = "$rd = $rs1" in
def "PseudoVFMV_S_" # f.FX :
Pseudo<(outs VR:$rd),
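The isReMaterializable = 1 bits above and the C++ hook in the first hunk are two halves of the same mechanism: the TableGen flag sets the MCInstrDesc bit that generic code checks first, and only then does it consult the target override. A rough sketch of that generic gate, assuming the usual TargetInstrInfo shape (illustration only, not part of this patch):

// Sketch of the generic query in TargetInstrInfo (assumed shape).
bool isTriviallyReMaterializable(const MachineInstr &MI) const {
  // The isRematerializable() bit comes from isReMaterializable = 1 in the
  // .td file; the virtual hook is the RISC-V override shown earlier.
  return MI.getDesc().isRematerializable() &&
         isReallyTriviallyReMaterializable(MI);
}

Without the .td bit the register allocator never consults the RISC-V hook; without the hook's undef-passthru check the bit alone would be unsound, because these pseudos tie their destination to a passthru operand, so the result is only a pure function of the scalar when that passthru is undef.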


@@ -377,3 +377,133 @@ define void @vfmv.v.f(ptr %p, double %x) {
store volatile double %x, ptr %p
ret void
}
define void @vmv.s.x(ptr %p, i64 %x) {
; POSTRA-LABEL: vmv.s.x:
; POSTRA: # %bb.0:
; POSTRA-NEXT: vsetvli a2, zero, e64, m1, ta, ma
; POSTRA-NEXT: vmv.s.x v8, a1
; POSTRA-NEXT: vs8r.v v8, (a0)
; POSTRA-NEXT: vl8re64.v v16, (a0)
; POSTRA-NEXT: vl8re64.v v24, (a0)
; POSTRA-NEXT: vl8re64.v v0, (a0)
; POSTRA-NEXT: vl8re64.v v8, (a0)
; POSTRA-NEXT: vs8r.v v8, (a0)
; POSTRA-NEXT: vs8r.v v0, (a0)
; POSTRA-NEXT: vs8r.v v24, (a0)
; POSTRA-NEXT: vs8r.v v16, (a0)
; POSTRA-NEXT: vmv.s.x v8, a1
; POSTRA-NEXT: vs8r.v v8, (a0)
; POSTRA-NEXT: sd a1, 0(a0)
; POSTRA-NEXT: ret
;
; PRERA-LABEL: vmv.s.x:
; PRERA: # %bb.0:
; PRERA-NEXT: addi sp, sp, -16
; PRERA-NEXT: .cfi_def_cfa_offset 16
; PRERA-NEXT: csrr a2, vlenb
; PRERA-NEXT: slli a2, a2, 3
; PRERA-NEXT: sub sp, sp, a2
; PRERA-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x08, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 8 * vlenb
; PRERA-NEXT: vsetvli a2, zero, e64, m1, ta, ma
; PRERA-NEXT: vmv.s.x v8, a1
; PRERA-NEXT: vs8r.v v8, (a0)
; PRERA-NEXT: vl8re64.v v16, (a0)
; PRERA-NEXT: addi a2, sp, 16
; PRERA-NEXT: vs8r.v v16, (a2) # Unknown-size Folded Spill
; PRERA-NEXT: vl8re64.v v24, (a0)
; PRERA-NEXT: vl8re64.v v0, (a0)
; PRERA-NEXT: vl8re64.v v16, (a0)
; PRERA-NEXT: vs8r.v v16, (a0)
; PRERA-NEXT: vs8r.v v0, (a0)
; PRERA-NEXT: vs8r.v v24, (a0)
; PRERA-NEXT: vl8r.v v16, (a2) # Unknown-size Folded Reload
; PRERA-NEXT: vs8r.v v16, (a0)
; PRERA-NEXT: vs8r.v v8, (a0)
; PRERA-NEXT: sd a1, 0(a0)
; PRERA-NEXT: csrr a0, vlenb
; PRERA-NEXT: slli a0, a0, 3
; PRERA-NEXT: add sp, sp, a0
; PRERA-NEXT: addi sp, sp, 16
; PRERA-NEXT: ret
%vmv.s.x = call <vscale x 8 x i64> @llvm.riscv.vmv.s.x.nxv8i64(<vscale x 8 x i64> poison, i64 %x, i64 -1)
store volatile <vscale x 8 x i64> %vmv.s.x, ptr %p
%a = load volatile <vscale x 8 x i64>, ptr %p
%b = load volatile <vscale x 8 x i64>, ptr %p
%c = load volatile <vscale x 8 x i64>, ptr %p
%d = load volatile <vscale x 8 x i64>, ptr %p
store volatile <vscale x 8 x i64> %d, ptr %p
store volatile <vscale x 8 x i64> %c, ptr %p
store volatile <vscale x 8 x i64> %b, ptr %p
store volatile <vscale x 8 x i64> %a, ptr %p
store volatile <vscale x 8 x i64> %vmv.s.x, ptr %p
store volatile i64 %x, ptr %p
ret void
}
define void @vfmv.s.f(ptr %p, double %x) {
; POSTRA-LABEL: vfmv.s.f:
; POSTRA: # %bb.0:
; POSTRA-NEXT: vsetvli a1, zero, e64, m1, ta, ma
; POSTRA-NEXT: vfmv.s.f v8, fa0
; POSTRA-NEXT: vs8r.v v8, (a0)
; POSTRA-NEXT: vl8re64.v v16, (a0)
; POSTRA-NEXT: vl8re64.v v24, (a0)
; POSTRA-NEXT: vl8re64.v v0, (a0)
; POSTRA-NEXT: vl8re64.v v8, (a0)
; POSTRA-NEXT: vs8r.v v8, (a0)
; POSTRA-NEXT: vs8r.v v0, (a0)
; POSTRA-NEXT: vs8r.v v24, (a0)
; POSTRA-NEXT: vs8r.v v16, (a0)
; POSTRA-NEXT: vfmv.s.f v8, fa0
; POSTRA-NEXT: vs8r.v v8, (a0)
; POSTRA-NEXT: fsd fa0, 0(a0)
; POSTRA-NEXT: ret
;
; PRERA-LABEL: vfmv.s.f:
; PRERA: # %bb.0:
; PRERA-NEXT: addi sp, sp, -16
; PRERA-NEXT: .cfi_def_cfa_offset 16
; PRERA-NEXT: csrr a1, vlenb
; PRERA-NEXT: slli a1, a1, 3
; PRERA-NEXT: sub sp, sp, a1
; PRERA-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x08, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 8 * vlenb
; PRERA-NEXT: vsetvli a1, zero, e64, m1, ta, ma
; PRERA-NEXT: vfmv.s.f v8, fa0
; PRERA-NEXT: vs8r.v v8, (a0)
; PRERA-NEXT: vl8re64.v v16, (a0)
; PRERA-NEXT: addi a1, sp, 16
; PRERA-NEXT: vs8r.v v16, (a1) # Unknown-size Folded Spill
; PRERA-NEXT: vl8re64.v v24, (a0)
; PRERA-NEXT: vl8re64.v v0, (a0)
; PRERA-NEXT: vl8re64.v v16, (a0)
; PRERA-NEXT: vs8r.v v16, (a0)
; PRERA-NEXT: vs8r.v v0, (a0)
; PRERA-NEXT: vs8r.v v24, (a0)
; PRERA-NEXT: vl8r.v v16, (a1) # Unknown-size Folded Reload
; PRERA-NEXT: vs8r.v v16, (a0)
; PRERA-NEXT: vs8r.v v8, (a0)
; PRERA-NEXT: fsd fa0, 0(a0)
; PRERA-NEXT: csrr a0, vlenb
; PRERA-NEXT: slli a0, a0, 3
; PRERA-NEXT: add sp, sp, a0
; PRERA-NEXT: addi sp, sp, 16
; PRERA-NEXT: ret
%vfmv.s.f = call <vscale x 8 x double> @llvm.riscv.vfmv.s.f.nxv8f64(<vscale x 8 x double> poison, double %x, i64 -1)
store volatile <vscale x 8 x double> %vfmv.s.f, ptr %p
%a = load volatile <vscale x 8 x double>, ptr %p
%b = load volatile <vscale x 8 x double>, ptr %p
%c = load volatile <vscale x 8 x double>, ptr %p
%d = load volatile <vscale x 8 x double>, ptr %p
store volatile <vscale x 8 x double> %d, ptr %p
store volatile <vscale x 8 x double> %c, ptr %p
store volatile <vscale x 8 x double> %b, ptr %p
store volatile <vscale x 8 x double> %a, ptr %p
store volatile <vscale x 8 x double> %vfmv.s.f, ptr %p
store volatile double %x, ptr %p
ret void
}